From 83b12eec280aac82b50ed0d1214f4751c1f4e70f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Tue, 19 Mar 2024 10:51:12 +0000 Subject: [PATCH 01/26] Make the LoP follow the GSM states This also involves some fairly important changes to the leaky bucket and to the leaky bucket API. These changes make the leaky bucket significantly more robust. Co-authored-by: Alexander Esgen --- .../Ouroboros/Consensus/Network/NodeToNode.hs | 3 +- .../Ouroboros/Consensus/Node.hs | 2 +- .../Ouroboros/Consensus/Node/GSM.hs | 34 +- .../Ouroboros/Consensus/NodeKernel.hs | 28 +- .../Test/Consensus/PeerSimulator/ChainSync.hs | 4 +- ouroboros-consensus/ouroboros-consensus.cabal | 2 + .../MiniProtocol/ChainSync/Client.hs | 73 +++- .../MiniProtocol/ChainSync/Client/State.hs | 17 +- .../Ouroboros/Consensus/Node/GsmState.hs | 23 + .../Ouroboros/Consensus/Util/LeakyBucket.hs | 409 +++++++++++++----- .../MiniProtocol/ChainSync/Client.hs | 7 +- .../Consensus/Util/LeakyBucket/Tests.hs | 248 +++++++---- 12 files changed, 575 insertions(+), 275 deletions(-) create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/GsmState.hs diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs index 4dbf6a9361..abb195eb65 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToNode.hs @@ -41,7 +41,7 @@ import qualified Codec.CBOR.Decoding as CBOR import Codec.CBOR.Encoding (Encoding) import qualified Codec.CBOR.Encoding as CBOR import Codec.CBOR.Read (DeserialiseFailure) -import Control.Concurrent.Class.MonadSTM.Strict.TVar as TVar.Unchecked +import qualified Control.Concurrent.Class.MonadSTM.Strict.TVar as TVar.Unchecked import Control.Monad.Class.MonadTime.SI (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer @@ -570,6 +570,7 @@ mkApps kernel Tracers {..} mkCodecs ByteLimits {..} genChainSyncTimeout lopBucke (contramap (TraceLabelPeer them) (Node.chainSyncClientTracer (getTracers kernel))) (CsClient.defaultChainDbView (getChainDB kernel)) (getChainSyncHandles kernel) + (getGsmState kernel) them version lopBucketConfig diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs index b2e0609579..9fc7c83b3c 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs @@ -629,7 +629,7 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = LedgerPeersConsensusInterface { lpGetLatestSlot = getImmTipSlot kernel, lpGetLedgerPeers = fromMaybe [] <$> getPeersFromCurrentLedger kernel (const True), - lpGetLedgerStateJudgement = getLedgerStateJudgement kernel + lpGetLedgerStateJudgement = GSM.gsmStateToLedgerJudgement <$> getGsmState kernel }, Diffusion.daUpdateOutboundConnectionsState = let varOcs = getOutboundConnectionsState kernel in \newOcs -> do diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs 
b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs index 41cb3aaf24..8c420f370d 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs @@ -19,11 +19,13 @@ module Ouroboros.Consensus.Node.GSM ( -- * Auxiliaries , TraceGsmEvent (..) , gsmStateToLedgerJudgement - , initializationLedgerJudgement + , initializationGsmState -- * Constructors , realDurationUntilTooOld , realGsmEntryPoints , realMarkerFileView + -- * Re-exported + , module Ouroboros.Consensus.Node.GsmState ) where import qualified Cardano.Slotting.Slot as Slot @@ -43,6 +45,7 @@ import qualified Ouroboros.Consensus.HardFork.Abstract as HardFork import qualified Ouroboros.Consensus.HardFork.History as HardFork import qualified Ouroboros.Consensus.HardFork.History.Qry as Qry import qualified Ouroboros.Consensus.Ledger.Basics as L +import Ouroboros.Consensus.Node.GsmState import Ouroboros.Consensus.Storage.ChainDB.API (ChainDB) import Ouroboros.Consensus.Util.NormalForm.StrictTVar (StrictTVar) import qualified Ouroboros.Consensus.Util.NormalForm.StrictTVar as StrictSTM @@ -80,19 +83,6 @@ data CandidateVersusSelection = -- ^ Whether the candidate is better than the selection deriving (Eq, Show) --- | Current state of the Genesis State Machine -data GsmState = - PreSyncing - -- ^ We are syncing, and the Honest Availability Assumption is not - -- satisfied. - | - Syncing - -- ^ We are syncing, and the Honest Availability Assumption is satisfied. - | - CaughtUp - -- ^ We are caught-up. - deriving (Eq, Show, Read) - data GsmView m upstreamPeer selection chainSyncState = GsmView { antiThunderingHerd :: Maybe StdGen -- ^ An initial seed used to randomly increase 'minCaughtUpDuration' by up @@ -168,10 +158,10 @@ data GsmEntryPoints m = GsmEntryPoints { ----- --- | Determine the initial 'LedgerStateJudgment' +-- | Determine the initial 'GsmState' -- -- Also initializes the persistent marker file. -initializationLedgerJudgement :: +initializationGsmState :: ( L.GetTip (L.LedgerState blk) , Monad m ) @@ -179,23 +169,23 @@ initializationLedgerJudgement :: -> Maybe (WrapDurationUntilTooOld m blk) -- ^ 'Nothing' if @blk@ has no age limit -> MarkerFileView m - -> m LedgerStateJudgement -initializationLedgerJudgement + -> m GsmState +initializationGsmState getCurrentLedger mbDurationUntilTooOld markerFileView = do wasCaughtUp <- hasMarkerFile markerFileView - if not wasCaughtUp then pure TooOld else do + if not wasCaughtUp then pure PreSyncing else do case mbDurationUntilTooOld of - Nothing -> return YoungEnough + Nothing -> return CaughtUp Just wd -> do sno <- L.getTipSlot <$> getCurrentLedger getDurationUntilTooOld wd sno >>= \case - After{} -> return YoungEnough + After{} -> return CaughtUp Already -> do removeMarkerFile markerFileView - return TooOld + return PreSyncing -- | For 'LedgerStateJudgement' as used in the Diffusion layer, there is no -- difference between 'PreSyncing' and 'Syncing'. 
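For reference, the judgement conversion mentioned in the haddock above collapses both syncing states; a minimal illustrative sketch of the expected mapping (the actual 'gsmStateToLedgerJudgement' is defined and exported by this module but does not appear in this hunk):

  gsmStateToLedgerJudgement :: GsmState -> LedgerStateJudgement
  gsmStateToLedgerJudgement PreSyncing = TooOld      -- syncing, HAA not satisfied
  gsmStateToLedgerJudgement Syncing    = TooOld      -- syncing, HAA satisfied
  gsmStateToLedgerJudgement CaughtUp   = YoungEnough -- caught up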
diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs index 94d42910a3..b4dd556373 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs @@ -80,6 +80,8 @@ import Ouroboros.Consensus.Util.AnchoredFragment (preferAnchoredCandidate) import Ouroboros.Consensus.Util.EarlyExit import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Consensus.Util.LeakyBucket + (atomicallyWithMonotonicTime) import Ouroboros.Consensus.Util.Orphans () import Ouroboros.Consensus.Util.ResourceRegistry import Ouroboros.Consensus.Util.STM @@ -130,9 +132,10 @@ data NodeKernel m addrNTN addrNTC blk = NodeKernel { -- , getFetchMode :: STM m FetchMode - -- | The ledger judgement, used by diffusion. + -- | The GSM state, used by diffusion. A ledger judgement can be derived + -- from it with 'GSM.gsmStateToLedgerJudgement'. -- - , getLedgerStateJudgement :: STM m LedgerStateJudgement + , getGsmState :: STM m GSM.GsmState -- | The kill handle and exposed state for each ChainSync client. , getChainSyncHandles :: StrictTVar m (Map (ConnectionId addrNTN) (ChainSyncClientHandle m blk)) @@ -206,7 +209,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers , mempool , peerSharingRegistry , varChainSyncHandles - , varLedgerJudgement + , varGsmState } = st varOutboundConnectionsState <- newTVarIO UntrustedState @@ -244,8 +247,11 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers , GSM.setCaughtUpPersistentMark = \upd -> (if upd then GSM.touchMarkerFile else GSM.removeMarkerFile) gsmMarkerFileView - , GSM.writeGsmState = \x -> atomically $ do - writeTVar varLedgerJudgement $ GSM.gsmStateToLedgerJudgement x + , GSM.writeGsmState = \gsmState -> + atomicallyWithMonotonicTime $ \time -> do + writeTVar varGsmState gsmState + handles <- readTVar varChainSyncHandles + traverse_ (($ time) . ($ gsmState) . 
cschGsmCallback) handles , GSM.isHaaSatisfied = do readTVar varOutboundConnectionsState <&> \case -- See the upstream Haddocks for the exact conditions under @@ -253,7 +259,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers TrustedStateWithExternalPeers -> True UntrustedState -> False } - judgment <- readTVarIO varLedgerJudgement + judgment <- GSM.gsmStateToLedgerJudgement <$> readTVarIO varGsmState void $ forkLinkedThread registry "NodeKernel.GSM" $ case judgment of TooOld -> GSM.enterPreSyncing gsm YoungEnough -> GSM.enterCaughtUp gsm @@ -282,7 +288,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers , getTopLevelConfig = cfg , getFetchClientRegistry = fetchClientRegistry , getFetchMode = readFetchMode blockFetchInterface - , getLedgerStateJudgement = readTVar varLedgerJudgement + , getGsmState = readTVar varGsmState , getChainSyncHandles = varChainSyncHandles , getPeerSharingRegistry = peerSharingRegistry , getTracers = tracers @@ -317,9 +323,9 @@ data InternalState m addrNTN addrNTC blk = IS { , blockFetchInterface :: BlockFetchConsensusInterface (ConnectionId addrNTN) (Header blk) blk m , fetchClientRegistry :: FetchClientRegistry (ConnectionId addrNTN) (Header blk) blk m , varChainSyncHandles :: StrictTVar m (Map (ConnectionId addrNTN) (ChainSyncClientHandle m blk)) + , varGsmState :: StrictTVar m GSM.GsmState , mempool :: Mempool m blk , peerSharingRegistry :: PeerSharingRegistry addrNTN m - , varLedgerJudgement :: StrictTVar m LedgerStateJudgement } initInternalState :: @@ -336,9 +342,9 @@ initInternalState NodeKernelArgs { tracers, chainDB, registry, cfg , mempoolCapacityOverride , gsmArgs, getUseBootstrapPeers } = do - varLedgerJudgement <- do + varGsmState <- do let GsmNodeKernelArgs {..} = gsmArgs - j <- GSM.initializationLedgerJudgement + j <- GSM.initializationGsmState (atomically $ ledgerState <$> ChainDB.getCurrentLedger chainDB) gsmDurationUntilTooOld gsmMarkerFileView @@ -362,7 +368,7 @@ initInternalState NodeKernelArgs { tracers, chainDB, registry, cfg btime (ChainDB.getCurrentChain chainDB) getUseBootstrapPeers - (readTVar varLedgerJudgement) + (GSM.gsmStateToLedgerJudgement <$> readTVar varGsmState) blockFetchInterface :: BlockFetchConsensusInterface (ConnectionId addrNTN) (Header blk) blk m blockFetchInterface = BlockFetchClientInterface.mkBlockFetchConsensusInterface (configBlock cfg) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs index 21894f8811..2197bea732 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs @@ -26,6 +26,7 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client Consensus, bracketChainSyncClient, chainSyncClient) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck +import Ouroboros.Consensus.Node.GsmState (GsmState (Syncing)) import Ouroboros.Consensus.Util (ShowProxy) import Ouroboros.Consensus.Util.IOLike (Exception (fromException), IOLike, MonadCatch (try), StrictTVar) @@ -139,11 +140,12 @@ runChainSyncClient csjConfig StateViewTracers {svtPeerSimulatorResultsTracer} varHandles - channel = do + channel = bracketChainSyncClient nullTracer chainDbView varHandles + (pure Syncing) peerId (maxBound :: 
NodeToNodeVersion) lopBucketConfig diff --git a/ouroboros-consensus/ouroboros-consensus.cabal b/ouroboros-consensus/ouroboros-consensus.cabal index 93a5b01352..24affe240a 100644 --- a/ouroboros-consensus/ouroboros-consensus.cabal +++ b/ouroboros-consensus/ouroboros-consensus.cabal @@ -175,6 +175,7 @@ library Ouroboros.Consensus.MiniProtocol.LocalStateQuery.Server Ouroboros.Consensus.MiniProtocol.LocalTxMonitor.Server Ouroboros.Consensus.MiniProtocol.LocalTxSubmission.Server + Ouroboros.Consensus.Node.GsmState Ouroboros.Consensus.Node.InitStorage Ouroboros.Consensus.Node.NetworkProtocolVersion Ouroboros.Consensus.Node.ProtocolInfo @@ -590,6 +591,7 @@ test-suite infra-test build-depends: QuickCheck, base, + io-classes, io-sim, mtl, ouroboros-consensus:{ouroboros-consensus, unstable-consensus-testlib}, diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs index 6d23553141..8a08bff1d6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs @@ -74,6 +74,7 @@ module Ouroboros.Consensus.MiniProtocol.ChainSync.Client ( ) where import Control.Monad (join, void) +import Control.Monad.Class.MonadTimer (MonadTimer) import Control.Monad.Except (runExcept, throwError) import Control.Tracer import Data.Kind (Type) @@ -102,6 +103,7 @@ import Ouroboros.Consensus.Ledger.SupportsProtocol import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping as Jumping import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State +import Ouroboros.Consensus.Node.GsmState (GsmState (..)) import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ChainDB (ChainDB, @@ -113,6 +115,8 @@ import Ouroboros.Consensus.Util.Assert (assertWithMsg) import Ouroboros.Consensus.Util.EarlyExit (WithEarlyExit, exitEarly) import qualified Ouroboros.Consensus.Util.EarlyExit as EarlyExit import Ouroboros.Consensus.Util.IOLike hiding (handle) +import Ouroboros.Consensus.Util.LeakyBucket + (atomicallyWithMonotonicTime) import qualified Ouroboros.Consensus.Util.LeakyBucket as LeakyBucket import Ouroboros.Consensus.Util.STM (Fingerprint, Watcher (..), WithFingerprint (..), withWatcher) @@ -313,16 +317,20 @@ deriving anyclass instance ( NoThunks (Header blk) ) => NoThunks (ChainSyncStateView m blk) -bracketChainSyncClient :: forall m peer blk a. +bracketChainSyncClient :: + forall m peer blk a. ( IOLike m , Ord peer , LedgerSupportsProtocol blk + , MonadTimer m ) => Tracer m (TraceChainSyncClientEvent blk) -> ChainDbView m blk -> StrictTVar m (Map peer (ChainSyncClientHandle m blk)) -- ^ The kill handle and states for each peer, we need the whole map because we -- (de)register nodes (@peer@). + -> STM m GsmState + -- ^ A function giving the current GSM state; only used at startup. 
-> peer -> NodeToNodeVersion -> ChainSyncLoPBucketConfig @@ -333,19 +341,21 @@ bracketChainSyncClient tracer ChainDbView { getIsInvalidBlock } varHandles + getGsmState peer version csBucketConfig csjConfig body - = mkChainSyncClientHandleState >>= \csHandleState -> - withCSJCallbacks csHandleState csjConfig $ \csjCallbacks -> + = + LeakyBucket.execAgainstBucket' + $ \lopBucket -> + mkChainSyncClientHandleState >>= \csHandleState -> + withCSJCallbacks lopBucket csHandleState csjConfig $ \csjCallbacks -> withWatcher "ChainSync.Client.rejectInvalidBlocks" (invalidBlockWatcher csHandleState) - $ LeakyBucket.execAgainstBucket lopBucketConfig - $ \lopBucket -> - body ChainSyncStateView { + $ body ChainSyncStateView { csvSetCandidate = modifyTVar csHandleState . \ c s -> s {csCandidate = c} , csvSetLatestSlot = @@ -355,9 +365,9 @@ bracketChainSyncClient , idlingStop = atomically $ modifyTVar csHandleState $ \ s -> s {csIdling = False} } , csvLoPBucket = LoPBucket { - lbPause = LeakyBucket.setPaused lopBucket True - , lbResume = LeakyBucket.setPaused lopBucket False - , lbGrantToken = void $ LeakyBucket.fill lopBucket 1 + lbPause = LeakyBucket.setPaused' lopBucket True + , lbResume = LeakyBucket.setPaused' lopBucket False + , lbGrantToken = void $ LeakyBucket.fill' lopBucket 1 } , csvJumping = csjCallbacks } @@ -370,34 +380,43 @@ bracketChainSyncClient } withCSJCallbacks :: + LeakyBucket.Handlers m -> StrictTVar m (ChainSyncState blk) -> CSJConfig -> (Jumping.Jumping m blk -> m a) -> m a - withCSJCallbacks cschState CSJDisabled f = do + withCSJCallbacks lopBucket cschState CSJDisabled f = do tid <- myThreadId cschJumpInfo <- newTVarIO Nothing cschJumping <- newTVarIO (Disengaged DisengagedDone) let handle = ChainSyncClientHandle { cschGDDKill = throwTo tid DensityTooLow + , cschGsmCallback = updateLopBucketConfig lopBucket , cschState , cschJumping , cschJumpInfo } - insertHandle = atomically $ modifyTVar varHandles $ Map.insert peer handle + insertHandle = atomicallyWithMonotonicTime $ \time -> do + initialGsmState <- getGsmState + updateLopBucketConfig lopBucket initialGsmState time + modifyTVar varHandles $ Map.insert peer handle deleteHandle = atomically $ modifyTVar varHandles $ Map.delete peer bracket_ insertHandle deleteHandle $ f Jumping.noJumping - withCSJCallbacks csHandleState (CSJEnabled csjEnabledConfig) f = - bracket (acquireContext csHandleState csjEnabledConfig) releaseContext $ \peerContext -> + withCSJCallbacks lopBucket csHandleState (CSJEnabled csjEnabledConfig) f = + bracket (acquireContext lopBucket csHandleState csjEnabledConfig) releaseContext $ \peerContext -> f $ Jumping.mkJumping peerContext - acquireContext cschState (CSJEnabledConfig jumpSize) = do + + acquireContext lopBucket cschState (CSJEnabledConfig jumpSize) = do tid <- myThreadId - atomically $ do + atomicallyWithMonotonicTime $ \time -> do + initialGsmState <- getGsmState + updateLopBucketConfig lopBucket initialGsmState time cschJumpInfo <- newTVar Nothing context <- Jumping.makeContext varHandles jumpSize Jumping.registerClient context peer cschState $ \cschJumping -> ChainSyncClientHandle { cschGDDKill = throwTo tid DensityTooLow + , cschGsmCallback = updateLopBucketConfig lopBucket , cschState , cschJumping , cschJumpInfo @@ -409,19 +428,33 @@ bracketChainSyncClient invalidBlockRejector tracer version getIsInvalidBlock (csCandidate <$> readTVar varState) + -- | Update the configuration of the bucket to match the given GSM state. 
+ -- NOTE: The new level is currently the maximal capacity of the bucket; + -- maybe we want to change that later. + updateLopBucketConfig :: LeakyBucket.Handlers m -> GsmState -> Time -> STM m () + updateLopBucketConfig lopBucket gsmState = + LeakyBucket.updateConfig lopBucket $ \_ -> + let config = lopBucketConfig gsmState in + (LeakyBucket.capacity config, config) + -- | Wrapper around 'LeakyBucket.execAgainstBucket' that handles the -- disabled bucket by running the given action with dummy handlers. - lopBucketConfig :: LeakyBucket.Config m - lopBucketConfig = - case csBucketConfig of - ChainSyncLoPBucketDisabled -> LeakyBucket.dummyConfig - ChainSyncLoPBucketEnabled ChainSyncLoPBucketEnabledConfig {csbcCapacity, csbcRate} -> + lopBucketConfig :: GsmState -> LeakyBucket.Config m + lopBucketConfig gsmState = + case (gsmState, csBucketConfig) of + (Syncing, ChainSyncLoPBucketEnabled ChainSyncLoPBucketEnabledConfig {csbcCapacity, csbcRate}) -> LeakyBucket.Config { capacity = fromInteger $ csbcCapacity, rate = csbcRate, onEmpty = throwIO EmptyBucket, fillOnOverflow = True } + -- NOTE: If we decide to slow the bucket down when “almost caught-up”, + -- we should add a state to the GSM and corresponding configuration + -- fields and a bucket config here. + (_, ChainSyncLoPBucketDisabled) -> LeakyBucket.dummyConfig + (PreSyncing, ChainSyncLoPBucketEnabled _) -> LeakyBucket.dummyConfig + (CaughtUp, ChainSyncLoPBucketEnabled _) -> LeakyBucket.dummyConfig -- Our task: after connecting to an upstream node, try to maintain an -- up-to-date header-only fragment representing their chain. We maintain diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs index 189ee361e5..c75e3d3530 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs @@ -27,8 +27,9 @@ import Ouroboros.Consensus.Block (HasHeader, Header, Point) import Ouroboros.Consensus.HeaderStateHistory (HeaderStateHistory) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) -import Ouroboros.Consensus.Util.IOLike (IOLike, NoThunks (..), - StrictTVar) +import Ouroboros.Consensus.Node.GsmState (GsmState) +import Ouroboros.Consensus.Util.IOLike (IOLike, NoThunks (..), STM, + StrictTVar, Time) import Ouroboros.Network.AnchoredFragment (AnchoredFragment, headPoint) @@ -69,17 +70,21 @@ deriving anyclass instance ( -- the GDD governor. data ChainSyncClientHandle m blk = ChainSyncClientHandle { -- | Disconnects from the peer when the GDD considers it adversarial - cschGDDKill :: !(m ()) + cschGDDKill :: !(m ()) + + -- | Callback called by the GSM when the GSM state changes. They take the + -- current time and should execute rapidly. Used to enable/disable the LoP. + , cschGsmCallback :: !(GsmState -> Time -> STM m ()) -- | Data shared between the client and external components like GDD. - , cschState :: !(StrictTVar m (ChainSyncState blk)) + , cschState :: !(StrictTVar m (ChainSyncState blk)) -- | The state of the peer with respect to ChainSync jumping. - , cschJumping :: !(StrictTVar m (ChainSyncJumpingState m blk)) + , cschJumping :: !(StrictTVar m (ChainSyncJumpingState m blk)) -- | ChainSync state needed to jump to the tip of the candidate fragment of -- the peer. 
- , cschJumpInfo :: !(StrictTVar m (Maybe (JumpInfo blk))) + , cschJumpInfo :: !(StrictTVar m (Maybe (JumpInfo blk))) } deriving stock (Generic) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/GsmState.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/GsmState.hs new file mode 100644 index 0000000000..1e3405645b --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/GsmState.hs @@ -0,0 +1,23 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} + +-- | This module contains the definition of a state in the Genesis State Machine +-- (GSM). The GSM itself is defined in 'ouroboros-consensus-diffusion', but the +-- ChainSync client relies on its state. +module Ouroboros.Consensus.Node.GsmState (GsmState (..)) where + +import GHC.Generics (Generic) +import NoThunks.Class (NoThunks) + +-- | Current state of the Genesis State Machine +data GsmState = + PreSyncing + -- ^ We are syncing, and the Honest Availability Assumption is not + -- satisfied. + | + Syncing + -- ^ We are syncing, and the Honest Availability Assumption is satisfied. + | + CaughtUp + -- ^ We are caught-up. + deriving (Eq, Show, Read, Generic, NoThunks) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/LeakyBucket.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/LeakyBucket.hs index 41b575c128..b4951bb7d3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/LeakyBucket.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/LeakyBucket.hs @@ -30,18 +30,30 @@ module Ouroboros.Consensus.Util.LeakyBucket ( Config (..) , Handlers (..) , State (..) + , atomicallyWithMonotonicTime , diffTimeToSecondsRational , dummyConfig , evalAgainstBucket , execAgainstBucket + , execAgainstBucket' + , fill' + , microsecondsPerSecond + , picosecondsPerSecond , runAgainstBucket , secondsRationalToDiffTime + , setPaused' + , updateConfig' ) where +import Control.Monad (void, when) +import qualified Control.Monad.Class.MonadSTM.Internal as TVar +import Control.Monad.Class.MonadTimer (MonadTimer, registerDelay) +import Control.Monad.Class.MonadTimer.SI (diffTimeToMicrosecondsAsInt) import Data.Ratio ((%)) import Data.Time.Clock (diffTimeToPicoseconds) import GHC.Generics (Generic) -import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Consensus.Util.IOLike hiding (killThread) +import Ouroboros.Consensus.Util.STM (blockUntilChanged) import Prelude hiding (init) -- | Configuration of a leaky bucket. @@ -57,7 +69,7 @@ data Config m = Config } deriving (Generic) -deriving instance NoThunks (m ()) => NoThunks (Config m) +deriving instance (NoThunks (m ())) => NoThunks (Config m) -- | A configuration for a bucket that does nothing. dummyConfig :: (Applicative m) => Config m @@ -70,118 +82,197 @@ dummyConfig = } -- | State of a leaky bucket, giving the level and the associated time. -data State cfg = State - { level :: !Rational, - time :: !Time, - paused :: !Bool, - config :: !cfg +data State m = State + { level :: !Rational, + time :: !Time, + paused :: !Bool, + configGeneration :: !Int, + config :: !(Config m) } - deriving (Eq, Show, Generic, NoThunks) + deriving (Generic) + +deriving instance (NoThunks (m ())) => NoThunks (State m) --- | A bucket is simply a TVar of a state. -type Bucket m = StrictTVar m (State (Config m)) +-- | A bucket is simply a TVar of a state. 
The state carries a 'Config' and an
+-- integer (a “generation”) to detect changes in the configuration.
+type Bucket m = StrictTVar m (State m)

 -- | Whether filling the bucket overflew.
 data FillResult = Overflew | DidNotOverflow

 -- | The handlers to a bucket: contains the API to interact with a running
--- bucket.
+-- bucket. All the endpoints are STM but require the current time; the easy way
+-- to provide this being 'atomicallyWithMonotonicTime'.
 data Handlers m = Handlers
   { -- | Refill the bucket by the given amount and returns whether the bucket
     -- overflew. The bucket may silently get filled to full capacity or not get
     -- filled depending on 'fillOnOverflow'.
-    fill :: !(Rational -> m FillResult),
+    fill ::
+      !( Rational ->
+         Time ->
+         STM m FillResult
+       ),
     -- | Pause or resume the bucket. Pausing stops the bucket from leaking until
     -- it is resumed. It is still possible to fill it during that time. @setPaused
     -- True@ and @setPaused False@ are idempotent.
-    setPaused :: !(Bool -> m ()),
-    -- | Dynamically update the configuration of the bucket.
-    updateConfig :: !((Config m -> Config m) -> m ())
+    setPaused ::
+      !( Bool ->
+         Time ->
+         STM m ()
+       ),
+    -- | Dynamically update the level and configuration of the bucket. Updating
+    -- the level matters if the capacity changes, in particular. If updating
+    -- leaves the bucket empty, the action is triggered immediately.
+    updateConfig ::
+      !( ((Rational, Config m) -> (Rational, Config m)) ->
+         Time ->
+         STM m ()
+       )
   }

+-- | Variant of 'fill' already wrapped in 'atomicallyWithMonotonicTime'.
+fill' ::
+  ( MonadMonotonicTime m,
+    MonadSTM m
+  ) =>
+  Handlers m ->
+  Rational ->
+  m FillResult
+fill' h r = atomicallyWithMonotonicTime $ fill h r
+
+-- | Variant of 'setPaused' already wrapped in 'atomicallyWithMonotonicTime'.
+setPaused' ::
+  ( MonadMonotonicTime m,
+    MonadSTM m
+  ) =>
+  Handlers m ->
+  Bool ->
+  m ()
+setPaused' h p = atomicallyWithMonotonicTime $ setPaused h p
+
+-- | Variant of 'updateConfig' already wrapped in 'atomicallyWithMonotonicTime'.
+updateConfig' ::
+  ( MonadMonotonicTime m,
+    MonadSTM m
+  ) =>
+  Handlers m ->
+  ((Rational, Config m) -> (Rational, Config m)) ->
+  m ()
+updateConfig' h f = atomicallyWithMonotonicTime $ updateConfig h f
+
 -- | Create a bucket with the given configuration, then run the action against
 -- that bucket. Returns when the action terminates or the bucket empties. In the
 -- first case, return the value returned by the action. In the second case,
 -- return @Nothing@.
 execAgainstBucket ::
-  (MonadDelay m, MonadAsync m, MonadFork m, MonadMask m, NoThunks (m ())) =>
+  ( MonadDelay m,
+    MonadAsync m,
+    MonadFork m,
+    MonadMask m,
+    MonadTimer m,
+    NoThunks (m ())
+  ) =>
   Config m ->
   (Handlers m -> m a) ->
   m a
 execAgainstBucket config action = snd <$> runAgainstBucket config action

+-- | Variant of 'execAgainstBucket' that uses a dummy configuration. This only
+-- makes sense for actions that use 'updateConfig'.
+execAgainstBucket' ::
+  ( MonadDelay m,
+    MonadAsync m,
+    MonadFork m,
+    MonadMask m,
+    MonadTimer m,
+    NoThunks (m ())
+  ) =>
+  (Handlers m -> m a) ->
+  m a
+execAgainstBucket' action =
+  execAgainstBucket dummyConfig action
+
 -- | Same as 'execAgainstBucket' but returns the 'State' of the bucket when the
 -- action terminates. Exposed for testing purposes.
evalAgainstBucket :: - (MonadDelay m, MonadAsync m, MonadFork m, MonadMask m, NoThunks (m ())) => + (MonadDelay m, MonadAsync m, MonadFork m, MonadMask m, MonadTimer m, NoThunks (m ()) + ) => Config m -> (Handlers m -> m a) -> - m (State (Config m)) + m (State m) evalAgainstBucket config action = fst <$> runAgainstBucket config action -- | Same as 'execAgainstBucket' but also returns the 'State' of the bucket when -- the action terminates. Exposed for testing purposes. runAgainstBucket :: forall m a. - (MonadDelay m, MonadAsync m, MonadFork m, MonadMask m, NoThunks (m ())) => + ( MonadDelay m, + MonadAsync m, + MonadFork m, + MonadMask m, + MonadTimer m, + NoThunks (m ()) + ) => Config m -> (Handlers m -> m a) -> - m (State (Config m), a) + m (State m, a) runAgainstBucket config action = do - bucket <- init config + runThreadVar <- atomically newEmptyTMVar -- see note [Leaky bucket design]. tid <- myThreadId - killThreadVar <- newTVarIO Nothing - finally - ( do - startThread killThreadVar bucket tid - result <- - action $ - Handlers - { fill = (snd <$>) . snapshotFill bucket, - setPaused = setPaused bucket, - updateConfig = updateConfig killThreadVar bucket tid - } - state <- snapshot bucket - pure (state, result) - ) - (stopThread killThreadVar) + bucket <- init config + withAsync (leak runThreadVar tid bucket) $ \_ -> do + atomicallyWithMonotonicTime $ maybeStartThread Nothing runThreadVar bucket + result <- + action $ + Handlers + { fill = \r t -> (snd <$>) $ snapshotFill bucket r t, + setPaused = setPaused bucket, + updateConfig = updateConfig runThreadVar bucket + } + state <- atomicallyWithMonotonicTime $ snapshot bucket + pure (state, result) where - startThread :: - StrictTVar m (Maybe (m ())) -> - Bucket m -> - ThreadId m -> - m () - startThread killThreadVar bucket tid = - readTVarIO killThreadVar >>= \case - Just _ -> error "LeakyBucket: startThread called when a thread is already running" - Nothing -> (atomically . writeTVar killThreadVar) =<< leak bucket tid - - stopThread :: StrictTVar m (Maybe (m ())) -> m () - stopThread killThreadVar = - readTVarIO killThreadVar >>= \case - Just killThread' -> killThread' - Nothing -> pure () + -- Start the thread (that is, write to its 'runThreadVar') if it is useful. + -- Takes a potential old value of the 'runThreadVar' as first argument, + -- which will be increased to help differentiate between restarts. 
+    maybeStartThread :: Maybe Int -> StrictTMVar m Int -> Bucket m -> Time -> STM m ()
+    maybeStartThread oldRunThread runThreadVar bucket time = do
+      State {config = Config {rate}} <- snapshot bucket time
+      when (rate > 0) $ void $ tryPutTMVar runThreadVar $ maybe 0 (+ 1) oldRunThread

-    setPaused :: Bucket m -> Bool -> m ()
-    setPaused bucket paused = do
-      newState <- snapshot bucket
-      atomically $ writeTVar bucket newState {paused}
+    setPaused :: Bucket m -> Bool -> Time -> STM m ()
+    setPaused bucket paused time = do
+      newState <- snapshot bucket time
+      writeTVar bucket newState {paused}

     updateConfig ::
-      StrictTVar m (Maybe (m ())) ->
+      StrictTMVar m Int ->
       Bucket m ->
-      ThreadId m ->
-      (Config m -> Config m) ->
-      m ()
-    updateConfig killThreadVar bucket tid = \f -> do
-      State {level, time, paused, config = oldConfig} <- snapshot bucket
-      let newConfig@Config {capacity = newCapacity, rate = newRate} = f oldConfig
-          newLevel = min newCapacity level
-      if
-        | newRate <= 0 -> stopThread killThreadVar
-        | newRate > rate oldConfig -> stopThread killThreadVar >> startThread killThreadVar bucket tid
-        | otherwise -> pure ()
-      atomically $ writeTVar bucket State {level = newLevel, time, paused, config = newConfig}
+      ((Rational, Config m) -> (Rational, Config m)) ->
+      Time ->
+      STM m ()
+    updateConfig runThreadVar bucket f time = do
+      State
+        { level = oldLevel,
+          paused,
+          configGeneration = oldConfigGeneration,
+          config = oldConfig
+        } <-
+        snapshot bucket time
+      let (newLevel, newConfig) = f (oldLevel, oldConfig)
+          Config {capacity = newCapacity} = newConfig
+          newLevel' = clamp (0, newCapacity) newLevel
+      writeTVar bucket $
+        State
+          { level = newLevel',
+            time,
+            paused,
+            configGeneration = oldConfigGeneration + 1,
+            config = newConfig
+          }
+      -- Ensure that 'runThreadVar' is empty, then maybe start the thread.
+      oldRunThread <- tryTakeTMVar runThreadVar
+      maybeStartThread oldRunThread runThreadVar bucket time

 -- | Initialise a bucket given a configuration. The bucket starts full at the
 -- time where one calls 'init'.
@@ -191,81 +282,163 @@ init ::
   m (Bucket m)
 init config@Config {capacity} = do
   time <- getMonotonicTime
-  newTVarIO $ State {time, level = capacity, paused = False, config}
+  newTVarIO $
+    State
+      { time,
+        level = capacity,
+        paused = False,
+        configGeneration = 0,
+        config = config
+      }
+
+-- Note [Leaky bucket design]
+-- ~~~~~~~~~~~~~~~~~~~~~~~~~~
+--
+-- The leaky bucket works by running the given action against a thread that
+-- makes the bucket leak. Since that would be extremely inefficient to actually
+-- remove tokens one by one from the bucket, the 'leak' thread instead looks at
+-- the current state of the bucket, computes how much time it would take for the
+-- bucket to empty, and then waits that amount of time. Once the wait is over, it
+-- recurses, looks at the new state of the bucket, etc. If tokens were given to
+-- the bucket via the action, the bucket is not empty and the loop continues.
+--
+-- This description assumes that two things hold:
+--
+-- - the bucket must be leaking (i.e. rate is strictly positive),
+-- - the action can only increase the waiting time (e.g. by giving tokens).
+--
+-- Neither of those properties holds in the general case. Indeed, it is possible
+-- for the bucket to have a zero rate or even a negative one (for a more
+-- traditional rate limiting bucket, for instance). Conversely, it is possible
+-- for the action to lower the waiting time by changing the bucket configuration
+-- to one where the rate is higher.
+--
+-- We fix both those issues with one mechanism, the “runThreadVar”. It is an
+-- MVar containing an integer that tells the thread whether it should be
+-- running. An empty MVar means that the thread should not be running, for
+-- instance if the rate is zero. A full MVar (no matter what the integer is)
+-- means that the thread should be running. When recursing, the thread blocks
+-- until the MVar is full, and only then proceeds as described above.
+-- Additionally, while waiting for the bucket to empty, the thread monitors
+-- changes to the MVar, indicating either that the thread should stop running or
+-- that the configuration changed such that it might have to wait less long. The
+-- change in configuration is detected by changes in the integer.
+--
+-- Note that we call “start”/“stop” running the action of filling/emptying the
+-- MVar. This is not to be mistaken for the thread actually being spawned/killed.

 -- | Monadic action that calls 'threadDelay' until the bucket is empty, then
--- returns @()@. It receives the 'ThreadId' argument of the action's thread,
--- which it uses to throw exceptions at it; it returns a monadic action that can
--- be used to interrupt the thread from the outside.
+-- runs the 'onEmpty' action and terminates. See note [Leaky bucket design].
 leak ::
-  (MonadDelay m, MonadCatch m, MonadFork m, MonadAsync m) =>
-  Bucket m ->
+  ( MonadDelay m,
+    MonadCatch m,
+    MonadFork m,
+    MonadAsync m,
+    MonadTimer m
+  ) =>
+  -- | A variable indicating whether the thread should run (when it is filled)
+  -- or not (otherwise). The integer it carries only helps in differentiating
+  -- between starts and restarts. 'leak' does not modify this variable.
+  StrictTMVar m Int ->
+  -- | The 'ThreadId' of the action's thread, which is used to throw exceptions
+  -- at it.
   ThreadId m ->
-  m (Maybe (m ()))
-leak bucket actionThreadId = do
-  State {config = Config {rate}} <- snapshot bucket
-  if rate <= 0
-    then pure Nothing
-    else do
-      a <- async go
-      pure $ Just $! uninterruptibleCancel a
+  Bucket m ->
+  m ()
+leak runThreadVar actionThreadId bucket = go
   where
     go = do
-      State {level, config = Config {rate, onEmpty}} <- snapshot bucket
+      -- Block until we are allowed to run. Do not modify the TMVar.
+      oldRunThread <- atomically $ readTMVar runThreadVar
+      -- NOTE: It is tempting to group this @atomically@ and
+      -- @atomicallyWithMonotonicTime@ into one; however, because the former is
+      -- blocking, the latter could get a _very_ inaccurate time, which we
+      -- cannot afford.
+      State {level, configGeneration = oldConfigGeneration, config = Config {rate, onEmpty}} <-
+        atomicallyWithMonotonicTime $ snapshot bucket
      let timeToWait = secondsRationalToDiffTime (level / rate)
-      -- NOTE: It is possible that @timeToWait == 0@ while @level > 0@ when @level@
-      -- is so tiny that @level / rate@ rounds down to 0 picoseconds. In that case,
-      -- it is safe to assume that it is just zero.
-      if level <= 0 || timeToWait == 0
-        then handle (\(e :: SomeException) -> throwTo actionThreadId e) onEmpty
-        else threadDelay timeToWait >> go
+          timeToWaitMicroseconds = diffTimeToMicrosecondsAsInt timeToWait
+      -- NOTE: It is possible that @timeToWait <= 1µs@ while @level > 0@ when
+      -- @level@ is extremely small.
+      if level <= 0 || timeToWaitMicroseconds <= 0
+        then do
+          handle (\(e :: SomeException) -> throwTo actionThreadId e) onEmpty
+          -- We have run the action on empty, there is nothing left to do,
+          -- unless someone changes the configuration.
+ void $ atomically $ blockUntilChanged configGeneration oldConfigGeneration $ readTVar bucket + go + else do + -- Wait for the bucket to empty, or for the thread to be stopped or + -- restarted. Beware not to call 'registerDelay' with argument 0, that + -- is ensure that @timeToWaitMicroseconds > 0@. + varTimeout <- registerDelay timeToWaitMicroseconds + atomically $ + (check =<< TVar.readTVar varTimeout) + `orElse` + (void $ blockUntilChanged id (Just oldRunThread) $ tryReadTMVar runThreadVar) + go -- | Take a snapshot of the bucket, that is compute its state at the current -- time. snapshot :: - (MonadSTM m, MonadMonotonicTime m) => + ( MonadSTM m + ) => Bucket m -> - m (State (Config m)) -snapshot bucket = fst <$> snapshotFill bucket 0 + Time -> + STM m (State m) +snapshot bucket newTime = fst <$> snapshotFill bucket 0 newTime -- | Same as 'snapshot' but also adds the given quantity to the resulting -- level and returns whether this action overflew the bucket. -- -- REVIEW: What to do when 'toAdd' is negative? --- --- REVIEW: Really, this should all be an STM transaction. Now there is the risk --- that two snapshot-taking transactions interleave with the time measurement to --- get a slightly imprecise state (which is not the worst because everything --- should happen very fast). There is also the bigger risk that when we snapshot --- and then do something (eg. in the 'setPaused' handler) we interleave with --- something else. It cannot easily be an STM transaction, though, because we --- need to measure the time, and @io-classes@'s STM does not allow running IO in --- an STM. snapshotFill :: - (MonadSTM m, MonadMonotonicTime m) => + ( MonadSTM m + ) => Bucket m -> Rational -> - m (State (Config m), FillResult) -snapshotFill bucket toAdd = do - newTime <- getMonotonicTime - atomically $ do - State {level, time, paused, config} <- readTVar bucket - let Config {rate, capacity, fillOnOverflow} = config - elapsed = diffTime newTime time - leaked = if paused then 0 else (diffTimeToSecondsRational elapsed * rate) - levelLeaked = max 0 (level - leaked) - levelFilled = min capacity (levelLeaked + toAdd) - overflew = levelLeaked + toAdd > capacity - newLevel = if not overflew || fillOnOverflow then levelFilled else levelLeaked - newState = State {time = newTime, level = newLevel, paused, config} - writeTVar bucket newState - pure (newState, if overflew then Overflew else DidNotOverflow) + Time -> + STM m (State m, FillResult) +snapshotFill bucket toAdd newTime = do + State {level, time, paused, configGeneration, config = config} <- readTVar bucket + let Config {rate, capacity, fillOnOverflow} = config + elapsed = diffTime newTime time + leaked = if paused then 0 else (diffTimeToSecondsRational elapsed * rate) + levelLeaked = clamp (0, capacity) (level - leaked) + levelFilled = clamp (0, capacity) (levelLeaked + toAdd) + overflew = levelLeaked + toAdd > capacity + newLevel = if not overflew || fillOnOverflow then levelFilled else levelLeaked + newState = State {time = newTime, level = newLevel, paused, configGeneration, config} + writeTVar bucket newState + pure (newState, if overflew then Overflew else DidNotOverflow) -- | Convert a 'DiffTime' to a 'Rational' number of seconds. This is similar to -- 'diffTimeToSeconds' but with picoseconds precision. diffTimeToSecondsRational :: DiffTime -> Rational -diffTimeToSecondsRational = (% 1_000_000_000_000) . diffTimeToPicoseconds +diffTimeToSecondsRational = (% picosecondsPerSecond) . 
diffTimeToPicoseconds -- | Alias of 'realToFrac' to make code more readable and typing more explicit. secondsRationalToDiffTime :: Rational -> DiffTime secondsRationalToDiffTime = realToFrac + +-- | Helper around 'getMonotonicTime' and 'atomically'. +atomicallyWithMonotonicTime :: + ( MonadMonotonicTime m, + MonadSTM m + ) => + (Time -> STM m b) -> + m b +atomicallyWithMonotonicTime f = + atomically . f =<< getMonotonicTime + +-- NOTE: Needed for GHC 8 +clamp :: Ord a => (a, a) -> a -> a +clamp (low, high) x = min high (max low x) + +-- | Number of microseconds in a second (@10^6@). +microsecondsPerSecond :: Integer +microsecondsPerSecond = 1_000_000 + +-- | Number of picoseconds in a second (@10^12@). +picosecondsPerSecond :: Integer +picosecondsPerSecond = 1_000_000_000_000 diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs index ff05b517fb..868ca695dd 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs @@ -54,6 +54,7 @@ import Cardano.Slotting.Slot (WithOrigin (..)) import Control.Monad (forM_, unless, void, when) import Control.Monad.Class.MonadThrow (Handler (..), catches) import Control.Monad.Class.MonadTime (MonadTime, getCurrentTime) +import Control.Monad.Class.MonadTimer (MonadTimer) import Control.Monad.IOSim (runSimOrThrow) import Control.Tracer (contramap, contramapM, nullTracer) import Data.DerivingVia (InstantiatedAt (InstantiatedAt)) @@ -88,6 +89,7 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client TraceChainSyncClientEvent (..), bracketChainSyncClient, chainSyncClient, chainSyncStateFor, viewChainSyncState) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck +import Ouroboros.Consensus.Node.GsmState (GsmState (Syncing)) import Ouroboros.Consensus.Node.NetworkProtocolVersion (NodeToNodeVersion) import Ouroboros.Consensus.Node.ProtocolInfo @@ -309,7 +311,7 @@ data ChainSyncOutcome = ChainSyncOutcome { -- Note that updates that are scheduled before the time at which we start -- syncing help generate different chains to start syncing from. runChainSync :: - forall m. (IOLike m, MonadTime m) + forall m. (IOLike m, MonadTime m, MonadTimer m) => ClockSkew -> SecurityParam -> ClientUpdates @@ -500,6 +502,9 @@ runChainSync skew securityParam (ClientUpdates clientUpdates) chainSyncTracer chainDbView varHandles + -- 'Syncing' only ever impacts the LoP, which is disabled in + -- this test, so any value would do. 
+ (pure Syncing) serverId maxBound lopBucketConfig diff --git a/ouroboros-consensus/test/infra-test/Test/Ouroboros/Consensus/Util/LeakyBucket/Tests.hs b/ouroboros-consensus/test/infra-test/Test/Ouroboros/Consensus/Util/LeakyBucket/Tests.hs index 8157045a45..aad4c13f0b 100644 --- a/ouroboros-consensus/test/infra-test/Test/Ouroboros/Consensus/Util/LeakyBucket/Tests.hs +++ b/ouroboros-consensus/test/infra-test/Test/Ouroboros/Consensus/Util/LeakyBucket/Tests.hs @@ -12,19 +12,23 @@ module Test.Ouroboros.Consensus.Util.LeakyBucket.Tests (tests) where import Control.Monad (foldM, void) +import Control.Monad.Class.MonadTimer (MonadTimer) import Control.Monad.IOSim (IOSim, runSimOrThrow) import Data.Either (isLeft, isRight) import Data.Functor ((<&>)) +import Data.List (intersperse) import Data.Ratio ((%)) import Data.Time.Clock (DiffTime, picosecondsToDiffTime) import Ouroboros.Consensus.Util.IOLike (Exception (displayException), MonadAsync, MonadCatch (try), MonadDelay, MonadFork, - MonadMask, MonadThrow (throwIO), NoThunks, SomeException, - Time (Time), addTime, fromException, threadDelay) + MonadMask, MonadSTM, MonadThrow (throwIO), NoThunks, + SomeException, Time (Time), addTime, fromException, + threadDelay) import Ouroboros.Consensus.Util.LeakyBucket import Test.QuickCheck (Arbitrary (arbitrary), Gen, Property, - classify, counterexample, forAll, frequency, ioProperty, - listOf1, scale, suchThat, (===)) + classify, counterexample, forAllShrinkBlind, frequency, + ioProperty, liftArbitrary2, listOf1, scale, shrinkList, + suchThat) import Test.Tasty (TestTree, testGroup) import Test.Tasty.QuickCheck (property, testProperty) import Test.Util.TestEnv (adjustQuickCheckTests) @@ -36,8 +40,8 @@ tests = testGroup "Ouroboros.Consensus.Util.LeakyBucket" [ testProperty "play too long harmless" prop_playTooLongHarmless, testProperty "play with pause" prop_playWithPause, testProperty "play with pause too long" prop_playWithPauseTooLong, - testProperty "wait almost too long" (prop_noRefill (-1)), - testProperty "wait just too long" (prop_noRefill 1), + testProperty "wait almost too long" (prop_noRefill False), + testProperty "wait just too long" (prop_noRefill True), testProperty "propagates exceptions" prop_propagateExceptions, testProperty "propagates exceptions (IO)" prop_propagateExceptionsIO, testProperty "catch exception" prop_catchException, @@ -80,6 +84,13 @@ data TestConfig = TestConfig } deriving (Eq, Show) +data TestState = TestState + { testLevel :: Rational, + testTime :: Time, + testPaused :: Bool + } + deriving (Eq, Show) + instance Arbitrary TestConfig where arbitrary = TestConfig @@ -107,38 +118,59 @@ mkConfig TestConfig {testCapacity, testRate, testThrowOnEmpty} = -- | Make a configuration that fills on overflow and throws 'EmptyBucket' on -- empty bucket. -configThrow :: MonadThrow m => Capacity -> Rate -> Config m +configThrow :: Capacity -> Rate -> TestConfig configThrow (Capacity testCapacity) (Rate testRate) = - mkConfig TestConfig{testCapacity, testRate, testThrowOnEmpty = True} + TestConfig{testCapacity, testRate, testThrowOnEmpty = True} -- | A configuration with capacity and rate 1, that fills on overflow and throws -- 'EmptyBucket' on empty bucket. -config11Throw :: MonadThrow m => Config m +config11Throw :: TestConfig config11Throw = configThrow (Capacity 1) (Rate 1) -- | Make a configuration that fills on overflow and does nothing on empty -- bucket. 
-configPure :: MonadThrow m => Capacity -> Rate -> Config m +configPure :: Capacity -> Rate -> TestConfig configPure (Capacity testCapacity) (Rate testRate) = - mkConfig TestConfig{testCapacity, testRate, testThrowOnEmpty = False} + TestConfig{testCapacity, testRate, testThrowOnEmpty = False} -- | A configuration with capacity 1 and rate 1, that fills on overflow and does -- nothing on empty bucket. -config11Pure :: MonadThrow m => Config m +config11Pure :: TestConfig config11Pure = configPure (Capacity 1) (Rate 1) --- | Strip the configuration from a 'State', so as to make it comparable, --- showable, etc. -stripConfig :: State cfg -> State () -stripConfig state = state{config=()} - --- | 'evalAgainstBucket' followed by 'stripConfig'. -stripEvalAgainstBucket :: - (MonadDelay m, MonadAsync m, MonadFork m, MonadMask m, NoThunks (m ())) => - Config m -> +stateToTestState :: State m -> TestState +stateToTestState State{level, time, paused} = + TestState{testLevel = level, testTime = time, testPaused = paused} + +-- | 'execAgainstBucket' except it takes a 'TestConfig'. +testExecAgainstBucket :: + ( MonadDelay m, + MonadAsync m, + MonadFork m, + MonadMask m, + MonadTimer m, + NoThunks (m ()) + ) => + TestConfig -> (Handlers m -> m a) -> - m (State ()) -stripEvalAgainstBucket config action = stripConfig <$> evalAgainstBucket config action + m a +testExecAgainstBucket testConfig action = + execAgainstBucket (mkConfig testConfig) action + +-- | 'evalAgainstBucket' except it takes a 'TestConfig' and returns a 'TestState'. +testEvalAgainstBucket :: + ( MonadDelay m, + MonadAsync m, + MonadFork m, + MonadMask m, + MonadTimer m, + NoThunks (m ()) + ) => + TestConfig -> + (Handlers m -> m a) -> + m TestState +testEvalAgainstBucket testConfig action = + stateToTestState <$> evalAgainstBucket (mkConfig testConfig) action -- | Alias for 'runSimOrThrow' by analogy to 'ioProperty'. ioSimProperty :: forall a. (forall s. IOSim s a) -> a @@ -162,10 +194,6 @@ shouldEvaluateTo a v = | otherwise -> counterexample ("Expected " ++ show v ++ "; got " ++ show result) False Left (exn :: SomeException) -> counterexample ("Expected " ++ show v ++ "; got exception " ++ displayException exn) False --- | Number of picoseconds in a second (@10^12@). -picosecondsPerSecond :: Integer -picosecondsPerSecond = 1_000_000_000_000 - -------------------------------------------------------------------------------- -- Simple properties -------------------------------------------------------------------------------- @@ -175,20 +203,20 @@ picosecondsPerSecond = 1_000_000_000_000 prop_playABit :: Property prop_playABit = ioSimProperty $ - stripEvalAgainstBucket config11Throw (\handlers -> do + testEvalAgainstBucket config11Throw (\handlers -> do threadDelay 0.5 - void $ fill handlers 67 + void $ fill' handlers 67 threadDelay 0.9 - ) `shouldEvaluateTo` State{level = 1 % 10, time = Time 1.4, paused = False, config = ()} + ) `shouldEvaluateTo` TestState{testLevel = 1 % 10, testTime = Time 1.4, testPaused = False} -- | One test case similar to 'prop_playABit' but we wait a bit too long and -- should observe the triggering of the 'onEmpty' action. 
prop_playTooLong :: Property prop_playTooLong = ioSimProperty $ - stripEvalAgainstBucket config11Throw (\handlers -> do + testEvalAgainstBucket config11Throw (\handlers -> do threadDelay 0.5 - void $ fill handlers 67 + void $ fill' handlers 67 threadDelay 1.1 ) `shouldThrow` EmptyBucket @@ -197,31 +225,31 @@ prop_playTooLong = prop_playTooLongHarmless :: Property prop_playTooLongHarmless = ioSimProperty $ - stripEvalAgainstBucket config11Pure (\handlers -> do + testEvalAgainstBucket config11Pure (\handlers -> do threadDelay 0.5 - void $ fill handlers 67 + void $ fill' handlers 67 threadDelay 1.1 - ) `shouldEvaluateTo` State{level = 0, time = Time 1.6, paused = False, config = ()} + ) `shouldEvaluateTo` TestState{testLevel = 0, testTime = Time 1.6, testPaused = False} prop_playWithPause :: Property prop_playWithPause = ioSimProperty $ - stripEvalAgainstBucket config11Throw (\handlers -> do + testEvalAgainstBucket config11Throw (\handlers -> do threadDelay 0.5 - setPaused handlers True + setPaused' handlers True threadDelay 1.5 - setPaused handlers False + setPaused' handlers False threadDelay 0.4 - ) `shouldEvaluateTo` State{level = 1 % 10, time = Time 2.4, paused = False, config = ()} + ) `shouldEvaluateTo` TestState{testLevel = 1 % 10, testTime = Time 2.4, testPaused = False} prop_playWithPauseTooLong :: Property prop_playWithPauseTooLong = ioSimProperty $ - stripEvalAgainstBucket config11Throw (\handlers -> do + testEvalAgainstBucket config11Throw (\handlers -> do threadDelay 0.5 - setPaused handlers True + setPaused' handlers True threadDelay 1.5 - setPaused handlers False + setPaused' handlers False threadDelay 0.6 ) `shouldThrow` EmptyBucket @@ -230,24 +258,24 @@ prop_playWithPauseTooLong = -- state. If the offset is positive, we should get an exception. NOTE: Do not -- use an offset of @0@. NOTE: Considering the precision, we *need* IOSim for -- this test. -prop_noRefill :: Integer -> Capacity -> Rate -> Property -prop_noRefill offset capacity@(Capacity c) rate@(Rate r) = do +prop_noRefill :: Bool -> Capacity -> Rate -> Property +prop_noRefill tooLong capacity@(Capacity c) rate@(Rate r) = do -- NOTE: The @-1@ is to ensure that we do not test the situation where the -- bucket empties at the *exact* same time (curtesy of IOSim) as the action. 
- let ps = floor (c / r * fromInteger picosecondsPerSecond) + offset + let ps = + floor (c / r * fromInteger picosecondsPerSecond) + + (if tooLong then 1 else -1) * microsecondsPerSecond time = picosecondsToDiffTime ps level = c - (ps % picosecondsPerSecond) * r - if - | offset < 0 -> + if tooLong + then ioSimProperty $ - stripEvalAgainstBucket (configThrow capacity rate) (\_ -> threadDelay time) - `shouldEvaluateTo` State{level, time = Time time, paused = False, config = ()} - | offset > 0 -> + testEvalAgainstBucket (configThrow capacity rate) (\_ -> threadDelay time) + `shouldThrow` EmptyBucket + else ioSimProperty $ - stripEvalAgainstBucket (configThrow capacity rate) (\_ -> threadDelay time) - `shouldThrow` EmptyBucket - | otherwise -> - error "prop_noRefill: do not use an offset of 0" + testEvalAgainstBucket (configThrow capacity rate) (\_ -> threadDelay time) + `shouldEvaluateTo` TestState {testLevel = level, testTime = Time time, testPaused = False} -------------------------------------------------------------------------------- -- Exception propagation @@ -263,7 +291,7 @@ instance Exception NoPlumberException prop_propagateExceptions :: Property prop_propagateExceptions = ioSimProperty $ - stripEvalAgainstBucket config11Throw (\_ -> throwIO NoPlumberException) + testEvalAgainstBucket config11Throw (\_ -> throwIO NoPlumberException) `shouldThrow` NoPlumberException @@ -271,7 +299,7 @@ prop_propagateExceptions = prop_propagateExceptionsIO :: Property prop_propagateExceptionsIO = ioProperty $ - stripEvalAgainstBucket config11Throw (\_ -> throwIO NoPlumberException) + testEvalAgainstBucket config11Throw (\_ -> throwIO NoPlumberException) `shouldThrow` NoPlumberException @@ -280,7 +308,7 @@ prop_propagateExceptionsIO = prop_catchException :: Property prop_catchException = ioSimProperty $ - execAgainstBucket config11Throw (\_ -> try $ threadDelay 1000) + testExecAgainstBucket config11Throw (\_ -> try $ threadDelay 1000) `shouldEvaluateTo` Left EmptyBucket @@ -290,58 +318,90 @@ prop_catchException = -- | Abstract “actions” to be run. We can either wait by some time or refill the -- bucket by some value. -data Action = ThreadDelay DiffTime | Fill Rational | SetPaused Bool +data Action + = Wait DiffTime + | Fill Rational + | SetPaused Bool + | -- | Set the configuration, then wait the given time. Setting the + -- configuration without waiting can lead to poorly defined situations. + SetConfigWait TestConfig DiffTime deriving (Eq, Show) -- | Random generation of 'Action's. The scales and frequencies are taken such -- that we explore as many interesting cases as possible. genAction :: Gen Action -genAction = frequency [ - (1, ThreadDelay . picosecondsToDiffTime <$> scale (* fromInteger picosecondsPerSecond) (arbitrary `suchThat` (>= 0))), - (1, Fill <$> scale (* 1_000_000_000_000_000) (arbitrary `suchThat` (>= 0))), - (1, SetPaused <$> arbitrary) - ] +genAction = + frequency + [ (1, Wait <$> genDelay), + (1, Fill <$> scale (* 1_000_000_000_000_000) (arbitrary `suchThat` (>= 0))), + (1, SetPaused <$> arbitrary), + (1, SetConfigWait <$> arbitrary <*> genDelay) + ] + where + genDelay = picosecondsToDiffTime <$> scale (* fromInteger picosecondsPerSecond) (arbitrary `suchThat` (>= 0)) -- | How to run the 'Action's in a monad. 
-applyActions :: MonadDelay m => Handlers m -> [Action] -> m () +applyActions :: (MonadDelay m, MonadThrow m, MonadSTM m) => Handlers m -> [Action] -> m () applyActions handlers = mapM_ $ \case - ThreadDelay t -> threadDelay t - Fill t -> void $ fill handlers t - SetPaused p -> setPaused handlers p + Wait t -> threadDelay t + Fill t -> void $ fill' handlers t + SetPaused p -> setPaused' handlers p + SetConfigWait cfg t -> do + updateConfig' handlers $ (\(l, _) -> (l, mkConfig cfg)) + threadDelay t -- | A model of what we expect the 'Action's to lead to, either an 'EmptyBucket' -- exception (if the bucket won the race) or a 'State' (otherwise). -modelActions :: TestConfig -> [Action] -> Either EmptyBucket (State TestConfig) -modelActions config = - foldM go $ State{level = testCapacity config, time = Time 0, paused = False, config} +modelActions :: TestConfig -> [Action] -> Either EmptyBucket TestState +modelActions testConfig = + (snd <$>) . foldM go (testConfig, TestState {testLevel = testCapacity testConfig, testTime = Time 0, testPaused = False}) where - go :: State TestConfig -> Action -> Either EmptyBucket (State TestConfig) - go state@State{time, level, paused, config=TestConfig{testCapacity, testRate, testThrowOnEmpty}} = \case + go :: (TestConfig, TestState) -> Action -> Either EmptyBucket (TestConfig, TestState) + go (config@TestConfig {testCapacity, testRate, testThrowOnEmpty}, state@TestState {testTime, testLevel, testPaused}) = \case Fill t -> - Right state{level = min testCapacity (level + t)} - ThreadDelay t -> - let newTime = addTime t time - newLevel = if paused then level else max 0 (level - diffTimeToSecondsRational t * testRate) + Right (config, state {testLevel = clamp (0, testCapacity) (testLevel + t)}) + Wait t -> + let newTime = addTime t testTime + newLevel = + if testPaused + then testLevel + else clamp (0, testCapacity) (testLevel - diffTimeToSecondsRational t * testRate) in if newLevel <= 0 && testThrowOnEmpty then Left EmptyBucket - else Right state{time = newTime, level = newLevel} - SetPaused p -> - Right state{paused = p} + else Right (config, state {testTime = newTime, testLevel = newLevel}) + SetPaused newPaused -> + Right (config, state {testPaused = newPaused}) + SetConfigWait newConfig@TestConfig {testCapacity = newTestCapacity} t -> + go (newConfig, state {testLevel = clamp (0, newTestCapacity) testLevel}) (Wait t) -- | A bunch of test cases where we generate a list of 'Action's ,run them via -- 'applyActions' and compare the result to that of 'modelActions'. 
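--
-- As an illustrative walkthrough of the model only (these numbers are not an
-- actual test case): with capacity 1, rate 1 and 'testThrowOnEmpty' enabled,
-- the actions @[Wait 0.5, Fill 0.2, Wait 0.8]@ first drain the bucket from 1
-- to 0.5, refill it to 0.7, and then drain it past empty, so 'modelActions'
-- yields @Left EmptyBucket@; with 'testThrowOnEmpty' disabled it instead ends
-- in @Right TestState{testLevel = 0, testTime = Time 1.3, testPaused = False}@.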
-prop_random :: TestConfig -> Property -prop_random testConfig = - forAll (listOf1 genAction) $ \actions -> - let modelResult = modelActions testConfig actions - nbActions = length actions - in classify (isLeft modelResult) "bucket finished empty" $ - classify (isRight modelResult) "bucket finished non-empty" $ - classify (nbActions <= 10) "<= 10 actions" $ - classify (10 < nbActions && nbActions <= 20) "11-20 actions" $ - classify (20 < nbActions && nbActions <= 50) "21-50 actions" $ - classify (50 < nbActions) "> 50 actions" $ - runSimOrThrow ( - try $ stripEvalAgainstBucket (mkConfig testConfig) $ - flip applyActions actions - ) === (stripConfig <$> modelResult) +prop_random :: Property +prop_random = + forAllShrinkBlind + (liftArbitrary2 arbitrary (listOf1 genAction)) + (traverse (shrinkList (const []))) + $ \(testConfig, actions) -> + let result = + runSimOrThrow + ( try $ + testEvalAgainstBucket testConfig $ + flip applyActions actions + ) + modelResult = modelActions testConfig actions + nbActions = length actions + in classify (isLeft modelResult) "bucket finished empty" $ + classify (isRight modelResult) "bucket finished non-empty" $ + classify (nbActions <= 10) "<= 10 actions" $ + classify (10 < nbActions && nbActions <= 20) "11-20 actions" $ + classify (20 < nbActions && nbActions <= 50) "21-50 actions" $ + classify (50 < nbActions) "> 50 actions" $ + counterexample ("Config: " ++ show testConfig) $ + counterexample ("Actions:\n" ++ (concat $ intersperse "\n" $ map ((" - " ++) . show) actions)) $ + counterexample ("Result: " ++ show result) $ + counterexample ("Model: " ++ show modelResult) $ + result == modelResult + +-- NOTE: Needed for GHC 8 +clamp :: Ord a => (a, a) -> a -> a +clamp (low, high) x = min high (max low x) From 82376e6f913c4fd5bb913ac0f411c9952b644831 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Fri, 3 May 2024 16:33:30 +0000 Subject: [PATCH 02/26] Only select chains whose tip is in the loe fragment Also: * Simplify chainDiffs computation in chainSelectionForBlock * Revert "Don't allow to extend the selection by more than k blocks" This reverts commit c8468c28f49aea116116f5676ff931ce3e39eb3c. * Revert "Retrigger GDD when the selection changes" This reverts commit f990d41fc52ab37cfaf5cc11762f08fcebc6e612. --- .../Test/Consensus/PeerSimulator/Run.hs | 13 +--- .../Storage/ChainDB/Impl/ChainSel.hs | 69 +++++++++++++------ 2 files changed, 52 insertions(+), 30 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index b7c04a17d3..2954a6af94 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -362,17 +362,10 @@ startNode schedulerConfig genesisTest interval = do -- The block fetch logic needs to be started after the block fetch clients -- otherwise, an internal assertion fails because getCandidates yields more -- peer fragments than registered clients. - let getCurrentChain = ChainDB.getCurrentChain lnChainDb - - gdd = updateLoEFragGenesis lrConfig (mkGDDTracerTestBlock lrTracer) (readTVar handles) - -- We make GDD rerun every time the anchor or the blocks of the - -- selection change. 
- gddTrigger = do - s <- viewChainSyncState handles (\ s -> (csLatestSlot s, csIdling s)) - c <- getCurrentChain - return (s, [AF.anchorToHash $ AF.headAnchor c]) - BlockFetch.startBlockFetchLogic lrRegistry lrTracer lnChainDb fetchClientRegistry getCandidates + + let gdd = updateLoEFragGenesis lrConfig (mkGDDTracerTestBlock lrTracer) (readTVar handles) + gddTrigger = viewChainSyncState handles (\ s -> (csLatestSlot s, csIdling s)) for_ lrLoEVar $ \ var -> do forkLinkedThread lrRegistry "LoE updater background" $ void $ runGdd gdd var lnChainDb gddTrigger diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs index 13f0aff464..47e142f0fb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs @@ -583,7 +583,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do , Just maxExtra <- computeLoEMaxExtra loeFrag newBlockFrag -> do -- ### Add to current chain traceWith addBlockTracer (TryAddToCurrentChain p) - addToCurrentChain succsOf' curChainAndLedger maxExtra + addToCurrentChain succsOf' curChainAndLedger loeFrag maxExtra -- The block is reachable from the current selection -- and it doesn't fit after the current selection @@ -595,7 +595,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do , Just maxExtra <- computeLoEMaxExtra loeFrag newBlockFrag -> do -- ### Switch to a fork traceWith addBlockTracer (TrySwitchToAFork p diff) - switchToAFork succsOf' lookupBlockInfo' curChainAndLedger maxExtra diff + switchToAFork succsOf' lookupBlockInfo' curChainAndLedger loeFrag maxExtra diff -- We cannot reach the block from the current selection | otherwise -> do @@ -647,10 +647,12 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do => (ChainHash blk -> Set (HeaderHash blk)) -> ChainAndLedger blk -- ^ The current chain and ledger + -> LoE (AnchoredFragment (Header blk)) + -- ^ LoE fragment -> LoELimit -- ^ How many extra blocks to select after @b@ at most. -> m (Point blk) - addToCurrentChain succsOf curChainAndLedger maxExtra = do + addToCurrentChain succsOf curChainAndLedger loeFrag maxExtra = do -- Extensions of @B@ that do not exceed the LoE let suffixesAfterB = Paths.maximalCandidates succsOf maxExtra (realPointToPoint p) @@ -671,10 +673,10 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do return $ AF.fromOldestFirst curHead (hdr : hdrs) let chainDiffs = NE.nonEmpty - $ NE.filter ( preferAnchoredCandidate (bcfg chainSelEnv) curChain - . 
Diff.getSuffix - ) - $ fmap Diff.extend candidates + $ map Diff.extend + $ filter (followsLoEFrag loeFrag) + $ NE.filter (preferAnchoredCandidate (bcfg chainSelEnv) curChain) + candidates -- All candidates are longer than the current chain, so they will be -- preferred over it, /unless/ the block we just added is an EBB, -- which has the same 'BlockNo' as the block before it, so when @@ -705,6 +707,17 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do curTip = castPoint $ AF.headPoint curChain curHead = AF.headAnchor curChain + -- Either frag extends loe or loe extends frag + -- + -- PRECONDITION: @AF.withinFragmentBounds (AF.anchorPoint frag) loe@ + followsLoEFrag :: LoE (AnchoredFragment (Header blk)) + -> AnchoredFragment (Header blk) + -> Bool + followsLoEFrag LoEDisabled _ = True + followsLoEFrag (LoEEnabled loe) frag = + AF.withinFragmentBounds (AF.headPoint loe) frag + || AF.withinFragmentBounds (AF.headPoint frag) loe + -- | We have found a 'ChainDiff' through the VolatileDB connecting the new -- block to the current chain. We'll call the intersection/anchor @x@. -- @@ -717,22 +730,26 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -> LookupBlockInfo blk -> ChainAndLedger blk -- ^ The current chain (anchored at @i@) and ledger + -> LoE (AnchoredFragment (Header blk)) + -- ^ LoE fragment -> LoELimit -- ^ How many extra blocks to select after @b@ at most. -> ChainDiff (HeaderFields blk) -- ^ Header fields for @(x,b]@ -> m (Point blk) - switchToAFork succsOf lookupBlockInfo curChainAndLedger maxExtra diff = do + switchToAFork succsOf lookupBlockInfo curChainAndLedger loeFrag maxExtra diff = do -- We use a cache to avoid reading the headers from disk multiple -- times in case they're part of multiple forks that go through @b@. let initCache = Map.singleton (headerHash hdr) hdr chainDiffs <- + fmap (filter (followsLoEFrag loeFrag . Diff.getSuffix)) + -- 4. Filter out candidates that are not preferred over the current -- chain. -- -- The suffixes all fork off from the current chain within @k@ -- blocks, so it satisfies the precondition of 'preferCandidate'. - fmap + . fmap ( filter ( preferAnchoredCandidate (bcfg chainSelEnv) curChain . Diff.getSuffix @@ -770,11 +787,16 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- | How many extra blocks to select at most after the tip of @newBlockFrag@ -- according to the LoE. -- - -- In no case the selection is allowed to be extended by more than k blocks. - -- We don't control from what chain those blocks would be selected at this - -- point. If we allowed more than k blocks, the immutable tip could enter an - -- adversarial branch. + -- There are two cases to consider: + -- + -- 1. If @newBlockFrag@ and @loeFrag@ are on the same chain, then we cannot + -- select more than @loeLimit@ blocks after @loeFrag@. -- + -- 2. If @newBlockFrag@ and @loeFrag@ are on different chains, then we + -- cannot select more than @loeLimit@ blocks after their intersection. + -- + -- In any case, 'Nothing' is returned if @newBlockFrag@ extends beyond + -- what LoE allows. computeLoEMaxExtra :: (HasHeader x, HeaderHash x ~ HeaderHash blk) => LoE (AnchoredFragment (Header blk)) @@ -784,13 +806,20 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- ^ The fragment with the new block @b@ as its tip, with the same -- anchor as @curChain@. 
-> Maybe LoELimit - computeLoEMaxExtra (LoEEnabled loeFrag) newBlockFrag - | rollback > k = Nothing - | otherwise = Just $ LoELimit $ k - rollback - where - d = Diff.diff newBlockFrag loeFrag - rollback = Diff.getRollback d - + computeLoEMaxExtra (LoEEnabled loeFrag) newBlockFrag = + -- Both fragments are on the same chain + if loeSuffixLength == 0 || rollback == 0 then + if rollback > k + loeSuffixLength + then Nothing + else Just $ LoELimit $ k + loeSuffixLength - rollback + else + if rollback > k + then Nothing + else Just $ LoELimit $ k - rollback + where + d = Diff.diff newBlockFrag loeFrag + rollback = Diff.getRollback d + loeSuffixLength = fromIntegral $ AF.length (Diff.getSuffix d) computeLoEMaxExtra LoEDisabled _ = Just LoEUnlimited From 64f3da3d3f1918c1e6de5f7f92a8cbf20fc1ea1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Fri, 3 May 2024 18:12:35 +0000 Subject: [PATCH 03/26] Don't retrigger chain selection if the tip of the loe fragment doesn't change --- .../Ouroboros/Consensus/Genesis/Governor.hs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs index 02cb70527d..9a4a34ee59 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs @@ -34,7 +34,7 @@ module Ouroboros.Consensus.Genesis.Governor ( , updateLoEFragUnconditional ) where -import Control.Monad (guard) +import Control.Monad (guard, when) import Control.Tracer (Tracer, traceWith) import Data.Containers.ListUtils (nubOrd) import Data.Foldable (for_) @@ -163,8 +163,11 @@ runGdd loEUpdater varLoEFrag chainDb getTrigger = curLedger <- ChainDB.getCurrentLedger chainDb pure (newTrigger, curChain, curLedger) loeFrag <- updateLoEFrag loEUpdater curChain curLedger - atomically $ writeTVar varLoEFrag loeFrag - triggerChainSelectionAsync chainDb + oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag + -- The chain selection only depends on the LoE tip, so there + -- is no point in retriggering it if the LoE tip hasn't changed. 
+ when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $ + triggerChainSelectionAsync chainDb spin newTrigger data DensityBounds blk = From 2f7152b210c02dabbe783015191cc43859937cc4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Tue, 7 May 2024 11:09:33 +0000 Subject: [PATCH 04/26] Increase amount of tries in the Uniform tests and enable CSJ in all of them --- .../Test/Consensus/Genesis/Tests/Uniform.hs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index 249aadc62c..98e4c8a6dc 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -66,11 +66,8 @@ tests = -- See Note [Leashing attacks] testProperty "stalling leashing attack" prop_leashingAttackStalling, testProperty "time limited leashing attack" prop_leashingAttackTimeLimited, - adjustQuickCheckTests (`div` 10) $ testProperty "serve adversarial branches" prop_serveAdversarialBranches, - adjustQuickCheckTests (`div` 100) $ testProperty "the LoE stalls the chain, but the immutable tip is honest" prop_loeStalling, - adjustQuickCheckTests (`div` 100) $ -- This is a crude way of ensuring that we don't get chains with more than 100 blocks, -- because this test writes the immutable chain to disk and `instance Binary TestBlock` -- chokes on long chains. @@ -357,10 +354,10 @@ prop_loeStalling = pure gt {gtChainSyncTimeouts = chainSyncNoTimeouts {canAwaitTimeout = shortWait}} ) - (defaultSchedulerConfig { + defaultSchedulerConfig { scEnableLoE = True, - scEnableChainSyncTimeouts = True - }) + scEnableCSJ = True + } shrinkPeerSchedules @@ -395,8 +392,12 @@ prop_downtime = forAllGenesisTest (genChains (QC.choose (1, 4)) `enrichedWith` \ gt -> ensureScheduleDuration gt <$> stToGen (uniformPointsWithDowntime (gtSecurityParam gt) (gtBlockTree gt))) - (defaultSchedulerConfig - {scEnableLoE = True, scEnableLoP = True, scDowntime = Just 11}) + defaultSchedulerConfig + { scEnableLoE = True + , scEnableLoP = True + , scDowntime = Just 11 + , scEnableCSJ = True + } shrinkPeerSchedules From a969b377cac85634681b4a6e872a91ce0150b598 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Mon, 6 May 2024 09:52:23 +0000 Subject: [PATCH 05/26] Allow to specify tests with multiple honest peers * Rewrite `Peers` to accept arbitrary number of peers * Actually generate honest peers in CSJ happy path * Support a field for extra honest peers in `GenesisTest` * Allow `uniformPoints` to generate schedules with multiple honest peers * Adapt CSJ test to use native multiple honest peers generation * Share partial accessor functions used in tests * Use partial accessor to retrieve the only honest peer --- .../ouroboros-consensus-diffusion.cabal | 1 + .../Consensus/Genesis/Setup/Classifiers.hs | 18 +- .../Test/Consensus/Genesis/Setup/GenChains.hs | 12 +- .../Test/Consensus/Genesis/Tests/CSJ.hs | 32 +-- .../Genesis/Tests/DensityDisconnect.hs | 49 ++-- .../Test/Consensus/Genesis/Tests/LoE.hs | 22 +- .../Test/Consensus/Genesis/Tests/LoP.hs | 22 +- .../Test/Consensus/Genesis/Tests/Uniform.hs | 39 +-- .../Test/Consensus/PeerSimulator/Run.hs | 3 +- .../Consensus/PeerSimulator/StateDiagram.hs | 9 +- .../Test/Consensus/PointSchedule.hs | 93 +++++-- 
.../Test/Consensus/PointSchedule/Peers.hs | 234 +++++++++++------- .../Test/Consensus/PointSchedule/Shrinking.hs | 128 ++++++---- .../PointSchedule/Shrinking/Tests.hs | 8 +- .../Test/Util/PartialAccessors.hs | 42 ++++ 15 files changed, 431 insertions(+), 281 deletions(-) create mode 100644 ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs diff --git a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal index 26c461887c..39d63d5e69 100644 --- a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal +++ b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal @@ -266,6 +266,7 @@ test-suite consensus-test Test.Consensus.PointSchedule.SinglePeer Test.Consensus.PointSchedule.SinglePeer.Indices Test.Consensus.PointSchedule.Tests + Test.Util.PartialAccessors Test.Util.TersePrinting build-depends: diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs index 559b6f1712..a7eb599293 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs @@ -38,8 +38,7 @@ import Test.Consensus.Network.AnchoredFragment.Extras (slotLength) import Test.Consensus.PeerSimulator.StateView (PeerSimulatorResult (..), StateView (..), pscrToException) import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peer (..), PeerId (..), - Peers (..)) +import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..)) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.Util.Orphans.IOLike () import Test.Util.TestBlock (TestBlock, TestHash (TestHash), @@ -165,15 +164,15 @@ resultClassifiers GenesisTest{gtSchedule} RunGenesisTestResult{rgtrStateView} = StateView{svPeerSimulatorResults} = rgtrStateView adversaries :: [PeerId] - adversaries = Map.keys $ others gtSchedule + adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers gtSchedule adversariesCount = fromIntegral $ length adversaries adversariesExceptions :: [(PeerId, SomeException)] adversariesExceptions = mapMaybe (\PeerSimulatorResult{psePeerId, pseResult} -> case psePeerId of - HonestPeer -> Nothing - pid -> (pid,) <$> pscrToException pseResult + HonestPeer _ -> Nothing + pid -> (pid,) <$> pscrToException pseResult ) svPeerSimulatorResults @@ -251,18 +250,17 @@ scheduleClassifiers GenesisTest{gtSchedule = schedule} = rollbacks :: Peers Bool rollbacks = hasRollback <$> schedule - adversaryRollback = any value $ others rollbacks + adversaryRollback = any id $ adversarialPeers rollbacks + honestRollback = any id $ honestPeers rollbacks - honestRollback = value $ honest rollbacks - - allAdversariesEmpty = all value $ others $ null <$> schedule + allAdversariesEmpty = all id $ adversarialPeers $ null <$> schedule isTrivial :: PeerSchedule TestBlock -> Bool isTrivial = \case [] -> True (t0, _):points -> all ((== t0) . 
fst) points - allAdversariesTrivial = all value $ others $ isTrivial <$> schedule + allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> schedule simpleHash :: HeaderHash block ~ TestHash => diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs index 70bbceb8fa..e65772275c 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/GenChains.hs @@ -8,6 +8,7 @@ module Test.Consensus.Genesis.Setup.GenChains ( GenesisTest (..) , genChains + , genChainsWithExtraHonestPeers ) where import Cardano.Slotting.Time (SlotLength, getSlotLength, @@ -94,6 +95,9 @@ genAlternativeChainSchema (testRecipeH, arHonest) = let H.ChainSchema _ v = A.uniformAdversarialChain (Just alternativeAsc) testRecipeA'' seed pure $ Just (prefixCount, Vector.toList (getVector v)) +genChains :: QC.Gen Word -> QC.Gen (GenesisTest TestBlock ()) +genChains = genChainsWithExtraHonestPeers (pure 0) + -- | Random generator for a block tree. The block tree contains one trunk (the -- “honest” chain) and as many branches as given as a parameter (the -- “alternative” chains or “bad” chains). For instance, one such tree could be @@ -103,8 +107,10 @@ genAlternativeChainSchema (testRecipeH, arHonest) = -- trunk: O─────1──2──3──4─────5──6──7 -- │ ╰─────6 -- ╰─────3──4─────5 -genChains :: QC.Gen Word -> QC.Gen (GenesisTest TestBlock ()) -genChains genNumForks = do +-- For now, the @extraHonestPeers@ generator is only used to fill the GenesisTest field. +-- However, in the future it could also be used to generate "short forks" near the tip of the trunk. +genChainsWithExtraHonestPeers :: QC.Gen Word -> QC.Gen Word -> QC.Gen (GenesisTest TestBlock ()) +genChainsWithExtraHonestPeers genNumExtraHonest genNumForks = do (asc, honestRecipe, someHonestChainSchema) <- genHonestChainSchema H.SomeHonestChainSchema _ _ honestChainSchema <- pure someHonestChainSchema @@ -116,6 +122,7 @@ genChains genNumForks = do HonestRecipe (Kcp kcp) (Scg scg) delta _len = honestRecipe numForks <- genNumForks + gtExtraHonestPeers <- genNumExtraHonest alternativeChainSchemas <- replicateM (fromIntegral numForks) (genAlternativeChainSchema (honestRecipe, honestChainSchema)) pure $ GenesisTest { gtSecurityParam = SecurityParam (fromIntegral kcp), @@ -131,6 +138,7 @@ genChains genNumForks = do -- would make for interesting tests. gtCSJParams = CSJParams $ fromIntegral scg, gtBlockTree = foldl' (flip BT.addBranch') (BT.mkTrunk goodChain) $ zipWith (genAdversarialFragment goodBlocks) [1..] 
alternativeChainSchemas, + gtExtraHonestPeers, gtSchedule = () } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs index 25da0a1dce..d0b8489952 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs @@ -3,9 +3,7 @@ module Test.Consensus.Genesis.Tests.CSJ (tests) where -import Control.Monad (replicateM) import Data.Containers.ListUtils (nubOrd) -import Data.Functor (($>)) import Data.List (nub) import Data.Maybe (mapMaybe) import Ouroboros.Consensus.Block (blockSlot, succWithOrigin) @@ -22,11 +20,11 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), import Test.Consensus.PeerSimulator.StateView (StateView (..)) import Test.Consensus.PeerSimulator.Trace (TraceEvent (..)) import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..), - mkPeers) +import Test.Consensus.PointSchedule.Peers (Peers (..), peers') import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TestBlock (Header, TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize) @@ -63,14 +61,11 @@ tests = prop_happyPath :: Bool -> Property prop_happyPath synchronized = forAllGenesisTest - ( do - gt <- genChains $ pure 0 - honest <- genHonestSchedule gt - numOthers <- choose (1, 3) - otherHonests <- if synchronized - then pure $ replicate numOthers honest - else replicateM numOthers (genHonestSchedule gt) - pure $ gt $> mkPeers honest otherHonests + ( if synchronized + then genChainsWithExtraHonestPeers (choose (2, 4)) (pure 0) + `enrichedWith` genUniformSchedulePoints + else genChains (pure 0) + `enrichedWith` genDuplicatedHonestSchedule ) ( defaultSchedulerConfig { scEnableCSJ = True @@ -119,13 +114,12 @@ prop_happyPath synchronized = (receivedHeadersOnlyOnce && receivedHeadersFromOnlyOnePeer) ) where - -- | This might seem wasteful, as we discard generated adversarial schedules. - -- It actually isn't, since we call it on trees that have no branches besides - -- the trunk, so no adversaries are generated. 
- genHonestSchedule :: GenesisTest TestBlock () -> Gen (PeerSchedule TestBlock) - genHonestSchedule gt = do - ps <- genUniformSchedulePoints gt - pure $ value $ honest ps + genDuplicatedHonestSchedule :: GenesisTest TestBlock () -> Gen (PeersSchedule TestBlock) + genDuplicatedHonestSchedule gt@GenesisTest{gtExtraHonestPeers} = do + Peers {honestPeers} <- genUniformSchedulePoints gt + pure $ peers' + (replicate (fromIntegral gtExtraHonestPeers + 1) (getHonestPeer honestPeers)) + [] isNewerThanJumpSizeFromTip :: GenesisTestFull TestBlock -> Header TestBlock -> Bool isNewerThanJumpSizeFromTip gt hdr = diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs index 87c85f54b9..7c6d9e75c8 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs @@ -59,6 +59,7 @@ import Test.QuickCheck.Extras (unsafeMapSuchThatJust) import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TersePrinting (terseHFragment, terseHeader) import Test.Util.TestBlock (TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize, @@ -120,7 +121,7 @@ staticCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} = tips = branchTip <$> candidates candidates :: Map PeerId (AnchoredFragment TestBlock) - candidates = Map.fromList (zip (HonestPeer : enumerateAdversaries) chains) + candidates = Map.fromList (zip (HonestPeer 1 : enumerateAdversaries) chains) chains = btTrunk gtBlockTree : (btbFull <$> branches) @@ -134,8 +135,8 @@ prop_densityDisconnectStatic = let (disconnect, _) = densityDisconnect sgen k (mkState <$> suffixes) suffixes loeFrag counterexample "it should disconnect some node" (not (null disconnect)) .&&. - counterexample "it should not disconnect the honest peer" - (HonestPeer `notElem` disconnect) + counterexample "it should not disconnect the honest peers" + (not $ any isHonestPeerId disconnect) where mkState :: AnchoredFragment (Header TestBlock) -> ChainSyncState TestBlock mkState frag = @@ -193,7 +194,7 @@ initCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} = fullTree = gtBlockTree } where - peers = mkPeers (peer trunk (AF.Empty (AF.headAnchor trunk)) (btTrunk gtBlockTree)) (branchPeer <$> branches) + peers = peers' [peer trunk (AF.Empty (AF.headAnchor trunk)) (btTrunk gtBlockTree)] (branchPeer <$> branches) branchPeer branch = peer (btbPrefix branch) (btbSuffix branch) (btbFull branch) @@ -230,8 +231,8 @@ data UpdateEvent = UpdateEvent { } snapshotTree :: Peers EvolvingPeer -> BlockTree (Header TestBlock) -snapshotTree Peers {honest, others} = - foldr addBranch' (mkTrunk (candidate (value honest))) (candidate . 
value <$> others) +snapshotTree Peers {honestPeers, adversarialPeers} = + foldr addBranch' (mkTrunk (candidate (getHonestPeer honestPeers))) (candidate <$> adversarialPeers) prettyUpdateEvent :: UpdateEvent -> [String] prettyUpdateEvent UpdateEvent {target, added, killed, bounds, tree, loeFrag, curChain} = @@ -274,7 +275,7 @@ updatePeers :: UpdateEvent -> Either (MonotonicityResult, Peers EvolvingPeer) Evolution updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, killed = killedNow} - | HonestPeer `Set.member` killedNow + | HonestPeer 1 `Set.member` killedNow = Left (HonestKilled, peers) | not (null violations) = Left (Nonmonotonic event, peers) @@ -287,12 +288,12 @@ updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, k violations = killedBefore \\ killedNow -- The new state if no violations were detected - evo@Evolution {peers = Peers {others = remaining}} + evo@Evolution {peers = Peers {adversarialPeers = remaining}} | targetExhausted -- If the target is done, reset the set of killed peers, since other peers -- may have lost only against the target. -- Remove the target from the active peers. - = Evolution {peers = peers {others = Map.delete target (others peers)}, killed = mempty} + = Evolution {peers = deletePeer target peers, killed = mempty} | otherwise -- Otherwise replace the killed peers with the current set = Evolution {peers, killed = killedNow} @@ -312,11 +313,11 @@ updatePeers (GenesisWindow sgen) peers killedBefore event@UpdateEvent {target, k -- The selection will then be computed by taking up to k blocks after the immutable tip -- on this peer's candidate fragment. firstBranch :: Peers EvolvingPeer -> Peer EvolvingPeer -firstBranch Peers {honest, others} = +firstBranch peers = fromMaybe newest $ - minimumBy (compare `on` forkAnchor) <$> nonEmpty (filter hasForked (toList others)) + minimumBy (compare `on` forkAnchor) <$> nonEmpty (filter hasForked (toList (adversarialPeers'' peers))) where - newest = maximumBy (compare `on` (AF.headSlot . candidate . value)) (honest : toList others) + newest = maximumBy (compare `on` (AF.headSlot . candidate . value)) (toList (honestPeers'' peers) ++ toList (adversarialPeers'' peers)) forkAnchor = fromWithOrigin 0 . AF.anchorToSlotNo . AF.anchor . forkSuffix . value hasForked Peer {value = EvolvingPeer {candidate, forkSlot}} = AF.headSlot candidate >= forkSlot @@ -325,7 +326,7 @@ firstBranch Peers {honest, others} = -- for all peers, and then taking the earliest among the results. 
immutableTip :: Peers EvolvingPeer -> AF.Point (Header TestBlock) immutableTip peers = - minimum (lastHonest <$> toList (others peers)) + minimum (lastHonest <$> toList (adversarialPeers'' peers)) where lastHonest Peer {value = EvolvingPeer {candidate, forkSlot = NotOrigin forkSlot}} = AF.headPoint $ @@ -470,7 +471,7 @@ prop_densityDisconnectTriggersChainSel = ( \GenesisTest {gtBlockTree, gtSchedule} stateView@StateView {svTipBlock} -> let - othersCount = Map.size (others gtSchedule) + othersCount = Map.size (adversarialPeers gtSchedule) exnCorrect = case exceptionsByComponent ChainSyncClient stateView of [fromException -> Just DensityTooLow] -> True [] | othersCount == 0 -> True @@ -482,16 +483,6 @@ prop_densityDisconnectTriggersChainSel = ) where - getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk - getOnlyBranch BlockTree {btBranches} = case btBranches of - [branch] -> branch - _ -> error "tree must have exactly one alternate branch" - - getTrunkTip :: HasHeader blk => BlockTree blk -> blk - getTrunkTip tree = case btTrunk tree of - (AF.Empty _) -> error "tree must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - -- 1. The adversary advertises blocks up to the intersection. -- 2. The honest node advertises all its chain, which is -- long enough to be blocked by the LoE. @@ -506,16 +497,14 @@ prop_densityDisconnectTriggersChainSel = intersect = case btbPrefix branch of (AF.Empty _) -> Origin (_ AF.:> tipBlock) -> At tipBlock - advTip = case btbFull branch of - (AF.Empty _) -> error "alternate branch must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - in mkPeers + advTip = getOnlyBranchTip tree + in peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain up to the intersection. - [ (Time 0, scheduleTipPoint trunkTip), + [[(Time 0, scheduleTipPoint trunkTip), (Time 0.5, scheduleHeaderPoint trunkTip), (Time 0.5, scheduleBlockPoint trunkTip) - ] + ]] -- Advertise the alternate branch early, but wait for the honest -- node to have served its chain before disclosing the alternate -- branch is not dense enough. 
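As a reading aid for the call-site changes above and below: the following is a
minimal sketch (not part of the patch) of how the reworked 'Peers'
representation indexes peers. It only assumes the 'Peers', 'peers'' and
'getPeerIds' definitions introduced in Test.Consensus.PointSchedule.Peers
further down in this patch; the name 'example' and the string values are
purely illustrative.

    -- Assumes: import Test.Consensus.PointSchedule.Peers
    --          import qualified Data.Map.Strict as Map
    example :: Peers String
    example = peers' ["honest schedule A", "honest schedule B"] ["adversarial schedule A"]

    -- With the definitions introduced below:
    --   honestPeers      example == Map.fromList [(1, "honest schedule A"), (2, "honest schedule B")]
    --   adversarialPeers example == Map.fromList [(1, "adversarial schedule A")]
    --   getPeerIds       example == [HonestPeer 1, HonestPeer 2, AdversarialPeer 1]
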
diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs index fe6b842b54..c3cd74a925 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs @@ -19,13 +19,14 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers, mkPeers) +import Test.Consensus.PointSchedule.Peers (Peers, peers') import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, scheduleHeaderPoint, scheduleTipPoint) import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TestEnv (adjustQuickCheckTests) tests :: TestTree @@ -76,27 +77,18 @@ prop_adversaryHitsTimeouts timeoutsEnabled = in selectedCorrect && exceptionsCorrect ) where - getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk - getOnlyBranch BlockTree {btBranches} = case btBranches of - [branch] -> branch - _ -> error "tree must have exactly one alternate branch" - delaySchedule :: HasHeader blk => BlockTree blk -> Peers (PeerSchedule blk) delaySchedule tree = - let trunkTip = case btTrunk tree of - (AF.Empty _) -> error "tree must have at least one block" - (_ AF.:> tipBlock) -> tipBlock + let trunkTip = getTrunkTip tree branch = getOnlyBranch tree intersectM = case btbPrefix branch of (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock - branchTip = case btbFull branch of - (AF.Empty _) -> error "alternate branch must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - in mkPeers + branchTip = getOnlyBranchTip tree + in peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. - ( (Time 0, scheduleTipPoint trunkTip) : case intersectM of + [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of Nothing -> [ (Time 0.5, scheduleHeaderPoint trunkTip), (Time 0.5, scheduleBlockPoint trunkTip) @@ -107,7 +99,7 @@ prop_adversaryHitsTimeouts timeoutsEnabled = (Time 5, scheduleHeaderPoint trunkTip), (Time 5, scheduleBlockPoint trunkTip) ] - ) + ] -- The one adversarial peer advertises and serves up to the -- intersection early, then waits more than the short wait timeout. 
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs index 4c97bac307..0c861ebe9f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs @@ -22,7 +22,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers, mkPeers, +import Test.Consensus.PointSchedule.Peers (Peers, peers', peersOnlyHonest) import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, @@ -30,6 +30,7 @@ import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.TestEnv (adjustQuickCheckTests) tests :: TestTree @@ -212,27 +213,18 @@ prop_delayAttack lopEnabled = in selectedCorrect && exceptionsCorrect ) where - getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk - getOnlyBranch BlockTree {btBranches} = case btBranches of - [branch] -> branch - _ -> error "tree must have exactly one alternate branch" - delaySchedule :: (HasHeader blk) => BlockTree blk -> Peers (PeerSchedule blk) delaySchedule tree = - let trunkTip = case btTrunk tree of - (AF.Empty _) -> error "tree must have at least one block" - (_ AF.:> tipBlock) -> tipBlock + let trunkTip = getTrunkTip tree branch = getOnlyBranch tree intersectM = case btbPrefix branch of (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock - branchTip = case btbFull branch of - (AF.Empty _) -> error "alternate branch must have at least one block" - (_ AF.:> tipBlock) -> tipBlock - in mkPeers + branchTip = getOnlyBranchTip tree + in peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. - ( (Time 0, scheduleTipPoint trunkTip) : case intersectM of + [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of Nothing -> [ (Time 0.5, scheduleHeaderPoint trunkTip), (Time 0.5, scheduleBlockPoint trunkTip) @@ -243,7 +235,7 @@ prop_delayAttack lopEnabled = (Time 5, scheduleHeaderPoint trunkTip), (Time 5, scheduleBlockPoint trunkTip) ] - ) + ] -- Advertise the alternate branch early, but don't serve it -- past the intersection, and wait for LoP bucket. 
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index 98e4c8a6dc..ebde97568c 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -40,8 +40,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (PeerId (..), Peers (..), - value) +import Test.Consensus.PointSchedule.Peers (Peers (..), isHonestPeerId) import Test.Consensus.PointSchedule.Shrinking (shrinkByRemovingAdversaries, shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer @@ -52,6 +51,7 @@ import Test.QuickCheck import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () +import Test.Util.PartialAccessors import Test.Util.QuickCheck (le) import Test.Util.TestBlock (TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize, @@ -89,13 +89,13 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = -- to the governor that the density is too low. longerThanGenesisWindow ==> conjoin [ - counterexample "The honest peer was disconnected" (HonestPeer `notElem` disconnected), + counterexample "An honest peer was disconnected" (not $ any isHonestPeerId disconnected), counterexample ("The immutable tip is not honest: " ++ show immutableTip) $ property (isHonest immutableTipHash), immutableTipIsRecent ] where - advCount = Map.size (others (gtSchedule genesisTest)) + advCount = Map.size (adversarialPeers (gtSchedule genesisTest)) immutableTipIsRecent = counterexample ("Age of the immutable tip: " ++ show immutableTipAge) $ @@ -129,7 +129,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = [] -> "No peers were disconnected" peers -> "Some peers were disconnected: " ++ intercalate ", " (condense <$> peers) - honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ value $ honest $ gtSchedule genesisTest + honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ getHonestPeer $ honestPeers $ gtSchedule genesisTest GenesisTest {gtBlockTree, gtGenesisWindow = GenesisWindow s, gtDelay = Delta d} = genesisTest @@ -161,7 +161,12 @@ prop_serveAdversarialBranches = forAllGenesisTest theProperty genUniformSchedulePoints :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) -genUniformSchedulePoints gt = stToGen (uniformPoints (gtBlockTree gt)) +genUniformSchedulePoints gt = stToGen (uniformPoints pointsGeneratorParams (gtBlockTree gt)) + where + pointsGeneratorParams = PointsGeneratorParams + { pgpExtraHonestPeers = fromIntegral $ gtExtraHonestPeers gt + , pgpDowntime = NoDowntime + } -- Note [Leashing attacks] -- @@ -212,7 +217,7 @@ prop_leashingAttackStalling = genLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) genLeashingSchedule genesisTest = do Peers honest advs0 <- ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest - advs <- mapM (mapM dropRandomPoints) advs0 + advs <- mapM dropRandomPoints advs0 pure $ Peers honest advs disableBoringTimeouts gt = @@ -266,15 +271,15 @@ prop_leashingAttackTimeLimited = -- | A schedule which doesn't run past the last event of the honest peer 
genTimeLimitedSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) genTimeLimitedSchedule genesisTest = do - Peers honest advs0 <- genUniformSchedulePoints genesisTest + Peers honests advs0 <- genUniformSchedulePoints genesisTest let timeLimit = estimateTimeBound (gtChainSyncTimeouts genesisTest) (gtLoPBucketParams genesisTest) - (value honest) - (map value $ Map.elems advs0) - advs = fmap (fmap (takePointsUntil timeLimit)) advs0 - extendedHonest = extendScheduleUntil timeLimit <$> honest - pure $ Peers extendedHonest advs + (getHonestPeer honests) + (Map.elems advs0) + advs = fmap (takePointsUntil timeLimit) advs0 + extendedHonests = extendScheduleUntil timeLimit <$> honests + pure $ Peers extendedHonests advs takePointsUntil limit = takeWhile ((<= limit) . fst) @@ -390,7 +395,7 @@ prop_downtime :: Property prop_downtime = forAllGenesisTest (genChains (QC.choose (1, 4)) `enrichedWith` \ gt -> - ensureScheduleDuration gt <$> stToGen (uniformPointsWithDowntime (gtSecurityParam gt) (gtBlockTree gt))) + ensureScheduleDuration gt <$> stToGen (uniformPoints (pointsGeneratorParams gt) (gtBlockTree gt))) defaultSchedulerConfig { scEnableLoE = True @@ -402,3 +407,9 @@ prop_downtime = forAllGenesisTest shrinkPeerSchedules theProperty + + where + pointsGeneratorParams gt = PointsGeneratorParams + { pgpExtraHonestPeers = fromIntegral (gtExtraHonestPeers gt) + , pgpDowntime = DowntimeWithSecurityParam (gtSecurityParam gt) + } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index 2954a6af94..c3c6298f43 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -16,6 +16,7 @@ import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (..), nullTracer, traceWith) import Data.Foldable (for_) import Data.Functor (void) +import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Ouroboros.Consensus.Block @@ -460,7 +461,7 @@ runPointSchedule :: m (StateView TestBlock) runPointSchedule schedulerConfig genesisTest tracer0 = withRegistry $ \registry -> do - peerSim <- makePeerSimulatorResources tracer gtBlockTree (getPeerIds gtSchedule) + peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds gtSchedule) lifecycle <- nodeLifecycle schedulerConfig genesisTest tracer registry peerSim (chainDb, stateViewTracers) <- runScheduler (Tracer $ traceWith tracer . TraceSchedulerEvent) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs index 937f4830ce..61b1e7914b 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/StateDiagram.hs @@ -598,11 +598,6 @@ instance Condense RenderCell where CellEllipsis -> " .. " RenderCell _ cell -> condense cell -renderPeerId :: PeerId -> String -renderPeerId = \case - HonestPeer -> "honest" - PeerId p -> p - slotWidth :: NonEmpty Cell -> SlotWidth slotWidth = maximum . 
fmap cellWidth @@ -612,7 +607,7 @@ slotWidth = CellPeers peerIds -> SlotWidth (sum (labelWidth <$> peerIds)) _ -> 1 - labelWidth pid = 2 + length (renderPeerId pid) + labelWidth pid = 2 + length (show pid) sortWidth = \case CellHere as -> sum (pointWidth <$> as) @@ -773,7 +768,7 @@ renderSlotNo config width num = renderPeers :: [PeerId] -> Col renderPeers peers = - ColCat [ColAspect (pure (Candidate p)) (ColString (" " ++ renderPeerId p)) | p <- peers] + ColCat [ColAspect (pure (Candidate p)) (ColString (" " ++ show p)) | p <- peers] renderCell :: RenderConfig -> RenderCell -> Col renderCell config@RenderConfig {ellipsis} = \case diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs index a883a4fc84..3171bbf826 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs @@ -23,6 +23,7 @@ module Test.Consensus.PointSchedule ( BlockFetchTimeout (..) , CSJParams (..) + , DowntimeParams (..) , ForecastRange (..) , GenesisTest (..) , GenesisTestFull @@ -30,6 +31,7 @@ module Test.Consensus.PointSchedule ( , LoPBucketParams (..) , PeerSchedule , PeersSchedule + , PointsGeneratorParams (..) , RunGenesisTestResult (..) , enrichedWith , ensureScheduleDuration @@ -43,16 +45,17 @@ module Test.Consensus.PointSchedule ( , prettyPeersSchedule , stToGen , uniformPoints - , uniformPointsWithDowntime ) where import Cardano.Slotting.Time (SlotLength) +import Control.Monad (replicateM) import Control.Monad.Class.MonadTime.SI (Time (Time), addTime, diffTime) import Control.Monad.ST (ST) import Data.Foldable (toList) import Data.Functor (($>)) import Data.List (mapAccumL, partition, scanl') +import qualified Data.Map.Strict as Map import Data.Maybe (catMaybes, fromMaybe, mapMaybe) import Data.Time (DiffTime) import Data.Word (Word64) @@ -76,7 +79,7 @@ import Test.Consensus.PeerSimulator.StateView (StateView) import Test.Consensus.PointSchedule.NodeState (NodeState (..), genesisNodeState) import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..), - mkPeers, peersList) + peers', peersList) import Test.Consensus.PointSchedule.SinglePeer (IsTrunk (IsBranch, IsTrunk), PeerScheduleParams (..), SchedulePoint (..), defaultPeerScheduleParams, mergeOn, @@ -96,6 +99,9 @@ prettyPeersSchedule :: PeersSchedule blk -> [String] prettyPeersSchedule peers = + [ "honest peers: " ++ show (Map.size (honestPeers peers)) + , "adversaries: " ++ show (Map.size (adversarialPeers peers)) + ] ++ zipWith3 (\number time peerState -> number ++ ": " ++ peerState ++ " @ " ++ time @@ -191,7 +197,7 @@ longRangeAttack :: longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do honest <- peerScheduleFromTipPoints g honParams [(IsTrunk, [AF.length btTrunk - 1])] btTrunk [] adv <- peerScheduleFromTipPoints g advParams [(IsBranch, [AF.length (btbFull branch) - 1])] btTrunk [btbFull branch] - pure (mkPeers honest [adv]) + pure (peers' [honest] [adv]) where honParams = defaultPeerScheduleParams {pspHeaderDelayInterval = (0.3, 0.4)} advParams = defaultPeerScheduleParams {pspTipDelayInterval = (0, 0.1)} @@ -199,22 +205,45 @@ longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do longRangeAttack _ _ = error "longRangeAttack can only deal with single adversary" --- | Generate a schedule in which the trunk and branches are served by one peer each, using --- a single tip point, without 
specifically assigned delay intervals like in --- 'newLongRangeAttack'. +data PointsGeneratorParams = PointsGeneratorParams { + pgpExtraHonestPeers :: Int, + pgpDowntime :: DowntimeParams +} + +data DowntimeParams = NoDowntime | DowntimeWithSecurityParam SecurityParam + +uniformPoints :: + (StatefulGen g m, AF.HasHeader blk) => + PointsGeneratorParams -> + BlockTree blk -> + g -> + m (PeersSchedule blk) +uniformPoints PointsGeneratorParams {pgpExtraHonestPeers, pgpDowntime} = case pgpDowntime of + NoDowntime -> uniformPointsWithExtraHonestPeers pgpExtraHonestPeers + DowntimeWithSecurityParam k -> uniformPointsWithExtraHonestPeersAndDowntime pgpExtraHonestPeers k + +-- | Generate a schedule in which the trunk is served by @pgpExtraHonestPeers + 1@ peers, +-- and extra branches are served by one peer each, using a single tip point, +-- without specifically assigned delay intervals like in 'newLongRangeAttack'. -- -- Include rollbacks in a percentage of adversaries, in which case that peer uses two branchs. -- -uniformPoints :: +uniformPointsWithExtraHonestPeers :: (StatefulGen g m, AF.HasHeader blk) => + Int -> BlockTree blk -> g -> m (PeersSchedule blk) -uniformPoints BlockTree {btTrunk, btBranches} g = do +uniformPointsWithExtraHonestPeers + extraHonestPeers + BlockTree {btTrunk, btBranches} + g + = do honestTip0 <- firstTip btTrunk - honest <- mkSchedule [(IsTrunk, [honestTip0 .. AF.length btTrunk - 1])] [] + honests <- replicateM (extraHonestPeers + 1) $ + mkSchedule [(IsTrunk, [honestTip0 .. AF.length btTrunk - 1])] [] advs <- takeBranches btBranches - pure (mkPeers honest advs) + pure (peers' honests advs) where takeBranches = \case [] -> pure [] @@ -301,16 +330,16 @@ bumpTips tips = = (tn, (t0, p)) step ts a = (ts, a) -syncTips :: [(Time, SchedulePoint blk)] -> [[(Time, SchedulePoint blk)]] -> ([(Time, SchedulePoint blk)], [[(Time, SchedulePoint blk)]]) -syncTips honest advs = - (bump honest, bump <$> advs) +syncTips :: [[(Time, SchedulePoint blk)]] -> [[(Time, SchedulePoint blk)]] -> ([[(Time, SchedulePoint blk)]], [[(Time, SchedulePoint blk)]]) +syncTips honests advs = + (bump <$> honests, bump <$> advs) where bump = bumpTips earliestTips earliestTips = chooseEarliest <$> zipPadN (tipTimes <$> scheds) - scheds = honest : advs + scheds = honests <> advs chooseEarliest times = minimum (fromMaybe (Time 0) <$> times) --- | This is a variant of 'uniformPoints' that uses multiple tip points, used to simulate node downtimes. +-- | This is a variant of 'uniformPointsWithExtraHonestPeers' that uses multiple tip points, used to simulate node downtimes. -- Ultimately, this should be replaced by a redesign of the peer schedule generator that is aware of node liveness -- intervals. -- @@ -320,23 +349,30 @@ syncTips honest advs = -- The second tip is the last block of each branch. -- -- Includes rollbacks in some schedules. -uniformPointsWithDowntime :: +uniformPointsWithExtraHonestPeersAndDowntime :: (StatefulGen g m, AF.HasHeader blk) => + Int -> SecurityParam -> BlockTree blk -> g -> m (PeersSchedule blk) -uniformPointsWithDowntime (SecurityParam k) BlockTree {btTrunk, btBranches} g = do +uniformPointsWithExtraHonestPeersAndDowntime + extraHonestPeers + (SecurityParam k) + BlockTree {btTrunk, btBranches} + g + = do let kSlot = withOrigin 0 (fromIntegral . unSlotNo) (AF.headSlot (AF.takeOldest (fromIntegral k) btTrunk)) midSlot = (AF.length btTrunk) `div` 2 lowerBound = max kSlot midSlot pauseSlot <- SlotNo . 
fromIntegral <$> Random.uniformRM (lowerBound, AF.length btTrunk - 1) g honestTip0 <- firstTip pauseSlot btTrunk - honest <- mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] + honests <- replicateM (extraHonestPeers + 1) $ + mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] advs <- takeBranches pauseSlot btBranches - let (honest', advs') = syncTips honest advs - pure (mkPeers honest' advs') + let (honests', advs') = syncTips honests advs + pure (peers' honests' advs') where takeBranches pause = \case [] -> pure [] @@ -391,7 +427,6 @@ uniformPointsWithDowntime (SecurityParam k) BlockTree {btTrunk, btBranches} g = rollbackProb = 0.2 - newtype ForecastRange = ForecastRange { unForecastRange :: Word64 } deriving (Show) @@ -425,6 +460,13 @@ data GenesisTest blk schedule = GenesisTest gtLoPBucketParams :: LoPBucketParams, gtCSJParams :: CSJParams, gtSlotLength :: SlotLength, + -- | The number of extra honest peers we want in the test. + -- It is stored here for convenience, and because it may affect schedule and block tree generation. + -- + -- There will be at most one adversarial peer per alternative branch in the block tree + -- (exactly one per branch if no adversary does a rollback), + -- and `1 + gtExtraHonestPeers` honest peers. + gtExtraHonestPeers :: Word, gtSchedule :: schedule } @@ -499,11 +541,9 @@ duplicateLastPoint d xs = in xs ++ [(addTime d t, p)] ensureScheduleDuration :: GenesisTest blk a -> PeersSchedule blk -> PeersSchedule blk -ensureScheduleDuration gt Peers {honest, others} = - Peers {honest = extendHonest, others} +ensureScheduleDuration gt peers = + duplicateLastPoint endingDelay <$> peers where - extendHonest = duplicateLastPoint endingDelay <$> honest - endingDelay = let cst = gtChainSyncTimeouts gt bft = gtBlockFetchTimeouts gt @@ -513,5 +553,4 @@ ensureScheduleDuration gt Peers {honest, others} = , busyTimeout bft , streamingTimeout bft ]) - - peerCount = 1 + length others + peerCount = length (peersList peers) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs index 9d0a084c08..13742d1d65 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs @@ -15,13 +15,19 @@ module Test.Consensus.PointSchedule.Peers ( Peer (..) , PeerId (..) , Peers (..) + , adversarialPeers' + , adversarialPeers'' + , deletePeer , enumerateAdversaries , fromMap , fromMap' , getPeer , getPeerIds - , mkPeers - , mkPeers' + , honestPeers' + , honestPeers'' + , isAdversarialPeerId + , isHonestPeerId + , peers' , peersFromPeerIdList , peersFromPeerIdList' , peersFromPeerList @@ -33,8 +39,6 @@ module Test.Consensus.PointSchedule.Peers ( ) where import Data.Hashable (Hashable) -import Data.List.NonEmpty (NonEmpty ((:|))) -import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.String (IsString (fromString)) @@ -45,20 +49,25 @@ import Ouroboros.Consensus.Util.Condense (Condense (..), condenseListWithPadding) -- | Identifier used to index maps and specify which peer is active during a tick. 
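--
-- For example (illustrative only, using the instances introduced just below in
-- this patch): @condense (HonestPeer 1) == "honest"@,
-- @condense (HonestPeer 2) == "honest 2"@,
-- @condense (AdversarialPeer 3) == "adversary 3"@, and
-- @fromString "adversary 3" == AdversarialPeer 3@.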
-data PeerId = - HonestPeer - | - PeerId String +data PeerId + = HonestPeer Int + | AdversarialPeer Int deriving (Eq, Generic, Show, Ord, NoThunks) instance IsString PeerId where - fromString "honest" = HonestPeer - fromString i = PeerId i + fromString s = case words s of + ["honest"] -> HonestPeer 1 + ["honest", n] -> HonestPeer (read n) + ["adversary"] -> AdversarialPeer 1 + ["adversary", n] -> AdversarialPeer (read n) + _ -> error $ "fromString: invalid PeerId: " ++ s instance Condense PeerId where condense = \case - HonestPeer -> "honest" - PeerId name -> name + HonestPeer 1 -> "honest" + HonestPeer n -> "honest " ++ show n + AdversarialPeer 1 -> "adversary" + AdversarialPeer n -> "adversary " ++ show n instance CondenseList PeerId where condenseList = condenseListWithPadding PadRight @@ -94,119 +103,158 @@ instance CondenseList a => CondenseList (Peer a) where (condenseList $ value <$> peers) -- | General-purpose functor for a set of peers. --- --- REVIEW: There is a duplicate entry for the honest peer, here. We should --- probably either have only the 'Map' or have the keys of the map be 'String'? --- --- Alternatively, we could just have 'newtype PeerId = PeerId String' with an --- alias for 'HonestPeer = PeerId "honest"'? -data Peers a = - Peers { - honest :: Peer a, - others :: Map PeerId (Peer a) +data Peers a = Peers + { honestPeers :: Map Int a, + adversarialPeers :: Map Int a } deriving (Eq, Show) +-- | Variant of 'honestPeers' that returns a map with 'PeerId's as keys. +honestPeers' :: Peers a -> Map PeerId a +honestPeers' = Map.mapKeysMonotonic HonestPeer . honestPeers + +-- | Variant of 'honestPeers' that returns a map with 'PeerId's as keys and +-- values as 'Peer's. +honestPeers'' :: Peers a -> Map PeerId (Peer a) +honestPeers'' = Map.mapWithKey Peer . honestPeers' + +-- | Variant of 'adversarialPeers' that returns a map with 'PeerId's as keys. +adversarialPeers' :: Peers a -> Map PeerId a +adversarialPeers' peers = Map.mapKeysMonotonic AdversarialPeer $ adversarialPeers peers + +-- | Variant of 'adversarialPeers' that returns a map with 'PeerId's as keys and +-- values as 'Peer's. +adversarialPeers'' :: Peers a -> Map PeerId (Peer a) +adversarialPeers'' = Map.mapWithKey Peer . adversarialPeers' + instance Functor Peers where - fmap f Peers {honest, others} = Peers {honest = f <$> honest, others = fmap f <$> others} + fmap f Peers {honestPeers, adversarialPeers} = + Peers + { honestPeers = f <$> honestPeers, + adversarialPeers = f <$> adversarialPeers + } instance Foldable Peers where - foldMap f Peers {honest, others} = (f . value) honest <> foldMap (f . value) others + foldMap f Peers {honestPeers, adversarialPeers} = + foldMap f honestPeers <> foldMap f adversarialPeers -- | A set of peers with only one honest peer carrying the given value. peersOnlyHonest :: a -> Peers a peersOnlyHonest value = - Peers { - honest = Peer {name = HonestPeer, value}, - others = Map.empty + Peers + { honestPeers = Map.singleton 1 value, + adversarialPeers = Map.empty } -- | Extract all 'PeerId's. -getPeerIds :: Peers a -> NonEmpty PeerId -getPeerIds peers = HonestPeer :| Map.keys (others peers) +getPeerIds :: Peers a -> [PeerId] +getPeerIds Peers {honestPeers, adversarialPeers} = + (HonestPeer <$> Map.keys honestPeers) ++ (AdversarialPeer <$> Map.keys adversarialPeers) getPeer :: PeerId -> Peers a -> Peer a -getPeer pid peers - | HonestPeer <- pid - = honest peers - | otherwise - = others peers Map.! pid +getPeer (HonestPeer n) Peers {honestPeers} = Peer (HonestPeer n) (honestPeers Map.! 
n) +getPeer (AdversarialPeer n) Peers {adversarialPeers} = Peer (AdversarialPeer n) (adversarialPeers Map.! n) updatePeer :: (a -> (a, b)) -> PeerId -> Peers a -> (Peers a, b) -updatePeer f pid Peers {honest, others} - | HonestPeer <- pid - , let (a, b) = f (value honest) - = (Peers {honest = a <$ honest, others}, b) - | otherwise - , let p = others Map.! pid - (a, b) = f (value p) - = (Peers {honest, others = Map.adjust (a <$) pid others}, b) +updatePeer f (HonestPeer n) Peers {honestPeers, adversarialPeers} = + let (a, b) = f (honestPeers Map.! n) + in (Peers {honestPeers = Map.insert n a honestPeers, adversarialPeers}, b) +updatePeer f (AdversarialPeer n) Peers {honestPeers, adversarialPeers} = + let (a, b) = f (adversarialPeers Map.! n) + in (Peers {honestPeers, adversarialPeers = Map.insert n a adversarialPeers}, b) -- | Convert 'Peers' to a list of 'Peer'. -peersList :: Peers a -> NonEmpty (Peer a) -peersList Peers {honest, others} = - honest :| Map.elems others +peersList :: Peers a -> [Peer a] +peersList Peers {honestPeers, adversarialPeers} = + Map.foldrWithKey + (\k v -> (Peer (HonestPeer k) v :)) + ( Map.foldrWithKey + (\k v -> (Peer (AdversarialPeer k) v :)) + [] + adversarialPeers + ) + honestPeers enumerateAdversaries :: [PeerId] -enumerateAdversaries = - (\ n -> PeerId ("adversary " ++ show n)) <$> [1 :: Int ..] +enumerateAdversaries = AdversarialPeer <$> [1 ..] -- | Construct 'Peers' from values, adding adversary names based on the default schema. --- A single adversary gets the ID @adversary@, multiple get enumerated as @adversary N@. -mkPeers :: a -> [a] -> Peers a -mkPeers h as = - Peers (Peer HonestPeer h) (Map.fromList (mkPeer <$> advs as)) - where - mkPeer (pid, a) = (pid, Peer pid a) - advs [a] = [("adversary", a)] - advs _ = zip enumerateAdversaries as - --- | Make a 'Peers' structure from the honest value and the other peers. Fail if --- one of the other peers is the 'HonestPeer'. -mkPeers' :: a -> [Peer a] -> Peers a -mkPeers' value prs = - Peers (Peer HonestPeer value) (Map.fromList $ dupAdvPeerId <$> prs) - where - -- | Duplicate an adversarial peer id; fail if honest. - dupAdvPeerId :: Peer a -> (PeerId, Peer a) - dupAdvPeerId (Peer HonestPeer _) = error "cannot be the honest peer" - dupAdvPeerId peer@(Peer pid _) = (pid, peer) - --- | Make a 'Peers' structure from a non-empty list of peers. Fail if the honest --- peer is not exactly once in the list. -peersFromPeerList :: NonEmpty (Peer a) -> Peers a -peersFromPeerList = - uncurry mkPeers' . extractHonestPeer . NonEmpty.toList +peers' :: [a] -> [a] -> Peers a +peers' hs as = + Peers + { honestPeers = Map.fromList $ zip [1 ..] hs, + adversarialPeers = Map.fromList $ zip [1 ..] as + } + +-- | Make a 'Peers' structure from individual 'Peer's. +peersFromPeerList :: [Peer a] -> Peers a +peersFromPeerList peers = + let (hs, as) = partitionPeers peers + in Peers + { honestPeers = Map.fromList hs, + adversarialPeers = Map.fromList as + } where - -- | Return the value associated with the honest peer and the list of peers - -- excluding the honest one. - extractHonestPeer :: [Peer a] -> (a, [Peer a]) - extractHonestPeer [] = error "could not find honest peer" - extractHonestPeer (Peer HonestPeer value : peers) = (value, peers) - extractHonestPeer (peer : peers) = (peer :) <$> extractHonestPeer peers - --- | Make a 'Peers' structure from a non-empty list of peer ids and a default --- value. Fails if the honest peer is not exactly once in the list. 
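For intuition, here is a small usage sketch of the reworked 'Peers' API (a hypothetical GHCi session; it assumes only the definitions from this module plus Data.String.fromString in scope, and uses illustrative string values):

  -- >>> getPeerIds (peers' ["h1", "h2"] ["a1"])
  -- [HonestPeer 1,HonestPeer 2,AdversarialPeer 1]
  -- >>> fromString "adversary 3" :: PeerId
  -- AdversarialPeer 3
  -- >>> value (getPeer (HonestPeer 2) (peers' ["h1", "h2"] ["a1"]))
  -- "h2"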
-peersFromPeerIdList :: NonEmpty PeerId -> a -> Peers a + partitionPeers :: [Peer a] -> ([(Int, a)], [(Int, a)]) + partitionPeers = + foldl + ( \(hs, as) (Peer pid v) -> case pid of + HonestPeer n -> ((n, v) : hs, as) + AdversarialPeer n -> (hs, (n, v) : as) + ) + ([], []) + +-- | Make a 'Peers' structure from a list of peer ids and a default value. +peersFromPeerIdList :: [PeerId] -> a -> Peers a peersFromPeerIdList = flip $ \val -> peersFromPeerList . fmap (flip Peer val) -- | Like 'peersFromPeerIdList' with @()@. -peersFromPeerIdList' :: NonEmpty PeerId -> Peers () +peersFromPeerIdList' :: [PeerId] -> Peers () peersFromPeerIdList' = flip peersFromPeerIdList () -toMap :: Peers a -> Map PeerId (Peer a) -toMap Peers{honest, others} = Map.insert HonestPeer honest others - -- | Same as 'toMap' but the map contains unwrapped values. toMap' :: Peers a -> Map PeerId a -toMap' = fmap (\(Peer _ v) -> v) . toMap +toMap' Peers {honestPeers, adversarialPeers} = + Map.union + (Map.mapKeysMonotonic HonestPeer honestPeers) + (Map.mapKeysMonotonic AdversarialPeer adversarialPeers) -fromMap :: Map PeerId (Peer a) -> Peers a -fromMap peers = Peers{ - honest = peers Map.! HonestPeer, - others = Map.delete HonestPeer peers - } +toMap :: Peers a -> Map PeerId (Peer a) +toMap = Map.mapWithKey Peer . toMap' -- | Same as 'fromMap' but the map contains unwrapped values. fromMap' :: Map PeerId a -> Peers a -fromMap' = fromMap . Map.mapWithKey Peer +fromMap' peers = + let (honestPeers, adversarialPeers) = + Map.mapEitherWithKey + ( \case + HonestPeer _ -> Left + AdversarialPeer _ -> Right + ) + peers + in Peers + { honestPeers = Map.mapKeysMonotonic unHonestPeer honestPeers, + adversarialPeers = Map.mapKeysMonotonic unAdversarialPeer adversarialPeers + } + where + unHonestPeer (HonestPeer n) = n + unHonestPeer _ = error "unHonestPeer: not a honest peer" + unAdversarialPeer (AdversarialPeer n) = n + unAdversarialPeer _ = error "unAdversarialPeer: not an adversarial peer" + +fromMap :: Map PeerId (Peer a) -> Peers a +fromMap = fromMap' . 
Map.map value + +deletePeer :: PeerId -> Peers a -> Peers a +deletePeer (HonestPeer n) Peers {honestPeers, adversarialPeers} = + Peers {honestPeers = Map.delete n honestPeers, adversarialPeers} +deletePeer (AdversarialPeer n) Peers {honestPeers, adversarialPeers} = + Peers {honestPeers, adversarialPeers = Map.delete n adversarialPeers} + +isHonestPeerId :: PeerId -> Bool +isHonestPeerId (HonestPeer _) = True +isHonestPeerId _ = False + +isAdversarialPeerId :: PeerId -> Bool +isAdversarialPeerId (AdversarialPeer _) = True +isAdversarialPeerId _ = False diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs index 8332e62021..a574077634 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs @@ -2,8 +2,8 @@ {-# LANGUAGE NamedFieldPuns #-} module Test.Consensus.PointSchedule.Shrinking ( - shrinkByRemovingAdversaries -- | Exported only for testing (that is, checking the properties of the function) + shrinkByRemovingAdversaries , shrinkHonestPeer , shrinkPeerSchedules ) where @@ -11,7 +11,9 @@ module Test.Consensus.PointSchedule.Shrinking ( import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) import Data.Containers.ListUtils (nubOrd) +import Data.Function ((&)) import Data.Functor ((<&>)) +import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (mapMaybe, maybeToList) import Ouroboros.Network.AnchoredFragment (AnchoredFragment, @@ -22,9 +24,10 @@ import Test.Consensus.PeerSimulator.StateView (StateView) import Test.Consensus.PointSchedule (GenesisTest (..), GenesisTestFull, PeerSchedule, PeersSchedule, peerSchedulesBlocks) -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..)) +import Test.Consensus.PointSchedule.Peers (Peers (..)) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.QuickCheck (shrinkList) +import Test.Util.PartialAccessors import Test.Util.TestBlock (TestBlock, isAncestorOf, isStrictAncestorOf) @@ -38,16 +41,19 @@ shrinkPeerSchedules :: [GenesisTestFull TestBlock] shrinkPeerSchedules genesisTest _stateView = let trimmedBlockTree sch = trimBlockTree' sch (gtBlockTree genesisTest) - shrunkOthers = shrinkOtherPeers shrinkPeerSchedule (gtSchedule genesisTest) <&> - \shrunkSchedule -> genesisTest - { gtSchedule = shrunkSchedule - , gtBlockTree = trimmedBlockTree shrunkSchedule - } - shrunkHonest = shrinkHonestPeer - (gtSchedule genesisTest) - -- No need to update the tree here, shrinking the honest peer never discards blocks - <&> \shrunkSchedule -> genesisTest {gtSchedule = shrunkSchedule} - in shrunkOthers ++ shrunkHonest + shrunkAdversarialPeers = + shrinkAdversarialPeers shrinkPeerSchedule (gtSchedule genesisTest) + <&> \shrunkSchedule -> + genesisTest + { gtSchedule = shrunkSchedule, + gtBlockTree = trimmedBlockTree shrunkSchedule + } + shrunkHonestPeers = + shrinkHonestPeers + (gtSchedule genesisTest) + -- No need to update the tree here, shrinking the honest peers never discards blocks + <&> \shrunkSchedule -> genesisTest {gtSchedule = shrunkSchedule} + in shrunkAdversarialPeers ++ shrunkHonestPeers -- | Shrink a 'Peers PeerSchedule' by removing adversaries. 
This does not affect -- the honest peer; and it does not remove ticks from the schedules of the @@ -57,7 +63,7 @@ shrinkByRemovingAdversaries :: StateView TestBlock -> [GenesisTestFull TestBlock] shrinkByRemovingAdversaries genesisTest _stateView = - shrinkOtherPeers (const []) (gtSchedule genesisTest) <&> \shrunkSchedule -> + shrinkAdversarialPeers (const []) (gtSchedule genesisTest) <&> \shrunkSchedule -> let trimmedBlockTree = trimBlockTree' shrunkSchedule (gtBlockTree genesisTest) in (genesisTest{gtSchedule = shrunkSchedule, gtBlockTree = trimmedBlockTree}) @@ -68,25 +74,45 @@ shrinkPeerSchedule = shrinkList (const []) -- | Shrink the 'others' field of a 'Peers' structure by attempting to remove -- peers or by shrinking their values using the given shrinking function. -shrinkOtherPeers :: (a -> [a]) -> Peers a -> [Peers a] -shrinkOtherPeers shrink Peers{honest, others} = - map (Peers honest . Map.fromList) $ - shrinkList (traverse (traverse shrink)) $ Map.toList others +shrinkAdversarialPeers :: (a -> [a]) -> Peers a -> [Peers a] +shrinkAdversarialPeers shrink Peers {honestPeers, adversarialPeers} = + map (Peers honestPeers . Map.fromList) $ + shrinkList (traverse shrink) $ + Map.toList adversarialPeers + +-- | Shrinks honest peers by removing ticks. Because we are manipulating +-- 'PeerSchedule' at this point, there is no proper notion of a tick. Instead, +-- we remove points from the honest 'PeerSchedule', and move all other points +-- sooner, including those on the other schedules. We check that this operation +-- neither changes the final state of the honest peer, nor removes points from +-- the other schedules. +shrinkHonestPeers :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] +shrinkHonestPeers Peers {honestPeers, adversarialPeers} = + Map.toList honestPeers + & concatMap + ( \(n, schedule) -> + shrinkTheHonestPeer schedule (Map.delete n honestPeers) adversarialPeers + & map + ( \(schedule', otherHonestPeers', otherAdversarialPeers') -> + Peers + { honestPeers = Map.insert n schedule' otherHonestPeers', + adversarialPeers = otherAdversarialPeers' + } + ) + ) --- | Shrinks an honest peer by removing ticks. --- Because we are manipulating 'PeerSchedule' at that point, there is no proper --- notion of a tick. Instead, we remove points of the honest 'PeerSchedule', --- and move all other points sooner, including those on the adversarial schedule. --- We check that this operation neither changes the final state of the honest peer, --- nor that it removes points from the adversarial schedules. 
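The core operation behind this shrinker is the step that shifts every schedule point after a chosen time earlier by a fixed amount, dropping any point that would cross the split. A minimal standalone sketch of that idea, on plain @(Time, point)@ pairs rather than the real 'PeerSchedule' and without the validity checks that the functions below perform, could look like this:

  import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime)
  import Data.Maybe (mapMaybe)

  speedUpAfter :: Time -> DiffTime -> [(Time, p)] -> [(Time, p)]
  speedUpAfter at speedUpBy points = before ++ mapMaybe shift after
    where
      (before, after) = span ((< at) . fst) points
      shift (t, p)
        | t' >= at  = Just (t', p) -- keep the point, moved earlier
        | otherwise = Nothing      -- it would cross the split: drop it
        where
          t' = addTime (negate speedUpBy) t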
-shrinkHonestPeer :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] -shrinkHonestPeer Peers{honest, others} = do +shrinkTheHonestPeer :: + PeerSchedule blk -> + Map Int (PeerSchedule blk) -> + Map Int (PeerSchedule blk) -> + [(PeerSchedule blk, Map Int (PeerSchedule blk), Map Int (PeerSchedule blk))] +shrinkTheHonestPeer theSchedule otherHonestPeers otherAdversarialPeers = do (at, speedUpBy) <- splits - (honest', others') <- maybeToList $ do - honest' <- traverse (speedUpHonestSchedule at speedUpBy) honest - others' <- mapM (traverse (speedUpAdversarialSchedule at speedUpBy)) others - pure (honest', others') - pure $ Peers honest' others' + maybeToList $ do + theSchedule' <- speedUpTheSchedule at speedUpBy theSchedule + otherHonestPeers' <- mapM (speedUpOtherSchedule at speedUpBy) otherHonestPeers + otherAdversarialPeers' <- mapM (speedUpOtherSchedule at speedUpBy) otherAdversarialPeers + pure (theSchedule', otherHonestPeers', otherAdversarialPeers') where -- | A list of non-zero time intervals between successive points of the honest schedule splits :: [(Time, DiffTime)] @@ -96,15 +122,29 @@ shrinkHonestPeer Peers{honest, others} = do then Nothing else Just (t1, diffTime t2 t1) ) - (zip (value honest) (drop 1 $ value honest)) + (zip theSchedule (drop 1 theSchedule)) + +-- | For testing purposes only. Assumes there is exactly one honest peer and +-- shrinks it. +shrinkHonestPeer :: PeersSchedule blk -> [PeersSchedule blk] +shrinkHonestPeer Peers {honestPeers, adversarialPeers} = + shrinkTheHonestPeer (getHonestPeer honestPeers) Map.empty adversarialPeers + & map + ( \(schedule', _, otherAdversarialPeers') -> + Peers + { honestPeers = Map.singleton 1 schedule', + adversarialPeers = otherAdversarialPeers' + } + ) --- | Speeds up an honest schedule after `at` time, by `speedUpBy`. --- This “speeding up” is done by subtracting @speedUpBy@ to all points after @at@, --- and removing those points if they fall before `at`. We check that the operation --- doesn't change the final state of the peer, i.e. it doesn't remove all TP, HP, and BP --- in the sped up part. -speedUpHonestSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) -speedUpHonestSchedule at speedUpBy sch = +-- | Speeds up _the_ schedule (that is, the one that we are actually trying to +-- speed up) after `at` time, by `speedUpBy`. This "speeding up" is done by +-- removing `speedUpBy` to all points after `at`, and removing those points if +-- they fall before `at`. We check that the operation doesn't change the final +-- state of the peer, i.e. it doesn't remove all TP, HP, and BP in the sped up +-- part. +speedUpTheSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) +speedUpTheSchedule at speedUpBy sch = if stillValid then Just $ beforeSplit ++ spedUpSchedule else Nothing where (beforeSplit, afterSplit) = span ((< at) . fst) sch @@ -120,12 +160,12 @@ speedUpHonestSchedule at speedUpBy sch = hasHP = any (\case (_, ScheduleHeaderPoint _) -> True; _ -> False) hasBP = any (\case (_, ScheduleBlockPoint _) -> True; _ -> False) --- | Speeds up an adversarial schedule after `at` time, by `speedUpBy`. --- This "speeding up" is done by removing `speedUpBy` to all points after `at`. --- We check that the schedule had no points between `at` and `at + speedUpBy`. --- We also keep the last point where it is, so that the end time stays the same. 
-speedUpAdversarialSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) -speedUpAdversarialSchedule at speedUpBy sch = +-- | Speeds up the other schedules after `at` time, by `speedUpBy`. This +-- "speeding up" is done by removing `speedUpBy` to all points after `at`. We +-- check that the schedule had no points between `at` and `at + speedUpBy`. We +-- also keep the last point where it is, so that the end time stays the same. +speedUpOtherSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) +speedUpOtherSchedule at speedUpBy sch = if losesPoint then Nothing else Just $ beforeSplit ++ spedUpSchedule ++ lastPoint where (beforeSplit, afterSplit) = span ((< at) . fst) sch diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs index 8332f7be83..ed3b1a0003 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs @@ -11,7 +11,7 @@ import Test.Consensus.Genesis.Setup (genChains) import Test.Consensus.Genesis.Tests.Uniform (genUniformSchedulePoints) import Test.Consensus.PointSchedule (PeerSchedule, PeersSchedule, prettyPeersSchedule) -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..)) +import Test.Consensus.PointSchedule.Peers (Peers (..)) import Test.Consensus.PointSchedule.Shrinking (shrinkHonestPeer) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.QuickCheck (Property, conjoin, counterexample) @@ -45,7 +45,7 @@ lastM [a] = Just a lastM (_:ps) = lastM ps samePeers :: PeersSchedule blk -> PeersSchedule blk -> Bool -samePeers sch1 sch2 = (keys $ others sch1) == (keys $ others sch2) +samePeers sch1 sch2 = (keys $ adversarialPeers sch1) == (keys $ adversarialPeers sch2) -- | Checks whether at least one peer schedule in the second given peers schedule -- is shorter than its corresponding one in the fist given peers schedule. “Shorter” @@ -84,8 +84,8 @@ doesNotRemoveAdversarialPoints original shrunk = samePeers original shrunk && (and $ zipWith (\oldSch newSch -> fmap snd oldSch == fmap snd newSch) - (toList $ (fmap value) $ others original) - (toList $ (fmap value) $ others shrunk) + (toList $ adversarialPeers original) + (toList $ adversarialPeers shrunk) ) checkShrinkProperty :: (PeersSchedule TestBlock -> PeersSchedule TestBlock -> Bool) -> Property diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs new file mode 100644 index 0000000000..470e3f43df --- /dev/null +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Util/PartialAccessors.hs @@ -0,0 +1,42 @@ +{-# LANGUAGE NamedFieldPuns #-} + +-- | Helpers to access particular parts of trees and schedules +-- Those functions are partial, and are designed to only be used in tests. +-- We know they won't fail there, because we generated the structures +-- with the correct properties. 
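A typical use in a test property, for illustration only (assuming a generated @gt :: GenesisTestFull TestBlock@ as in the shrinking tests above), would be:

  let trunkTip    = getTrunkTip (gtBlockTree gt)                 -- tip of the honest chain
      honestSched = getHonestPeer (honestPeers (gtSchedule gt))  -- the single honest schedule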
+module Test.Util.PartialAccessors ( + getHonestPeer + , getOnlyBranch + , getOnlyBranchTip + , getTrunkTip + ) where + +import qualified Data.Map as Map +import qualified Ouroboros.Network.AnchoredFragment as AF +import Ouroboros.Network.Block (HasHeader) +import Test.Consensus.BlockTree + +getOnlyBranch :: BlockTree blk -> BlockTreeBranch blk +getOnlyBranch BlockTree {btBranches} = case btBranches of + [branch] -> branch + _ -> error "tree must have exactly one alternate branch" + +getTrunkTip :: HasHeader blk => BlockTree blk -> blk +getTrunkTip tree = case btTrunk tree of + (AF.Empty _) -> error "tree must have at least one block" + (_ AF.:> tipBlock) -> tipBlock + +getOnlyBranchTip :: HasHeader blk => BlockTree blk -> blk +getOnlyBranchTip BlockTree {btBranches} = case btBranches of + [branch] -> case btbFull branch of + (AF.Empty _) -> error "alternate branch must have at least one block" + (_ AF.:> tipBlock) -> tipBlock + _ -> error "tree must have exactly one alternate branch" + +getHonestPeer :: Map.Map Int a -> a +getHonestPeer honests = + if Map.size honests /= 1 + then error "there must be exactly one honest peer" + else case Map.lookup 1 honests of + Nothing -> error "the only honest peer must have id 1" + Just p -> p From 1d7fc955de8b266a2a5afc2ce5c3b724ca032190 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Fri, 10 May 2024 12:08:21 +0000 Subject: [PATCH 06/26] =?UTF-8?q?Add=20some=20adversaries=20to=20the=20?= =?UTF-8?q?=E2=80=9Chappy=20path=E2=80=9D=20test?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../Test/Consensus/Genesis/Tests/CSJ.hs | 110 ++++++++++-------- .../Test/Consensus/PointSchedule/Peers.hs | 8 ++ 2 files changed, 72 insertions(+), 46 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs index d0b8489952..d333eec6ee 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs @@ -3,10 +3,10 @@ module Test.Consensus.Genesis.Tests.CSJ (tests) where -import Data.Containers.ListUtils (nubOrd) import Data.List (nub) +import qualified Data.Map.Strict as Map import Data.Maybe (mapMaybe) -import Ouroboros.Consensus.Block (blockSlot, succWithOrigin) +import Ouroboros.Consensus.Block (Header, blockSlot, succWithOrigin) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client (TraceChainSyncClientEvent (..)) import Ouroboros.Consensus.Util.Condense (PaddingDirection (..), @@ -20,52 +20,67 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), import Test.Consensus.PeerSimulator.StateView (StateView (..)) import Test.Consensus.PeerSimulator.Trace (TraceEvent (..)) import Test.Consensus.PointSchedule +import qualified Test.Consensus.PointSchedule.Peers as Peers import Test.Consensus.PointSchedule.Peers (Peers (..), peers') import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () import Test.Util.PartialAccessors -import Test.Util.TestBlock (Header, TestBlock) +import Test.Util.TestBlock (TestBlock) import Test.Util.TestEnv (adjustQuickCheckMaxSize) tests :: TestTree tests = adjustQuickCheckMaxSize (`div` 5) $ - testGroup - "CSJ" - [ testGroup "Happy Path" - [ testProperty "synchronous" $ prop_happyPath True - , testProperty "asynchronous" $ prop_happyPath False + 
testGroup
+      "CSJ"
+      [ testGroup
+          "Happy Path"
+          [ testProperty "honest peers are synchronised" $ prop_CSJ True True,
+            testProperty "honest peers do their own thing" $ prop_CSJ True False
+          ],
+        testGroup
+          "With some adversaries"
+          [ testProperty "honest peers are synchronised" $ prop_CSJ False True,
+            testProperty "honest peers do their own thing" $ prop_CSJ False False
+          ]
       ]
-    ]
 
--- | Test of the “happy path” scenario of ChainSync Jumping (CSJ).
+-- | Test of ChainSync Jumping (CSJ).
 --
--- This test features one chain (ie. a block tree that is only trunk) and only
--- honest peers and syncs the chain in question with CSJ enabled. What we expect
--- to observe is that one of the honest peers becomes the dynamo while the
--- others become jumpers. Because the jumpers will agree to all the jumps, the
--- whole syncing should happen with CSJ without objectors.
+-- This test features several peers that all sync the “honest” chain (ie. the
+-- trunk of the block tree) with CSJ enabled. What we expect to observe is that
+-- one of the honest peers becomes the dynamo while the others become jumpers.
+-- Because the jumpers will agree to all the jumps, the whole syncing should
+-- happen with CSJ.
 --
--- The final property is that headers should only ever be downloaded once and
--- only from one peer (the dynamo). This is true except when almost caught-up:
--- when the dynamo is caught-up, it gets disengaged and one of the jumpers takes
--- its place and starts serving headers. This might lead to duplication of
--- headers, but only in a window of @jumpSize@ slots near the tip of the chain.
+-- There are two variants of this test: the “happy path” variant features no
+-- adversaries. As such, everything should happen with one dynamo and no
+-- objector. Another variant adds adversaries, so we expect to see some
+-- dynamo-vs-objector action.
 --
--- The boolean differentiates between “synchronous” and “asynchronous”
--- scenarios. In a synchronous scenario, all the honest peers have the same
--- schedule: they serve the chain exactly in the same way. In the asynchronous
--- scenario, a random schedule is generated for each peer (but they still serve
--- the same chain).
+-- Regardless, the final property is that “honest” headers should be
+-- downloaded at most once from honest peers. They may however be downloaded
+-- several times from adversaries. This is true except when almost caught-up:
+-- when the dynamo or objector is caught-up, it gets disengaged and one of the
+-- jumpers takes its place and starts serving headers. This might lead to
+-- duplication of headers, but only in a window of @jumpSize@ slots near the tip
+-- of the chain.
+--
+-- The first boolean differentiates between the “happy path” variant and the
+-- variant with adversaries; the second boolean differentiates between
+-- “synchronous” and “asynchronous” scenarios. In a synchronous scenario, all
+-- the honest peers have the same schedule: they serve the chain exactly in the
+-- same way. In the asynchronous scenario, a random schedule is generated for
+-- each peer (but they still serve the same chain).
+prop_CSJ :: Bool -> Bool -> Property +prop_CSJ happy synchronized = forAllGenesisTest ( if synchronized - then genChainsWithExtraHonestPeers (choose (2, 4)) (pure 0) - `enrichedWith` genUniformSchedulePoints - else genChains (pure 0) + then genChains (if happy then pure 0 else choose (2, 4)) `enrichedWith` genDuplicatedHonestSchedule + else genChainsWithExtraHonestPeers (choose (2, 4)) (if happy then pure 0 else choose (2, 4)) + `enrichedWith` genUniformSchedulePoints ) ( defaultSchedulerConfig { scEnableCSJ = True @@ -83,43 +98,46 @@ prop_happyPath synchronized = -- The list of 'TraceDownloadedHeader' events that are not newer than -- jumpSize from the tip of the chain. These are the ones that we -- expect to see only once per header if CSJ works properly. - headerDownloadEvents = + headerHonestDownloadEvents = mapMaybe (\case TraceChainSyncClientEvent pid (TraceDownloadedHeader hdr) | not (isNewerThanJumpSizeFromTip gt hdr) + , Peers.HonestPeer _ <- pid -> Just (pid, hdr) _ -> Nothing ) svTrace - receivedHeadersOnlyOnce = length (nub $ snd <$> headerDownloadEvents) == length headerDownloadEvents - -- NOTE: If all the headers are newer than jumpSize from the tip, then - -- 'headerDownloadEvents' is empty and the following condition would - -- violated if we used @==@. - receivedHeadersFromOnlyOnePeer = length (nubOrd $ fst <$> headerDownloadEvents) <= 1 + receivedHeadersAtMostOnceFromHonestPeers = + length (nub $ snd <$> headerHonestDownloadEvents) == length headerHonestDownloadEvents in tabulate "" - [ if headerDownloadEvents == [] - then "All headers may be downloaded twice (uninteresting test)" + [ if headerHonestDownloadEvents == [] + then "All headers are within the last jump window" else "There exist headers that have to be downloaded exactly once" ] $ counterexample ("Downloaded headers (except jumpSize slots near the tip):\n" ++ ( unlines $ fmap (" " ++) $ zipWith (\peer header -> peer ++ " | " ++ header) - (condenseListWithPadding PadRight $ fst <$> headerDownloadEvents) - (condenseListWithPadding PadRight $ snd <$> headerDownloadEvents) + (condenseListWithPadding PadRight $ fst <$> headerHonestDownloadEvents) + (condenseListWithPadding PadRight $ snd <$> headerHonestDownloadEvents) ) ) - (receivedHeadersOnlyOnce && receivedHeadersFromOnlyOnePeer) + receivedHeadersAtMostOnceFromHonestPeers ) where genDuplicatedHonestSchedule :: GenesisTest TestBlock () -> Gen (PeersSchedule TestBlock) - genDuplicatedHonestSchedule gt@GenesisTest{gtExtraHonestPeers} = do - Peers {honestPeers} <- genUniformSchedulePoints gt - pure $ peers' - (replicate (fromIntegral gtExtraHonestPeers + 1) (getHonestPeer honestPeers)) - [] + genDuplicatedHonestSchedule gt@GenesisTest {gtExtraHonestPeers} = do + Peers {honestPeers, adversarialPeers} <- genUniformSchedulePoints gt + pure $ + Peers.unionWithKey + (\_ _ _ -> error "should not happen") + ( peers' + (replicate (fromIntegral gtExtraHonestPeers + 1) (getHonestPeer honestPeers)) + [] + ) + (Peers Map.empty adversarialPeers) isNewerThanJumpSizeFromTip :: GenesisTestFull TestBlock -> Header TestBlock -> Bool isNewerThanJumpSizeFromTip gt hdr = diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs index 13742d1d65..973de5ef3a 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Peers.hs @@ 
-35,6 +35,7 @@ module Test.Consensus.PointSchedule.Peers ( , peersOnlyHonest , toMap , toMap' + , unionWithKey , updatePeer ) where @@ -204,6 +205,13 @@ peersFromPeerList peers = ) ([], []) +unionWithKey :: (PeerId -> a -> a -> a) -> Peers a -> Peers a -> Peers a +unionWithKey f peers1 peers2 = + Peers + { honestPeers = Map.unionWithKey (f . HonestPeer) (honestPeers peers1) (honestPeers peers2), + adversarialPeers = Map.unionWithKey (f . AdversarialPeer) (adversarialPeers peers1) (adversarialPeers peers2) + } + -- | Make a 'Peers' structure from a list of peer ids and a default value. peersFromPeerIdList :: [PeerId] -> a -> Peers a peersFromPeerIdList = flip $ \val -> peersFromPeerList . fmap (flip Peer val) From cb5b69ad2d07089b93fc836fd0b91ab7009b461e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Tue, 7 May 2024 12:54:07 +0000 Subject: [PATCH 07/26] Edit documentation of LoE and GDD Also: * Remove ill-defined LoELimit * Remove unused updateLoEFragStall * Rename runGdd to runGDDGovernor * Print GDD in traces instead of GDG * Remove the UpdateLoEFrag callback * Reorder functions in the Governor module --- .../Test/Consensus/PeerSimulator/Run.hs | 14 +- .../Test/Consensus/PeerSimulator/Trace.hs | 2 +- .../Ouroboros/Consensus/Genesis/Governor.hs | 294 +++++++++--------- .../Consensus/Storage/ChainDB/API.hs | 32 +- .../Storage/ChainDB/Impl/ChainSel.hs | 19 +- .../Consensus/Storage/ChainDB/Impl/Paths.hs | 6 +- 6 files changed, 176 insertions(+), 191 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index c3c6298f43..a5873f5a0f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -21,8 +21,7 @@ import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config (TopLevelConfig (..)) -import Ouroboros.Consensus.Genesis.Governor (runGdd, - updateLoEFragGenesis) +import Ouroboros.Consensus.Genesis.Governor (runGDDGovernor) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client @@ -365,11 +364,16 @@ startNode schedulerConfig genesisTest interval = do -- peer fragments than registered clients. 
BlockFetch.startBlockFetchLogic lrRegistry lrTracer lnChainDb fetchClientRegistry getCandidates - let gdd = updateLoEFragGenesis lrConfig (mkGDDTracerTestBlock lrTracer) (readTVar handles) - gddTrigger = viewChainSyncState handles (\ s -> (csLatestSlot s, csIdling s)) + let gddTrigger = viewChainSyncState handles (\ s -> (csLatestSlot s, csIdling s)) for_ lrLoEVar $ \ var -> do forkLinkedThread lrRegistry "LoE updater background" $ - void $ runGdd gdd var lnChainDb gddTrigger + void $ runGDDGovernor + lrConfig + (mkGDDTracerTestBlock lrTracer) + (readTVar handles) + var + lnChainDb + gddTrigger where LiveResources {lrRegistry, lrTracer, lrConfig, lrPeerSim, lrLoEVar} = resources diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs index 18eddfcced..06b03ef95f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs @@ -483,7 +483,7 @@ terseGDDEvent :: TraceGDDEvent PeerId TestBlock -> String terseGDDEvent = \case TraceGDDEvent {sgen = GenesisWindow sgen, curChain, bounds, candidates, candidateSuffixes, losingPeers, loeHead} -> unlines $ [ - "GDG | Window: " ++ window sgen loeHead, + "GDD | Window: " ++ window sgen loeHead, " Selection: " ++ terseHFragment curChain, " Candidates:" ] ++ diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs index 9a4a34ee59..7d515b0069 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs @@ -13,25 +13,23 @@ -- The GDD governor is the component responsible for identifying and -- disconnecting peers offering sparser chains than the best. This has the -- effect of unblocking the Limit on Eagerness, since removing disagreeing --- peers allows the current selection to advance. +-- peers allows the current selection to advance. See +-- 'Ouroboros.Consensus.Storage.ChainDB.API.LoE' for more details. -- --- The GDD governor, invoked with 'runGdd', is supposed to run in a background +-- The GDD governor, invoked with 'runGDDGovernor', is supposed to run in a background -- thread. It evaluates candidate chains whenever they change, or whenever a -- peer claims to have no more headers, or whenever a peer starts sending -- headers beyond the forecast horizon. -- --- Whenever GDD disconnects peers, the chain selection is updated. +-- Whenever GDD disconnects peers, and as a result the youngest header present +-- in all candidate fragments changes, the chain selection is updated. -- module Ouroboros.Consensus.Genesis.Governor ( DensityBounds (..) , TraceGDDEvent (..) - , UpdateLoEFrag (..) 
, densityDisconnect - , runGdd + , runGDDGovernor , sharedCandidatePrefix - , updateLoEFragGenesis - , updateLoEFragStall - , updateLoEFragUnconditional ) where import Control.Monad (guard, when) @@ -43,7 +41,6 @@ import qualified Data.Map.Strict as Map import Data.Maybe (maybeToList) import Data.Void import Data.Word (Word64) -import GHC.Generics (Generic) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config (TopLevelConfig, configLedger, configSecurityParam) @@ -68,29 +65,124 @@ import Ouroboros.Consensus.Util.STM (blockUntilChanged) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import qualified Ouroboros.Network.AnchoredFragment as AF --- | An action representing an update to the LoE fragment, that determines which --- blocks can be selected in the ChainDB. With Ouroboros Genesis, this is --- implemented via the GDD governor, see 'updateLoEFragGenesis'. +-- | A never ending computation that evaluates the GDD rule whenever +-- the STM action @getTrigger@ yields a different result, writing the LoE +-- fragment to @varLoEFrag@, and then triggering ChainSel to reprocess all +-- blocks that had previously been postponed by the LoE. -- --- The callback is applied to the current chain and the current ledger state, --- and yields the new LoE fragment, which should be anchored in the immutable --- tip. -data UpdateLoEFrag m blk = UpdateLoEFrag { - updateLoEFrag :: - AnchoredFragment (Header blk) - -> ExtLedgerState blk - -> m (AnchoredFragment (Header blk)) - } - deriving stock (Generic) - deriving anyclass (NoThunks) +-- Evaluating the GDD rule might cause peers to be disconnected if they have +-- sparser chains than the best chain. +-- +-- The LoE fragment is the fragment anchored at the immutable tip and ending at +-- the LoE tip. +-- +-- @getHandles@ is the callback to get the handles that allow to disconnect +-- from peers. +-- +runGDDGovernor :: + ( Monoid a, + Eq a, + HasHardForkHistory blk, + IOLike m, + LedgerSupportsProtocol blk, + Ord peer + ) => + TopLevelConfig blk -> + Tracer m (TraceGDDEvent peer blk) -> + STM m (Map peer (ChainSyncClientHandle m blk)) -> + StrictTVar m (AnchoredFragment (Header blk)) -> + ChainDB m blk -> + STM m a -> + m Void +runGDDGovernor cfg tracer getHandles varLoEFrag chainDb getTrigger = + spin mempty + where + spin oldTrigger = do + (newTrigger, curChain, curLedger) <- atomically $ do + (_, newTrigger) <- blockUntilChanged id oldTrigger getTrigger + curChain <- ChainDB.getCurrentChain chainDb + curLedger <- ChainDB.getCurrentLedger chainDb + pure (newTrigger, curChain, curLedger) + loeFrag <- evaluateGDD cfg tracer getHandles curChain curLedger + oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag + -- The chain selection only depends on the LoE tip, so there + -- is no point in retriggering it if the LoE tip hasn't changed. + when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $ + triggerChainSelectionAsync chainDb + spin newTrigger --- | A dummy version of the LoE that sets the LoE fragment to the current --- selection. This can be seen as emulating Praos behavior. -updateLoEFragUnconditional :: - MonadSTM m => - UpdateLoEFrag m blk -updateLoEFragUnconditional = - UpdateLoEFrag $ \ curChain _ -> pure curChain +-- | Disconnect peers that lose density comparisons and recompute the LoE fragment. +-- +-- Disconnecting peers causes candidate fragments to be removed, which causes +-- the GDD governor to reevaluate GDD over and over until no more peers are +-- disconnected. 
+-- +-- @getHandles@ is the callback to get the handles that allow to disconnect +-- from peers. +-- +-- @curChain@ is the current chain selection. +-- +-- @immutableLedgerSt@ is the current ledger state. +-- +-- Yields the new LoE fragment. +-- +evaluateGDD :: + forall m blk peer. + ( IOLike m + , Ord peer + , LedgerSupportsProtocol blk + , HasHardForkHistory blk + ) + => TopLevelConfig blk + -> Tracer m (TraceGDDEvent peer blk) + -> STM m (Map peer (ChainSyncClientHandle m blk)) + -> AnchoredFragment (Header blk) + -> ExtLedgerState blk + -> m (AnchoredFragment (Header blk)) +evaluateGDD cfg tracer getHandles curChain immutableLedgerSt = do + (states, candidates, candidateSuffixes, handles, loeFrag) <- atomically $ do + handles <- getHandles + states <- traverse (readTVar . cschState) handles + let + candidates = csCandidate <$> states + (loeFrag, candidateSuffixes) = + sharedCandidatePrefix curChain candidates + pure (states, candidates, candidateSuffixes, handles, loeFrag) + + let msgen :: Maybe GenesisWindow + -- This could also use 'runWithCachedSummary' if deemed desirable. + msgen = eitherToMaybe $ runQuery qry summary + where + -- We use the Genesis window for the first slot /after/ the common + -- intersection. In particular, when the intersection is the last + -- slot of an era, we will use the Genesis window of the next era, + -- as all slots in the Genesis window reside in that next era. + slot = succWithOrigin $ AF.headSlot loeFrag + qry = qryFromExpr $ slotToGenesisWindow slot + summary = + hardForkSummary + (configLedger cfg) + -- Due to the cross-chain lemma (Property 17.3 in the Consensus + -- report) one could also use the ledger state at the tip of our + -- selection here (in which case this should never return + -- 'Nothing'), but this is subtle and maybe not desirable. + -- + -- In any case, the immutable ledger state will also + -- /eventually/ catch up to the LoE tip, so @msgen@ won't be + -- 'Nothing' forever. + (ledgerState immutableLedgerSt) + + whenJust msgen $ \sgen -> do + let + (losingPeers, bounds) = + densityDisconnect sgen (configSecurityParam cfg) states candidateSuffixes loeFrag + loeHead = AF.headAnchor loeFrag + + traceWith tracer TraceGDDEvent {sgen, curChain, bounds, candidates, candidateSuffixes, losingPeers, loeHead} + + for_ losingPeers $ \peer -> cschGDDKill (handles Map.! peer) + + pure loeFrag -- | Compute the fragment @loeFrag@ between the immutable tip and the -- earliest intersection between @curChain@ and any of the @candidates@. @@ -121,55 +213,6 @@ sharedCandidatePrefix curChain candidates = -- 'Map' via 'mapMaybe'. Map.mapMaybe splitAfterImmutableTip candidates --- | This version of the LoE implements part of the intended Genesis approach. --- The fragment is set to the prefix of all candidates, ranging from the --- immutable tip to the earliest intersection of all peers. --- --- Using this will cause ChainSel to stall indefinitely, or until a peer --- disconnects for unrelated reasons. --- In the future, the Genesis Density Disconnect Governor variant will extend --- this with an analysis that will always result in disconnections from peers --- to ensure the selection can advance. 
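Conceptually, the LoE fragment used above is the longest prefix that the current selection shares with every candidate fragment, re-anchored at the immutable tip. On plain lists of block identifiers the idea reduces to a common-prefix computation (an analogy only; the real 'sharedCandidatePrefix' works on 'AnchoredFragment's and also handles candidates that do not intersect the selection):

  -- Longest prefix of the selection that every candidate agrees with.
  loePrefix :: Eq blkId => [blkId] -> [[blkId]] -> [blkId]
  loePrefix selection candidates = foldr commonPrefix selection candidates
    where
      commonPrefix (x : xs) (y : ys) | x == y = x : commonPrefix xs ys
      commonPrefix _ _ = []

  -- >>> loePrefix "ABCD" ["ABE", "ABCF"]
  -- "AB"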
-updateLoEFragStall :: - MonadSTM m => - GetHeader blk => - STM m (Map peer (AnchoredFragment (Header blk))) -> - UpdateLoEFrag m blk -updateLoEFragStall getCandidates = - UpdateLoEFrag $ \ curChain _ -> - atomically $ do - candidates <- getCandidates - pure (fst (sharedCandidatePrefix curChain candidates)) - --- | A never ending computation that runs the GDD governor whenever --- the STM action @getTrigger@ changes, writing the LoE fragment --- computed by @loEUpdater@ to @varLoEFrag@, and then triggering --- ChainSel to reprocess all blocks that had previously been --- postponed by the LoE. -runGdd :: - (Monoid a, Eq a, IOLike m, LedgerSupportsProtocol blk) => - UpdateLoEFrag m blk -> - StrictTVar m (AnchoredFragment (Header blk)) -> - ChainDB m blk -> - STM m a -> - m Void -runGdd loEUpdater varLoEFrag chainDb getTrigger = - spin mempty - where - spin oldTrigger = do - (newTrigger, curChain, curLedger) <- atomically $ do - (_, newTrigger) <- blockUntilChanged id oldTrigger getTrigger - curChain <- ChainDB.getCurrentChain chainDb - curLedger <- ChainDB.getCurrentLedger chainDb - pure (newTrigger, curChain, curLedger) - loeFrag <- updateLoEFrag loEUpdater curChain curLedger - oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag - -- The chain selection only depends on the LoE tip, so there - -- is no point in retriggering it if the LoE tip hasn't changed. - when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $ - triggerChainSelectionAsync chainDb - spin newTrigger - data DensityBounds blk = DensityBounds { clippedFragment :: AnchoredFragment (Header blk), @@ -194,8 +237,14 @@ data DensityBounds blk = -- ChainSync instruction the peer sent, and whether the peer is idling (i.e. it -- sent @MsgAwaitReply@). -- --- @loeFrag@ is the fragment from the immutable tip to the first intersection --- with a candidate fragment. +-- @loeFrag@ is the fragment anchored at the immutable tip and ending in the +-- LoE tip. +-- +-- ChainSync jumping depends on this function to disconnect either of any two +-- peers that offer different chains and provided a header in the last slot of +-- the genesis window or later. Either of them should be disconnected, even if +-- both of them are serving adversarial chains. See +-- "Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping" for more details. -- densityDisconnect :: ( Ord peer @@ -281,6 +330,9 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe guard $ AF.lastPoint frag0 /= AF.lastPoint frag1 -- peer1 offers more than k blocks or peer0 has sent all headers in the -- genesis window after the intersection (idling or not) + -- + -- Checking for offersMoreThanK is important to avoid disconnecting + -- competing honest peers when the syncing node is nearly caught up. guard $ offersMoreThanK || lb0 == ub0 -- peer1 has the same or better density than peer0 -- If peer0 is idling, we assume no more headers will be sent. @@ -292,6 +344,15 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe -- This matters to ChainSync jumping, where adversarial dynamo and -- objector could offer chains of equal density. guard $ lb1 >= (if idling0 then lb0 else ub0) + + -- We disconnect peer0 if there is at least another peer peer1 with a + -- chain which is at least as good, and peer0 is either idling or there is + -- no extension to peer0's chain that can make it better than peer1's, and + -- peer1's has more than k headers or peer0 has sent all its headers in + -- the genesis window anchored at the intersection. 
+ -- + -- A chain is "as good as another" if it has at least as many headers in + -- the genesis window anchored at the intersection. pure peer0 loeIntersectionSlot = AF.headSlot loeFrag @@ -338,74 +399,3 @@ data TraceGDDEvent peer blk = loeHead :: AF.Anchor (Header blk), sgen :: GenesisWindow } - --- | Update the LoE fragment. --- --- See 'UpdateLoEFrag' for the definition of LoE fragment. --- --- Additionally, disconnect the peers that lose density comparisons. --- --- Disconnecting peers causes chain fragments to be removed, which causes --- the LoE fragment to be updated over and over until no more peers are --- disconnected. --- --- @getCandidates@ is the callback to obtain the candidate fragments --- --- @getHandles@ is the callback to get the handles that allow to disconnect --- from peers. -updateLoEFragGenesis :: - forall m blk peer. - ( IOLike m - , Ord peer - , LedgerSupportsProtocol blk - , HasHardForkHistory blk - ) - => TopLevelConfig blk - -> Tracer m (TraceGDDEvent peer blk) - -> STM m (Map peer (ChainSyncClientHandle m blk)) - -> UpdateLoEFrag m blk -updateLoEFragGenesis cfg tracer getHandles = - UpdateLoEFrag $ \ curChain immutableLedgerSt -> do - (states, candidates, candidateSuffixes, handles, loeFrag) <- atomically $ do - handles <- getHandles - states <- traverse (readTVar . cschState) handles - let - candidates = csCandidate <$> states - (loeFrag, candidateSuffixes) = - sharedCandidatePrefix curChain candidates - pure (states, candidates, candidateSuffixes, handles, loeFrag) - - let msgen :: Maybe GenesisWindow - -- This could also use 'runWithCachedSummary' if deemed desirable. - msgen = eitherToMaybe $ runQuery qry summary - where - -- We use the Genesis window for the first slot /after/ the common - -- intersection. In particular, when the intersection is the last - -- slot of an era, we will use the Genesis window of the next era, - -- as all slots in the Genesis window reside in that next era. - slot = succWithOrigin $ AF.headSlot loeFrag - qry = qryFromExpr $ slotToGenesisWindow slot - summary = - hardForkSummary - (configLedger cfg) - -- Due to the cross-chain lemma (Property 17.3 in the Consensus - -- report) one could also use the ledger state at the tip of our - -- selection here (in which case this should never return - -- 'Nothing'), but this is subtle and maybe not desirable. - -- - -- In any case, the immutable ledger state will also - -- /eventually/ catch up to the LoE tip, so @msgen@ won't be - -- 'Nothing' forever. - (ledgerState immutableLedgerSt) - - whenJust msgen $ \sgen -> do - let - (losingPeers, bounds) = - densityDisconnect sgen (configSecurityParam cfg) states candidateSuffixes loeFrag - loeHead = AF.headAnchor loeFrag - - traceWith tracer TraceGDDEvent {sgen, curChain, bounds, candidates, candidateSuffixes, losingPeers, loeHead} - - for_ losingPeers $ \peer -> cschGDDKill (handles Map.! peer) - - pure loeFrag diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs index 3a5c03c299..9c756543fa 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs @@ -63,12 +63,10 @@ module Ouroboros.Consensus.Storage.ChainDB.API ( -- * Genesis , GetLoEFragment , LoE (..) - , LoELimit (..) 
) where import Control.Monad (void) import Data.Typeable (Typeable) -import Data.Word (Word64) import GHC.Generics (Generic) import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderStateHistory @@ -864,35 +862,29 @@ instance (Typeable blk, StandardHash blk) => Exception (ChainDbError blk) where InvalidIteratorRange {} -> "An invalid range of blocks was requested" --- | The Limit on Eagerness is a mechanism for keeping ChainSel from advancing --- the current selection in the case of competing chains. +-- | The Limit on Eagerness (LoE) is a mechanism for keeping ChainSel from +-- advancing the current selection in the case of competing chains. -- --- The Limit on Eagerness prevents the selection of the node from extending --- more than k blocks after the youngest block that is present on all candidate --- fragments. +-- The LoE tip is the youngest header that is present on all candidate +-- fragments. Thus, after the LoE tip, peers either disagree on how the chain +-- follows, or they do not offer more headers. +-- +-- The LoE restrains the current selection of the node to be on the same chain +-- as the LoE tip, and to not extend more than k blocks from it. -- -- It requires a resolution mechanism to prevent indefinite stalling, which -- is implemented by the Genesis Density Disconnection governor, a component --- that implements an 'UpdateLoEFrag' that disconnects from peers with forks --- it considers inferior. +-- that disconnects from peers with forks it considers inferior. +-- See "Ouroboros.Consensus.Genesis.Governor" for details. -- --- This type indicates whether the feature is enabled, and contains a value --- if it is. +-- This type indicates whether LoE is enabled, and contains a value if it is. data LoE a = -- | The LoE is disabled, so ChainSel will not keep the selection from -- advancing. LoEDisabled | - -- | The LoE is enabled, using the security parameter @k@ as the limit. - -- When the selection's tip is @k@ blocks after the earliest intersection of - -- of all candidate fragments, ChainSel will not add new blocks to the - -- selection. + -- | The LoE is enabled. 
LoEEnabled a deriving (Eq, Show, Generic, NoThunks, Functor, Foldable, Traversable) type GetLoEFragment m blk = LoE (m (AnchoredFragment (Header blk))) - -data LoELimit = - LoELimit Word64 - | - LoEUnlimited diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs index 47e142f0fb..28792bca8a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs @@ -39,6 +39,7 @@ import Data.Maybe (isJust, isNothing) import Data.Maybe.Strict (StrictMaybe (..), strictMaybeToMaybe) import Data.Set (Set) import qualified Data.Set as Set +import Data.Word (Word64) import GHC.Stack (HasCallStack) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config @@ -59,7 +60,7 @@ import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Storage.ChainDB.API (AddBlockPromise (..), AddBlockResult (..), BlockComponent (..), ChainType (..), - InvalidBlockReason (..), LoE (..), LoELimit (..)) + InvalidBlockReason (..), LoE (..)) import Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment (InvalidBlockPunishment, noPunishment) import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as InvalidBlockPunishment @@ -175,9 +176,7 @@ initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid suffixesAfterI :: [NonEmpty (HeaderHash blk)] suffixesAfterI = Paths.maximalCandidates succsOf limit (AF.anchorToPoint i) where - limit = case loE of - LoEEnabled _ -> LoELimit k - LoEDisabled -> LoEUnlimited + limit = k <$ loE constructChain :: NonEmpty (HeaderHash blk) @@ -649,7 +648,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- ^ The current chain and ledger -> LoE (AnchoredFragment (Header blk)) -- ^ LoE fragment - -> LoELimit + -> LoE Word64 -- ^ How many extra blocks to select after @b@ at most. -> m (Point blk) addToCurrentChain succsOf curChainAndLedger loeFrag maxExtra = do @@ -732,7 +731,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- ^ The current chain (anchored at @i@) and ledger -> LoE (AnchoredFragment (Header blk)) -- ^ LoE fragment - -> LoELimit + -> LoE Word64 -- ^ How many extra blocks to select after @b@ at most. -> ChainDiff (HeaderFields blk) -- ^ Header fields for @(x,b]@ @@ -805,23 +804,23 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -> AnchoredFragment x -- ^ The fragment with the new block @b@ as its tip, with the same -- anchor as @curChain@. 
- -> Maybe LoELimit + -> Maybe (LoE Word64) computeLoEMaxExtra (LoEEnabled loeFrag) newBlockFrag = -- Both fragments are on the same chain if loeSuffixLength == 0 || rollback == 0 then if rollback > k + loeSuffixLength then Nothing - else Just $ LoELimit $ k + loeSuffixLength - rollback + else Just $ LoEEnabled $ k + loeSuffixLength - rollback else if rollback > k then Nothing - else Just $ LoELimit $ k - rollback + else Just $ LoEEnabled $ k - rollback where d = Diff.diff newBlockFrag loeFrag rollback = Diff.getRollback d loeSuffixLength = fromIntegral $ AF.length (Diff.getSuffix d) computeLoEMaxExtra LoEDisabled _ = - Just LoEUnlimited + Just LoEDisabled mkSelectionChangedInfo :: AnchoredFragment (Header blk) -- ^ old chain diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs index 9624e4a023..77f6c99ae5 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs @@ -71,7 +71,7 @@ maximalCandidates :: forall blk. (ChainHash blk -> Set (HeaderHash blk)) -- ^ @filterByPredecessor@ - -> LoELimit -- ^ Max length of any candidate + -> LoE Word64 -- ^ Max length of any candidate -> Point blk -- ^ @B@ -> [NonEmpty (HeaderHash blk)] -- ^ Each element in the list is a list of hashes from which we can @@ -86,7 +86,7 @@ maximalCandidates succsOf loeLimit b = mapMaybe (NE.nonEmpty . applyLoE) $ go (p , candidate <- go (BlockHash next) ] applyLoE - | LoELimit limit <- loeLimit + | LoEEnabled limit <- loeLimit = take (fromIntegral limit) | otherwise = id @@ -105,7 +105,7 @@ extendWithSuccessors :: forall blk. HasHeader blk => (ChainHash blk -> Set (HeaderHash blk)) -> LookupBlockInfo blk - -> LoELimit -- ^ Max extra length for any suffix + -> LoE Word64 -- ^ Max extra length for any suffix -> ChainDiff (HeaderFields blk) -> NonEmpty (ChainDiff (HeaderFields blk)) extendWithSuccessors succsOf lookupBlockInfo loeLimit diff = From 854d72ec9f60f6ef441cf64e20a9819d9aa86419 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Tue, 14 May 2024 12:34:14 +0000 Subject: [PATCH 08/26] Explain more the shrinking choice of test 'serve adversarial branches' and disable timeouts as they are supposed to be --- .../Test/Consensus/Genesis/Tests/Uniform.hs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index ebde97568c..dce4642f06 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -151,11 +151,19 @@ prop_serveAdversarialBranches = forAllGenesisTest , scTrace = False , scEnableLoE = True , scEnableCSJ = True + , scEnableLoP = False + , scEnableChainSyncTimeouts = False + , scEnableBlockFetchTimeouts = False }) -- We cannot shrink by removing points from the adversarial schedules. - -- Otherwise, the immutable tip could get stuck because a peer doesn't - -- send any blocks or headers. + -- Removing ticks could make an adversary unable to serve any blocks or headers. 
+ -- Because LoP and timeouts are disabled, this would cause the immutable tip + -- to get stuck indefinitely, as the adversary wouldn't get disconnected. + -- + -- We don't enable timeouts in this test and we don't wait long enough for + -- timeouts to expire. The leashing attack tests are testing the timeouts + -- together with LoP. shrinkByRemovingAdversaries theProperty From dd651b114624cbe86486805a256387f8adbdfb45 Mon Sep 17 00:00:00 2001 From: Alexander Esgen Date: Thu, 4 Apr 2024 12:51:13 +0200 Subject: [PATCH 09/26] NodeKernel: integrate GSM and LoE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Also: * LoEEnabled: make payload strict to avoid NoThunks failures * LoE: allow to dynamically en-/disable * Depending on the state of the GSM, we want to either en- or disable the LoE. * Refactor `runGdd` to use `Watcher`, share trigger logic Previously, we would duplicate the logic for when to trigger the GDD between the NodeKernel and the peer simulator. * Add GDD tracing The `Show` instances are probably way too large ATM, but there are currently unused, so that isn't a pressing concern. * LoP rate: fix typo 500/s = 1/2ms, not 2/ms 🤦 --- .../ouroboros-consensus-diffusion.cabal | 1 + .../Ouroboros/Consensus/Node.hs | 33 ++-- .../Ouroboros/Consensus/Node/Genesis.hs | 112 ++++++++++++ .../Ouroboros/Consensus/Node/Tracers.hs | 5 + .../Ouroboros/Consensus/NodeKernel.hs | 33 +++- .../Test/ThreadNet/Network.hs | 2 + .../Consensus/PeerSimulator/NodeLifecycle.hs | 2 +- .../Test/Consensus/PeerSimulator/Run.hs | 17 +- .../Ouroboros/Consensus/Genesis/Governor.hs | 159 ++++++++++++------ .../Consensus/Storage/ChainDB/API.hs | 4 +- .../Consensus/Storage/ChainDB/Impl.hs | 5 +- .../Consensus/Storage/ChainDB/Impl/Args.hs | 2 +- .../Storage/ChainDB/Impl/ChainSel.hs | 11 +- .../Consensus/Storage/ChainDB/Impl/Types.hs | 2 +- .../Test/Util/ChainDB.hs | 2 +- 15 files changed, 297 insertions(+), 93 deletions(-) create mode 100644 ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs diff --git a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal index 39d63d5e69..3b3b0807e2 100644 --- a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal +++ b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal @@ -65,6 +65,7 @@ library Ouroboros.Consensus.Node.Exit Ouroboros.Consensus.Node.ExitPolicy Ouroboros.Consensus.Node.GSM + Ouroboros.Consensus.Node.Genesis Ouroboros.Consensus.Node.Recovery Ouroboros.Consensus.Node.RethrowPolicy Ouroboros.Consensus.Node.Tracers diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs index 9fc7c83b3c..c781ddf7f2 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs @@ -89,6 +89,9 @@ import Ouroboros.Consensus.Node.DbLock import Ouroboros.Consensus.Node.DbMarker import Ouroboros.Consensus.Node.ErrorPolicy import Ouroboros.Consensus.Node.ExitPolicy +import Ouroboros.Consensus.Node.Genesis (GenesisConfig (..), + GenesisNodeKernelArgs, GenesisSwitch (..), + mkGenesisNodeKernelArgs) import Ouroboros.Consensus.Node.GSM (GsmNodeKernelArgs (..)) import qualified Ouroboros.Consensus.Node.GSM as GSM import 
Ouroboros.Consensus.Node.InitStorage @@ -193,6 +196,8 @@ data RunNodeArgs m addrNTN addrNTC blk (p2p :: Diffusion.P2P) = RunNodeArgs { , rnPeerSharing :: PeerSharing , rnGetUseBootstrapPeers :: STM m UseBootstrapPeers + + , rnGenesisConfig :: GenesisSwitch GenesisConfig } @@ -249,11 +254,7 @@ data LowLevelRunNodeArgs m addrNTN addrNTC versionDataNTN versionDataNTC blk -- | See 'NTN.ChainSyncTimeout' , llrnChainSyncTimeout :: m NTN.ChainSyncTimeout - -- | See 'CsClient.ChainSyncLoPBucketConfig' - , llrnChainSyncLoPBucketConfig :: ChainSyncLoPBucketConfig - - -- | See 'CsClient.CSJConfig' - , llrnCSJConfig :: CSJConfig + , llrnGenesisConfig :: GenesisSwitch GenesisConfig -- | How to run the data diffusion applications -- @@ -413,6 +414,9 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = llrnMaxClockSkew systemTime + (genesisArgs, setLoEinChainDbArgs) <- + mkGenesisNodeKernelArgs llrnGenesisConfig + let maybeValidateAll | lastShutDownWasClean = id @@ -428,7 +432,8 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = initLedger llrnMkHasFS llrnChainDbArgsDefaults - ( maybeValidateAll + ( setLoEinChainDbArgs + . maybeValidateAll . llrnCustomiseChainDbArgs ) @@ -474,6 +479,7 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = gsmMarkerFileView rnGetUseBootstrapPeers llrnPublicPeerSelectionStateVar + genesisArgs nodeKernel <- initNodeKernel nodeKernelArgs rnNodeKernelHook registry nodeKernel @@ -521,8 +527,12 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = (NTN.defaultCodecs codecConfig version encAddrNTN decAddrNTN) NTN.byteLimits llrnChainSyncTimeout - llrnChainSyncLoPBucketConfig - llrnCSJConfig + (case llrnGenesisConfig of + GenesisEnabled gcfg -> gcsChainSyncLoPBucketConfig gcfg + GenesisDisabled -> ChainSyncLoPBucketDisabled) + (case llrnGenesisConfig of + GenesisEnabled gcfg -> gcsCSJConfig gcfg + GenesisDisabled -> CSJDisabled) (reportMetric Diffusion.peerMetricsConfiguration peerMetrics) (NTN.mkHandlers nodeKernelArgs nodeKernel) @@ -711,6 +721,7 @@ mkNodeKernelArgs :: -> GSM.MarkerFileView m -> STM m UseBootstrapPeers -> StrictSTM.StrictTVar m (Diffusion.PublicPeerSelectionState addrNTN) + -> GenesisSwitch (GenesisNodeKernelArgs m blk) -> m (NodeKernelArgs m addrNTN (ConnectionId addrNTC) blk) mkNodeKernelArgs registry @@ -727,6 +738,7 @@ mkNodeKernelArgs gsmMarkerFileView getUseBootstrapPeers publicPeerSelectionStateVar + genesisArgs = do let (kaRng, psRng) = split rng return NodeKernelArgs @@ -751,6 +763,7 @@ mkNodeKernelArgs , keepAliveRng = kaRng , peerSharingRng = psRng , publicPeerSelectionStateVar + , genesisArgs } -- | We allow the user running the node to customise the 'NodeKernelArgs' @@ -852,6 +865,7 @@ stdLowLevelRunNodeArgsIO :: stdLowLevelRunNodeArgsIO RunNodeArgs{ rnProtocolInfo , rnEnableP2P , rnPeerSharing + , rnGenesisConfig } $(SafeWildCards.fields 'StdRunNodeArgs) = do llrnBfcSalt <- stdBfcSaltIO @@ -860,8 +874,7 @@ stdLowLevelRunNodeArgsIO RunNodeArgs{ rnProtocolInfo pure LowLevelRunNodeArgs { llrnBfcSalt , llrnChainSyncTimeout = fromMaybe Diffusion.defaultChainSyncTimeout srnChainSyncTimeout - , llrnChainSyncLoPBucketConfig = ChainSyncLoPBucketDisabled - , llrnCSJConfig = CSJDisabled + , llrnGenesisConfig = rnGenesisConfig , llrnCustomiseHardForkBlockchainTimeArgs = id , llrnGsmAntiThunderingHerd , llrnKeepAliveRng diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs 
b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs new file mode 100644 index 0000000000..c221de18ce --- /dev/null +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs @@ -0,0 +1,112 @@ +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Ouroboros.Consensus.Node.Genesis ( + GenesisConfig (..) + , GenesisNodeKernelArgs (..) + , GenesisSwitch (..) + , defaultGenesisConfig + , mkGenesisNodeKernelArgs + , setGetLoEFragment + ) where + +import Control.Monad (join) +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.MiniProtocol.ChainSync.Client + (CSJConfig (..), CSJEnabledConfig (..), + ChainSyncLoPBucketConfig (..), + ChainSyncLoPBucketEnabledConfig (..)) +import qualified Ouroboros.Consensus.Node.GsmState as GSM +import Ouroboros.Consensus.Storage.ChainDB (ChainDbArgs) +import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB +import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.AnchoredFragment (AnchoredFragment) +import qualified Ouroboros.Network.AnchoredFragment as AF + +-- We have multiple other Genesis-related types of a similar shape ('LoE', LoP +-- and CSJ configs), maybe unify? +data GenesisSwitch a = + GenesisDisabled + | GenesisEnabled !a + deriving stock (Show, Functor, Foldable, Traversable) + +-- | Aggregating the various configs for Genesis-related subcomponents. +data GenesisConfig = GenesisConfig { + gcsChainSyncLoPBucketConfig :: !ChainSyncLoPBucketConfig + , gcsCSJConfig :: !CSJConfig + } + +-- TODO justification/derivation from other parameters +defaultGenesisConfig :: GenesisConfig +defaultGenesisConfig = GenesisConfig { + gcsChainSyncLoPBucketConfig = ChainSyncLoPBucketEnabled ChainSyncLoPBucketEnabledConfig { + csbcCapacity = 100_000 -- number of tokens + , csbcRate = 500 -- tokens per second leaking, 1/2ms + } + , gcsCSJConfig = CSJEnabled CSJEnabledConfig { + csjcJumpSize = 3 * 2160 * 20 -- mainnet forecast range + } + } + +-- | Genesis-related arguments needed by the NodeKernel initialization logic. +data GenesisNodeKernelArgs m blk = GenesisNodeKernelArgs { + -- | A TVar containing an action that returns the 'ChainDB.GetLoEFragment' + -- action. We use this extra indirection to update this action after we + -- opened the ChainDB (which happens before we initialize the NodeKernel). + -- After that, this TVar will not be modified again. + gnkaGetLoEFragment :: !(StrictTVar m (ChainDB.GetLoEFragment m blk)) + } + +-- | Create the initial 'GenesisNodeKernelArgs" (with a temporary +-- 'ChainDB.GetLoEFragment' that will be replaced via 'setGetLoEFragment') and a +-- function to update the 'ChainDbArgs' accordingly. +mkGenesisNodeKernelArgs :: + forall m blk a. (IOLike m, GetHeader blk) + => GenesisSwitch a + -> m ( GenesisSwitch (GenesisNodeKernelArgs m blk) + , Complete ChainDbArgs m blk -> Complete ChainDbArgs m blk + ) +mkGenesisNodeKernelArgs = \case + GenesisDisabled -> pure (GenesisDisabled, id) + GenesisEnabled{} -> do + varGetLoEFragment <- newTVarIO $ pure $ + -- Use the most conservative LoE fragment until 'setGetLoEFragment' is + -- called. 
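-- (In other words, an empty fragment anchored at Genesis is the most
-- conservative choice: while it is in place, chain selection will not select
-- blocks that would advance the immutable tip, just as in the 'PreSyncing'
-- case of 'setGetLoEFragment' below.)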
+ ChainDB.LoEEnabled $ AF.Empty AF.AnchorGenesis + let getLoEFragment = join $ readTVarIO varGetLoEFragment + updateChainDbArgs cfg = cfg { ChainDB.cdbsArgs = + (ChainDB.cdbsArgs cfg) { ChainDB.cdbsLoE = getLoEFragment } + } + gnka = GenesisEnabled $ GenesisNodeKernelArgs varGetLoEFragment + pure (gnka, updateChainDbArgs) + +-- | Set 'gnkaGetLoEFragment' to the actual logic for determining the current +-- LoE fragment. +setGetLoEFragment :: + forall m blk. (IOLike m, GetHeader blk) + => STM m GSM.GsmState + -> STM m (AnchoredFragment (Header blk)) + -- ^ The LoE fragment. + -> GenesisNodeKernelArgs m blk + -> m () +setGetLoEFragment readGsmState readLoEFragment ctx = + atomically $ writeTVar (gnkaGetLoEFragment ctx) getLoEFragment + where + getLoEFragment :: ChainDB.GetLoEFragment m blk + getLoEFragment = atomically $ readGsmState >>= \case + -- When the HAA can currently not be guaranteed, we should not select + -- any blocks that would cause our immutable tip to advance, so we + -- return the most conservative LoE fragment. + GSM.PreSyncing -> + pure $ ChainDB.LoEEnabled $ AF.Empty AF.AnchorGenesis + -- When we are syncing, return the current LoE fragment. + GSM.Syncing -> + ChainDB.LoEEnabled <$> readLoEFragment + -- When we are caught up, the LoE is disabled. + GSM.CaughtUp -> + pure ChainDB.LoEDisabled diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Tracers.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Tracers.hs index bfe2a77849..59e4052d92 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Tracers.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Tracers.hs @@ -23,6 +23,7 @@ import Data.Time (UTCTime) import Ouroboros.Consensus.Block import Ouroboros.Consensus.BlockchainTime import Ouroboros.Consensus.Forecast (OutsideForecastRange) +import Ouroboros.Consensus.Genesis.Governor (TraceGDDEvent) import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Mempool (MempoolSize, TraceEventMempool) @@ -65,6 +66,7 @@ data Tracers' remotePeer localPeer blk f = Tracers , keepAliveClientTracer :: f (TraceKeepAliveClient remotePeer) , consensusErrorTracer :: f SomeException , gsmTracer :: f (TraceGsmEvent (Tip blk)) + , gddTracer :: f (TraceGDDEvent remotePeer blk) } instance (forall a. Semigroup (f a)) @@ -86,6 +88,7 @@ instance (forall a. Semigroup (f a)) , keepAliveClientTracer = f keepAliveClientTracer , consensusErrorTracer = f consensusErrorTracer , gsmTracer = f gsmTracer + , gddTracer = f gddTracer } where f :: forall a. 
Semigroup a @@ -115,6 +118,7 @@ nullTracers = Tracers , keepAliveClientTracer = nullTracer , consensusErrorTracer = nullTracer , gsmTracer = nullTracer + , gddTracer = nullTracer } showTracers :: ( Show blk @@ -147,6 +151,7 @@ showTracers tr = Tracers , keepAliveClientTracer = showTracing tr , consensusErrorTracer = showTracing tr , gsmTracer = showTracing tr + , gddTracer = showTracing tr } {------------------------------------------------------------------------------- diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs index b4dd556373..591664d2be 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs @@ -1,11 +1,13 @@ {-# LANGUAGE BangPatterns #-} {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE DuplicateRecordFields #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE NumericUnderscores #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -27,7 +29,6 @@ module Ouroboros.Consensus.NodeKernel ( ) where - import qualified Control.Concurrent.Class.MonadSTM as LazySTM import qualified Control.Concurrent.Class.MonadSTM.Strict as StrictSTM import Control.DeepSeq (force) @@ -52,6 +53,7 @@ import qualified Ouroboros.Consensus.Block as Block import Ouroboros.Consensus.BlockchainTime import Ouroboros.Consensus.Config import Ouroboros.Consensus.Forecast +import Ouroboros.Consensus.Genesis.Governor (gddWatcher) import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended @@ -65,6 +67,8 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client viewChainSyncState) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck (SomeHeaderInFutureCheck) +import Ouroboros.Consensus.Node.Genesis (GenesisNodeKernelArgs, + GenesisSwitch (..), setGetLoEFragment) import Ouroboros.Consensus.Node.GSM (GsmNodeKernelArgs (..)) import qualified Ouroboros.Consensus.Node.GSM as GSM import Ouroboros.Consensus.Node.Run @@ -178,6 +182,7 @@ data NodeKernelArgs m addrNTN addrNTC blk = NodeKernelArgs { , peerSharingRng :: StdGen , publicPeerSelectionStateVar :: StrictSTM.StrictTVar m (PublicPeerSelectionState addrNTN) + , genesisArgs :: GenesisSwitch (GenesisNodeKernelArgs m blk) } initNodeKernel :: @@ -197,6 +202,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers , gsmArgs , peerSharingRng , publicPeerSelectionStateVar + , genesisArgs } = do -- using a lazy 'TVar', 'BlockForging' does not have a 'NoThunks' instance. 
blockForgingVar :: LazySTM.TMVar m [BlockForging m blk] <- LazySTM.newTMVarIO [] @@ -247,7 +253,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers , GSM.setCaughtUpPersistentMark = \upd -> (if upd then GSM.touchMarkerFile else GSM.removeMarkerFile) gsmMarkerFileView - , GSM.writeGsmState = \gsmState -> + , GSM.writeGsmState = \gsmState -> do atomicallyWithMonotonicTime $ \time -> do writeTVar varGsmState gsmState handles <- readTVar varChainSyncHandles @@ -269,6 +275,25 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers ps_POLICY_PEER_SHARE_STICKY_TIME ps_POLICY_PEER_SHARE_MAX_PEERS + case genesisArgs of + GenesisDisabled -> pure () + GenesisEnabled ctx -> do + varLoEFragment <- newTVarIO $ AF.Empty AF.AnchorGenesis + setGetLoEFragment + (readTVar varGsmState) + (readTVar varLoEFragment) + ctx + + void $ forkLinkedWatcher registry "NodeKernel.GDD" $ + gddWatcher + cfg + (gddTracer tracers) + chainDB + (readTVar varGsmState) + -- TODO GDD should only consider (big) ledger peers + (readTVar varChainSyncHandles) + varLoEFragment + void $ forkLinkedThread registry "NodeKernel.blockForging" $ blockForgingController st (LazySTM.takeTMVar blockForgingVar) @@ -344,11 +369,11 @@ initInternalState NodeKernelArgs { tracers, chainDB, registry, cfg } = do varGsmState <- do let GsmNodeKernelArgs {..} = gsmArgs - j <- GSM.initializationGsmState + gsmState <- GSM.initializationGsmState (atomically $ ledgerState <$> ChainDB.getCurrentLedger chainDB) gsmDurationUntilTooOld gsmMarkerFileView - newTVarIO j + newTVarIO gsmState varChainSyncHandles <- newTVarIO mempty mempool <- openMempool registry diff --git a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs index 90dffaa070..7e1b5abcfd 100644 --- a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs +++ b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs @@ -74,6 +74,7 @@ import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck import qualified Ouroboros.Consensus.Network.NodeToNode as NTN import Ouroboros.Consensus.Node.ExitPolicy +import Ouroboros.Consensus.Node.Genesis (GenesisSwitch (..)) import qualified Ouroboros.Consensus.Node.GSM as GSM import Ouroboros.Consensus.Node.InitStorage import Ouroboros.Consensus.Node.NetworkProtocolVersion @@ -1033,6 +1034,7 @@ runThreadNetwork systemTime ThreadNetworkArgs } , getUseBootstrapPeers = pure DontUseBootstrapPeers , publicPeerSelectionStateVar + , genesisArgs = GenesisDisabled } nodeKernel <- initNodeKernel nodeKernelArgs diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs index f83d1c32b5..3d6ea7d04e 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs @@ -135,7 +135,7 @@ mkChainDb resources = do , mcdbNodeDBs = lrCdb }) pure $ args { ChainDB.cdbsArgs = (ChainDB.cdbsArgs args) { - cdbsLoE = readTVarIO <$> lrLoEVar + cdbsLoE = traverse readTVarIO lrLoEVar } } (_, (chainDB, internal)) <- allocate lrRegistry diff --git 
a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index a5873f5a0f..e1376a8dfd 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -15,26 +15,26 @@ import Control.Monad.Class.MonadTime (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (..), nullTracer, traceWith) import Data.Foldable (for_) -import Data.Functor (void) import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config (TopLevelConfig (..)) -import Ouroboros.Consensus.Genesis.Governor (runGDDGovernor) +import Ouroboros.Consensus.Genesis.Governor (gddWatcher) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client (CSJConfig (..), CSJEnabledConfig (..), ChainDbView, ChainSyncClientHandle, ChainSyncLoPBucketConfig (..), - ChainSyncLoPBucketEnabledConfig (..), ChainSyncState (..), - viewChainSyncState) + ChainSyncLoPBucketEnabledConfig (..), viewChainSyncState) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient +import qualified Ouroboros.Consensus.Node.GsmState as GSM import Ouroboros.Consensus.Storage.ChainDB.API import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import Ouroboros.Consensus.Util.Condense (Condense (..)) import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.ResourceRegistry +import Ouroboros.Consensus.Util.STM (forkLinkedWatcher) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import qualified Ouroboros.Network.AnchoredFragment as AF import Ouroboros.Network.BlockFetch (FetchClientRegistry, @@ -364,16 +364,15 @@ startNode schedulerConfig genesisTest interval = do -- peer fragments than registered clients. BlockFetch.startBlockFetchLogic lrRegistry lrTracer lnChainDb fetchClientRegistry getCandidates - let gddTrigger = viewChainSyncState handles (\ s -> (csLatestSlot s, csIdling s)) for_ lrLoEVar $ \ var -> do - forkLinkedThread lrRegistry "LoE updater background" $ - void $ runGDDGovernor + forkLinkedWatcher lrRegistry "LoE updater background" $ + gddWatcher lrConfig (mkGDDTracerTestBlock lrTracer) + lnChainDb + (pure GSM.Syncing) -- TODO actually run GSM (readTVar handles) var - lnChainDb - gddTrigger where LiveResources {lrRegistry, lrTracer, lrConfig, lrPeerSim, lrLoEVar} = resources diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs index 7d515b0069..99df6870e4 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs @@ -6,6 +6,8 @@ {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} -- | Implementation of the GDD governor @@ -26,9 +28,10 @@ -- module Ouroboros.Consensus.Genesis.Governor ( DensityBounds (..) + , GDDStateView (..) , TraceGDDEvent (..) 
, densityDisconnect - , runGDDGovernor + , gddWatcher , sharedCandidatePrefix ) where @@ -39,7 +42,6 @@ import Data.Foldable (for_) import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (maybeToList) -import Data.Void import Data.Word (Word64) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config (TopLevelConfig, configLedger, @@ -55,61 +57,100 @@ import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client (ChainSyncClientHandle (..), ChainSyncState (..)) -import Ouroboros.Consensus.Storage.ChainDB.API (ChainDB, - triggerChainSelectionAsync) +import Ouroboros.Consensus.Node.GsmState +import Ouroboros.Consensus.Storage.ChainDB.API (ChainDB) import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import Ouroboros.Consensus.Util (eitherToMaybe, whenJust) import Ouroboros.Consensus.Util.AnchoredFragment (stripCommonPrefix) import Ouroboros.Consensus.Util.IOLike -import Ouroboros.Consensus.Util.STM (blockUntilChanged) +import Ouroboros.Consensus.Util.STM (Watcher (..)) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import qualified Ouroboros.Network.AnchoredFragment as AF --- | A never ending computation that evaluates the GDD rule whenever --- the STM action @getTrigger@ yields a different result, writing the LoE +-- | A 'Watcher' that evaluates the GDD rule whenever necessary, writing the LoE -- fragment to @varLoEFrag@, and then triggering ChainSel to reprocess all -- blocks that had previously been postponed by the LoE. -- -- Evaluating the GDD rule might cause peers to be disconnected if they have -- sparser chains than the best chain. --- --- The LoE fragment is the fragment anchored at the immutable tip and ending at --- the LoE tip. --- --- @getHandles@ is the callback to get the handles that allow to disconnect --- from peers. --- -runGDDGovernor :: - ( Monoid a, - Eq a, - HasHardForkHistory blk, - IOLike m, - LedgerSupportsProtocol blk, - Ord peer - ) => - TopLevelConfig blk -> - Tracer m (TraceGDDEvent peer blk) -> - STM m (Map peer (ChainSyncClientHandle m blk)) -> - StrictTVar m (AnchoredFragment (Header blk)) -> - ChainDB m blk -> - STM m a -> - m Void -runGDDGovernor cfg tracer getHandles varLoEFrag chainDb getTrigger = - spin mempty +gddWatcher :: + forall m blk peer. + ( IOLike m + , Ord peer + , LedgerSupportsProtocol blk + , HasHardForkHistory blk + ) + => TopLevelConfig blk + -> Tracer m (TraceGDDEvent peer blk) + -> ChainDB m blk + -> STM m GsmState + -> STM m (Map peer (ChainSyncClientHandle m blk)) + -- ^ The ChainSync handles. We trigger the GDD whenever our 'GsmState' + -- changes, and when 'Syncing', whenever any of the candidate fragments + -- changes. Also, we use this to disconnect from peers with insufficient + -- densities. + -> StrictTVar m (AnchoredFragment (Header blk)) + -- ^ The LoE fragment. It starts at a (recent) immutable tip and ends at + -- the common intersection of the candidate fragments. 
+ -> Watcher m + (GsmState, GDDStateView m blk peer) + (Map peer (Maybe (WithOrigin SlotNo), Bool)) +gddWatcher cfg tracer chainDb getGsmState getHandles varLoEFrag = + Watcher { + wInitial = Nothing + , wReader = (,) <$> getGsmState <*> getGDDStateView + , wFingerprint + , wNotify + } where - spin oldTrigger = do - (newTrigger, curChain, curLedger) <- atomically $ do - (_, newTrigger) <- blockUntilChanged id oldTrigger getTrigger - curChain <- ChainDB.getCurrentChain chainDb - curLedger <- ChainDB.getCurrentLedger chainDb - pure (newTrigger, curChain, curLedger) - loeFrag <- evaluateGDD cfg tracer getHandles curChain curLedger - oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag - -- The chain selection only depends on the LoE tip, so there - -- is no point in retriggering it if the LoE tip hasn't changed. - when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $ - triggerChainSelectionAsync chainDb - spin newTrigger + getGDDStateView :: STM m (GDDStateView m blk peer) + getGDDStateView = do + curChain <- ChainDB.getCurrentChain chainDb + immutableLedgerSt <- ChainDB.getImmutableLedger chainDb + handles <- getHandles + states <- traverse (readTVar . cschState) handles + pure GDDStateView { + gddCtxCurChain = curChain + , gddCtxImmutableLedgerSt = immutableLedgerSt + , gddCtxKillActions = Map.map cschGDDKill handles + , gddCtxStates = states + } + + wFingerprint :: + (GsmState, GDDStateView m blk peer) + -> Map peer (Maybe (WithOrigin SlotNo), Bool) + wFingerprint (gsmState, GDDStateView{gddCtxStates}) = case gsmState of + -- When we are in 'PreSyncing' (HAA not satisfied) or are caught up, we + -- don't have to run the GDD on changes to the candidate fragments. + -- (Maybe we want to do it in 'PreSycing'?) + PreSyncing -> Map.empty + CaughtUp -> Map.empty + -- When syncing, wake up regularly while headers are sent. + -- Watching csLatestSlot ensures that GDD is woken up when a peer is + -- sending headers even if they are after the forecast horizon. Note + -- that there can be some delay between the header being validated and + -- it becoming visible to GDD. It will be visible only when csLatestSlot + -- changes again or when csIdling changes, which is guaranteed to happen + -- eventually. + Syncing -> + Map.map (\css -> (csLatestSlot css, csIdling css)) gddCtxStates + + wNotify :: (GsmState, GDDStateView m blk peer) -> m () + wNotify (_gsmState, stateView) = do + loeFrag <- evaluateGDD cfg tracer stateView + oldLoEFrag <- atomically $ swapTVar varLoEFrag loeFrag + -- The chain selection only depends on the LoE tip, so there + -- is no point in retriggering it if the LoE tip hasn't changed. + when (AF.headHash oldLoEFrag /= AF.headHash loeFrag) $ + ChainDB.triggerChainSelectionAsync chainDb + +-- | Pure snapshot of the dynamic data the GDD operates on. +data GDDStateView m blk peer = GDDStateView { + gddCtxCurChain :: AnchoredFragment (Header blk) + , gddCtxImmutableLedgerSt :: ExtLedgerState blk + , gddCtxKillActions :: Map peer (m ()) + , gddCtxStates :: Map peer (ChainSyncState blk) + } -- | Disconnect peers that lose density comparisons and recompute the LoE fragment. 
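-- A note on the 'Watcher' abstraction used above (and again by the CSJ
-- invariants watcher later in this series): 'forkLinkedWatcher' repeatedly
-- reads 'wReader', computes a fingerprint with 'wFingerprint', and runs
-- 'wNotify' whenever that fingerprint changes ('wInitial' seeds the first
-- comparison). A rough sketch of that contract, assuming only the record
-- fields shown above; the real implementation lives in
-- Ouroboros.Consensus.Util.STM and may differ in detail:
--
-- > watcherLoop :: (IOLike m, Eq fp) => Watcher m a fp -> m ()
-- > watcherLoop Watcher{wInitial, wReader, wFingerprint, wNotify} =
-- >     go wInitial
-- >   where
-- >     go mbFp = do
-- >       (a, fp) <- atomically $ do
-- >         a <- wReader
-- >         let fp = wFingerprint a
-- >         check (Just fp /= mbFp)  -- block until the fingerprint changes
-- >         pure (a, fp)
-- >       wNotify a
-- >       go (Just fp)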
-- @@ -135,21 +176,21 @@ evaluateGDD :: ) => TopLevelConfig blk -> Tracer m (TraceGDDEvent peer blk) - -> STM m (Map peer (ChainSyncClientHandle m blk)) - -> AnchoredFragment (Header blk) - -> ExtLedgerState blk + -> GDDStateView m blk peer -> m (AnchoredFragment (Header blk)) -evaluateGDD cfg tracer getHandles curChain immutableLedgerSt = do - (states, candidates, candidateSuffixes, handles, loeFrag) <- atomically $ do - handles <- getHandles - states <- traverse (readTVar . cschState) handles - let - candidates = csCandidate <$> states +evaluateGDD cfg tracer stateView = do + let GDDStateView { + gddCtxCurChain = curChain + , gddCtxImmutableLedgerSt = immutableLedgerSt + , gddCtxKillActions = killActions + , gddCtxStates = states + } = stateView + (loeFrag, candidateSuffixes) = sharedCandidatePrefix curChain candidates - pure (states, candidates, candidateSuffixes, handles, loeFrag) + candidates = csCandidate <$> states - let msgen :: Maybe GenesisWindow + msgen :: Maybe GenesisWindow -- This could also use 'runWithCachedSummary' if deemed desirable. msgen = eitherToMaybe $ runQuery qry summary where @@ -180,7 +221,7 @@ evaluateGDD cfg tracer getHandles curChain immutableLedgerSt = do traceWith tracer TraceGDDEvent {sgen, curChain, bounds, candidates, candidateSuffixes, losingPeers, loeHead} - for_ losingPeers $ \peer -> cschGDDKill (handles Map.! peer) + for_ losingPeers $ \peer -> killActions Map.! peer pure loeFrag @@ -224,6 +265,8 @@ data DensityBounds blk = idling :: Bool } +deriving stock instance (Show (Header blk), GetHeader blk) => Show (DensityBounds blk) + -- | @densityDisconnect genWin k states candidateSuffixes loeFrag@ -- yields the list of peers which are known to lose the density comparison with -- any other peer, when looking at the genesis window after @loeFrag@. @@ -399,3 +442,7 @@ data TraceGDDEvent peer blk = loeHead :: AF.Anchor (Header blk), sgen :: GenesisWindow } + +deriving stock instance + ( GetHeader blk, Show (Header blk), Show peer + ) => Show (TraceGDDEvent peer blk) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs index 9c756543fa..324be6ab47 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs @@ -884,7 +884,7 @@ data LoE a = LoEDisabled | -- | The LoE is enabled. 
- LoEEnabled a + LoEEnabled !a deriving (Eq, Show, Generic, NoThunks, Functor, Foldable, Traversable) -type GetLoEFragment m blk = LoE (m (AnchoredFragment (Header blk))) +type GetLoEFragment m blk = m (LoE (AnchoredFragment (Header blk))) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs index 143aad7375..d4c5e9c420 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs @@ -37,7 +37,7 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl ( import Control.Monad (when) import Control.Monad.Trans.Class (lift) import Control.Tracer -import Data.Functor ((<&>)) +import Data.Functor (void, (<&>)) import Data.Functor.Identity (Identity) import qualified Data.Map.Strict as Map import Data.Maybe.Strict (StrictMaybe (..)) @@ -148,6 +148,7 @@ openDBInternal args launchBgTasks = runWithTempRegistry $ do let initChainSelTracer = contramap TraceInitChainSelEvent tracer traceWith initChainSelTracer StartedInitChainSelection + initialLoE <- Args.cdbsLoE cdbSpecificArgs chainAndLedger <- ChainSel.initialChainSelection immutableDB volatileDB @@ -157,7 +158,7 @@ openDBInternal args launchBgTasks = runWithTempRegistry $ do varInvalid varFutureBlocks (Args.cdbsCheckInFuture cdbSpecificArgs) - (Args.cdbsLoE cdbSpecificArgs) + (void initialLoE) traceWith initChainSelTracer InitialChainSelected let chain = VF.validatedFragment chainAndLedger diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs index d8dc1db7b5..ebb6a04b7f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs @@ -118,7 +118,7 @@ defaultSpecificArgs = ChainDbSpecificArgs { , cdbsTracer = nullTracer , cdbsHasFSGsmDB = noDefault , cdbsTopLevelConfig = noDefault - , cdbsLoE = LoEDisabled + , cdbsLoE = pure LoEDisabled } -- | Default arguments diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs index 28792bca8a..156472c296 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs @@ -109,7 +109,7 @@ initialChainSelection :: -> StrictTVar m (WithFingerprint (InvalidBlocks blk)) -> StrictTVar m (FutureBlocks m blk) -> CheckInFuture m blk - -> LoE (m (AnchoredFragment (Header blk))) + -> LoE () -> m (ChainAndLedger blk) initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid varFutureBlocks futureCheck loE = do @@ -303,10 +303,9 @@ chainSelSync :: -- 'ChainSelReprocessLoEBlocks' whenever we receive a new header or lose a -- peer. -- If 'cdbLoE' is 'LoEDisabled', this task is skipped. 
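-- Note the change of shape for 'cdbLoE' in this patch: the enable/disable
-- decision itself is now under the monad, so it can follow the GSM at run
-- time instead of being fixed at start-up:
--
-- >   before: type GetLoEFragment m blk = LoE (m (AnchoredFragment (Header blk)))
-- >   after:  type GetLoEFragment m blk = m (LoE (AnchoredFragment (Header blk)))
--
-- which is why 'chainSelSync' below now runs the action first and then cases
-- on the result.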
-chainSelSync cdb@CDB{..} ChainSelReprocessLoEBlocks - | LoEDisabled <- cdbLoE = pure () - - | otherwise = do +chainSelSync cdb@CDB{..} ChainSelReprocessLoEBlocks = lift cdbLoE >>= \case + LoEDisabled -> pure () + LoEEnabled _ -> do (succsOf, chain) <- lift $ atomically $ do invalid <- forgetFingerprint <$> readTVar cdbInvalid (,) @@ -552,7 +551,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- on the implementation of @processLoE@. Nothing -> AF.Empty (AF.anchor curChain) - loeFrag <- traverse (fmap sanitizeLoEFrag) cdbLoE + loeFrag <- fmap sanitizeLoEFrag <$> cdbLoE traceWith addBlockTracer (ChainSelectionLoEDebug curChain loeFrag) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs index 84c70e8121..211f481038 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs @@ -268,7 +268,7 @@ data ChainDbEnv m blk = CDB -- The number of blocks from the future is bounded by the number of -- upstream peers multiplied by the max clock skew divided by the slot -- length. - , cdbLoE :: LoE (m (AnchoredFragment (Header blk))) + , cdbLoE :: m (LoE (AnchoredFragment (Header blk))) -- ^ Configure the Limit on Eagerness. If this is 'LoEEnabled', it contains -- an action that returns the LoE fragment, which indicates the latest rollback -- point, i.e. we are not allowed to select a chain from which we could not diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs index 3bb24cf4f0..85f644bbf7 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs @@ -129,6 +129,6 @@ fromMinimalChainDbArgs MinimalChainDbArgs {..} = ChainDbArgs { , cdbsRegistry = mcdbRegistry , cdbsTracer = nullTracer , cdbsTopLevelConfig = mcdbTopLevelConfig - , cdbsLoE = LoEDisabled + , cdbsLoE = pure LoEDisabled } } From 4b437c504cf3003b98bbe7aebd7c3b5fb38ab3d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Thu, 16 May 2024 11:58:19 +0000 Subject: [PATCH 10/26] Automated watcher of CSJ state invariants --- .../ouroboros-consensus-diffusion.cabal | 1 + .../Consensus/PeerSimulator/CSJInvariants.hs | 183 ++++++++++++++++++ .../Test/Consensus/PeerSimulator/Run.hs | 5 +- 3 files changed, 188 insertions(+), 1 deletion(-) create mode 100644 ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs diff --git a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal index 3b3b0807e2..2ecf0f6880 100644 --- a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal +++ b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal @@ -243,6 +243,7 @@ test-suite consensus-test Test.Consensus.Network.AnchoredFragment.Extras Test.Consensus.Node Test.Consensus.PeerSimulator.BlockFetch + Test.Consensus.PeerSimulator.CSJInvariants Test.Consensus.PeerSimulator.ChainSync Test.Consensus.PeerSimulator.Config Test.Consensus.PeerSimulator.Handlers diff --git 
a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs new file mode 100644 index 0000000000..7b83e04101 --- /dev/null +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs @@ -0,0 +1,183 @@ +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE RankNTypes #-} + +-- | This module provides a watcher of the invariants that are specific to the +-- ChainSync jumping (CSJ) implementation. Those invariants are typically +-- documented in the codebase but are not checked in any way, yet they are +-- crucial for CSJ to work properly. This watcher monitors the ChainSync +-- handlers and throws a 'Violation' exception when an invariant stops holding. +-- It is intended for testing purposes. +module Test.Consensus.PeerSimulator.CSJInvariants ( + Violation + , watcher + ) where + +import Control.Monad (forM_, when) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Typeable (Typeable) +import Ouroboros.Consensus.Block (Point, StandardHash, castPoint) +import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State as CSState +import Ouroboros.Consensus.Util.IOLike (Exception, MonadSTM (STM), + MonadThrow (throwIO), StrictTVar, readTVar) +import Ouroboros.Consensus.Util.STM (Watcher (..)) + +-------------------------------------------------------------------------------- +-- Idealised view of the ChainSync client's state +-------------------------------------------------------------------------------- + +-- | Our idealised view of the ChainSync client's state with respect to +-- ChainSync jumping in particular. +type View peer blk = Map peer (State blk) + +-- | Idealised version of 'ChainSyncJumpingState'. +data State blk + = Dynamo + | Objector + -- | The point where the objector dissented with the dynamo when it was a + -- jumper. + !(Point blk) + | Disengaged + | Jumper !(JumperState blk) + deriving (Show, Eq) + +-- | Idealised version of 'ChainSyncJumpingJumperState'. +data JumperState blk + = Happy + -- | Latest accepted jump, if there is one + !(Maybe (Point blk)) + | LookingForIntersection + -- | Latest accepted jump + !(Point blk) + -- | Earliest rejected jump + !(Point blk) + | FoundIntersection + -- | Latest accepted jump + !(Point blk) + -- | Earliest rejected jump + !(Point blk) + deriving (Show, Eq) + +-------------------------------------------------------------------------------- +-- Invariants on views +-------------------------------------------------------------------------------- + +allInvariants :: [Invariant peer blk] +allInvariants = + [ thereIsAlwaysOneDynamoUnlessDisengaged, + thereIsAlwaysAtMostOneObjector + ] + +thereIsAlwaysOneDynamoUnlessDisengaged :: Invariant peer blk +thereIsAlwaysOneDynamoUnlessDisengaged = + Invariant + { name = "There is always one dynamo, unless all are disengaged", + check = \view -> + null (filter (not . 
isDisengaged) $ Map.elems view) + || length (filter isDynamo $ Map.elems view) == 1 + } + +thereIsAlwaysAtMostOneObjector :: Invariant peer blk +thereIsAlwaysAtMostOneObjector = + Invariant + { name = "There is always at most one objector", + check = \view -> + length (filter isObjector $ Map.elems view) <= 1 + } + +-------------------------------------------------------------------------------- +-- Helpers for the invariants +-------------------------------------------------------------------------------- + +isDynamo :: State blk -> Bool +isDynamo (Dynamo {}) = True +isDynamo _ = False + +isObjector :: State blk -> Bool +isObjector (Objector {}) = True +isObjector _ = False + +isDisengaged :: State blk -> Bool +isDisengaged (Disengaged {}) = True +isDisengaged _ = False + +-------------------------------------------------------------------------------- +-- Invariant enforcement implementation +-------------------------------------------------------------------------------- + +readAndView :: + forall m peer blk. + ( MonadSTM m + ) => + StrictTVar m (Map peer (CSState.ChainSyncClientHandle m blk)) -> + STM m (View peer blk) +readAndView handles = + traverse (fmap idealiseState . readTVar . CSState.cschJumping) =<< readTVar handles + where + -- Idealise the state of a ChainSync peer with respect to ChainSync jumping. + -- In particular, we get rid of non-comparable information such as the TVars + -- it may contain. + idealiseState :: CSState.ChainSyncJumpingState m blk -> State blk + idealiseState (CSState.Dynamo {}) = Dynamo + idealiseState (CSState.Objector _ point _) = Objector $ idealiseJumpInfo point + idealiseState (CSState.Disengaged _) = Disengaged + idealiseState (CSState.Jumper _ state) = Jumper $ idealiseJumperState state + -- Idealise the jumper state by stripping away everything that is more of a + -- technical necessity and not actually relevant for the invariants. + idealiseJumperState :: CSState.ChainSyncJumpingJumperState blk -> JumperState blk + idealiseJumperState (CSState.Happy _ lastAccepted) = Happy $ idealiseJumpInfo <$> lastAccepted + idealiseJumperState (CSState.LookingForIntersection lastAccepted firstRejected) = + LookingForIntersection (idealiseJumpInfo lastAccepted) (idealiseJumpInfo firstRejected) + idealiseJumperState (CSState.FoundIntersection _ lastAccepted firstRejected) = + FoundIntersection (idealiseJumpInfo lastAccepted) (castPoint firstRejected) + -- Jumpers actually carry a lot of information regarding the jump. From our + -- idealised point of view, we only care about the points where the jumpers + -- agree or disagree with the dynamo. + idealiseJumpInfo :: CSState.JumpInfo blk -> Point blk + idealiseJumpInfo = CSState.jMostRecentIntersection + +-- | The type of an invariant. Basically a glorified pair of a name and a check +-- function. +data Invariant peer blk = Invariant + { name :: !String, + check :: !(View peer blk -> Bool) + } + +-- | An exception that is thrown when an invariant is violated. It carries the +-- name of the invariant and the view of the state that triggered the invariant +-- violation. +data Violation peer blk = Violation !String !(View peer blk) + deriving (Eq, Show) + +instance + ( Typeable blk, + StandardHash blk, + Eq peer, + Show peer, + Typeable peer + ) => + Exception (Violation peer blk) + +-- | The watcher of ChainSync jumping invariants. It receives the ChainSync +-- handles and monitors them for changes. 
When a change is detected, it runs all +-- the invariants and throws 'Violation' if any of the invariants is violated. +watcher :: + ( MonadSTM m, + MonadThrow m, + Eq peer, + Show peer, + Typeable peer, + Typeable blk, + StandardHash blk + ) => + StrictTVar m (Map peer (CSState.ChainSyncClientHandle m blk)) -> + Watcher m (View peer blk) (View peer blk) +watcher handles = + Watcher + { wFingerprint = id, + wInitial = Nothing, + wReader = readAndView handles, + wNotify = + forM_ allInvariants . \view Invariant {name, check} -> + when (not $ check view) $ throwIO $ Violation name view + } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index e1376a8dfd..a8ff5e01ef 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -10,7 +10,7 @@ module Test.Consensus.PeerSimulator.Run ( , runPointSchedule ) where -import Control.Monad (foldM, forM) +import Control.Monad (foldM, forM, void) import Control.Monad.Class.MonadTime (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (..), nullTracer, traceWith) @@ -47,6 +47,7 @@ import Ouroboros.Network.Util.ShowProxy (ShowProxy) import qualified Test.Consensus.PeerSimulator.BlockFetch as BlockFetch import qualified Test.Consensus.PeerSimulator.ChainSync as ChainSync import Test.Consensus.PeerSimulator.Config +import qualified Test.Consensus.PeerSimulator.CSJInvariants as CSJInvariants import Test.Consensus.PeerSimulator.NodeLifecycle import Test.Consensus.PeerSimulator.Resources import Test.Consensus.PeerSimulator.StateDiagram @@ -373,6 +374,8 @@ startNode schedulerConfig genesisTest interval = do (pure GSM.Syncing) -- TODO actually run GSM (readTVar handles) var + + void $ forkLinkedWatcher lrRegistry "CSJ invariants watcher" $ CSJInvariants.watcher handles where LiveResources {lrRegistry, lrTracer, lrConfig, lrPeerSim, lrLoEVar} = resources From f69fd1f7b7dcb8ef42d3970e08e3de77962fbfe8 Mon Sep 17 00:00:00 2001 From: Alexander Esgen Date: Thu, 16 May 2024 16:53:42 +0200 Subject: [PATCH 11/26] Allow to disable *all* Genesis components individually Previously, it wasn't possible to eg run *just* CSJ. 
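For illustration, with the split-up 'GenesisConfig' below, a configuration
that runs *just* CSJ could now (hypothetically, this is not part of the
patch) be written as a record update of the default:

    -- keep the default CSJ config, switch LoP and LoE/GDD off
    justCSJ :: GenesisConfig
    justCSJ = enableGenesisConfigDefault
      { gcChainSyncLoPBucketConfig = ChainSyncLoPBucketDisabled
      , gcLoEAndGDDConfig          = LoEAndGDDDisabled
      }

As a sanity check of the defaults this starts from (assuming the usual
mainnet parameters k = 2160, f = 1/20 and one-second slots): csbcRate = 500
tokens per second is one token every 2 ms, csbcCapacity = 100_000 tokens
therefore takes 200 s to drain without refills, and csjcJumpSize =
3 * 2160 * 20 = 129_600 slots is the 36-hour forecast range.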
--- .../Ouroboros/Consensus/Node.hs | 19 ++--- .../Ouroboros/Consensus/Node/Genesis.hs | 83 +++++++++++-------- .../Ouroboros/Consensus/NodeKernel.hs | 14 ++-- .../Test/ThreadNet/Network.hs | 6 +- 4 files changed, 67 insertions(+), 55 deletions(-) diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs index c781ddf7f2..f6cc42eea1 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs @@ -80,8 +80,6 @@ import Ouroboros.Consensus.Fragment.InFuture (CheckInFuture, ClockSkew) import qualified Ouroboros.Consensus.Fragment.InFuture as InFuture import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState (..)) -import Ouroboros.Consensus.MiniProtocol.ChainSync.Client - (CSJConfig (..), ChainSyncLoPBucketConfig (..)) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck import qualified Ouroboros.Consensus.Network.NodeToClient as NTC import qualified Ouroboros.Consensus.Network.NodeToNode as NTN @@ -90,8 +88,7 @@ import Ouroboros.Consensus.Node.DbMarker import Ouroboros.Consensus.Node.ErrorPolicy import Ouroboros.Consensus.Node.ExitPolicy import Ouroboros.Consensus.Node.Genesis (GenesisConfig (..), - GenesisNodeKernelArgs, GenesisSwitch (..), - mkGenesisNodeKernelArgs) + GenesisNodeKernelArgs, mkGenesisNodeKernelArgs) import Ouroboros.Consensus.Node.GSM (GsmNodeKernelArgs (..)) import qualified Ouroboros.Consensus.Node.GSM as GSM import Ouroboros.Consensus.Node.InitStorage @@ -197,7 +194,7 @@ data RunNodeArgs m addrNTN addrNTC blk (p2p :: Diffusion.P2P) = RunNodeArgs { , rnGetUseBootstrapPeers :: STM m UseBootstrapPeers - , rnGenesisConfig :: GenesisSwitch GenesisConfig + , rnGenesisConfig :: GenesisConfig } @@ -254,7 +251,7 @@ data LowLevelRunNodeArgs m addrNTN addrNTC versionDataNTN versionDataNTC blk -- | See 'NTN.ChainSyncTimeout' , llrnChainSyncTimeout :: m NTN.ChainSyncTimeout - , llrnGenesisConfig :: GenesisSwitch GenesisConfig + , llrnGenesisConfig :: GenesisConfig -- | How to run the data diffusion applications -- @@ -527,12 +524,8 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = (NTN.defaultCodecs codecConfig version encAddrNTN decAddrNTN) NTN.byteLimits llrnChainSyncTimeout - (case llrnGenesisConfig of - GenesisEnabled gcfg -> gcsChainSyncLoPBucketConfig gcfg - GenesisDisabled -> ChainSyncLoPBucketDisabled) - (case llrnGenesisConfig of - GenesisEnabled gcfg -> gcsCSJConfig gcfg - GenesisDisabled -> CSJDisabled) + (gcChainSyncLoPBucketConfig llrnGenesisConfig) + (gcCSJConfig llrnGenesisConfig) (reportMetric Diffusion.peerMetricsConfiguration peerMetrics) (NTN.mkHandlers nodeKernelArgs nodeKernel) @@ -721,7 +714,7 @@ mkNodeKernelArgs :: -> GSM.MarkerFileView m -> STM m UseBootstrapPeers -> StrictSTM.StrictTVar m (Diffusion.PublicPeerSelectionState addrNTN) - -> GenesisSwitch (GenesisNodeKernelArgs m blk) + -> GenesisNodeKernelArgs m blk -> m (NodeKernelArgs m addrNTN (ConnectionId addrNTC) blk) mkNodeKernelArgs registry diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs index c221de18ce..032b51e78a 100644 --- 
a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/Genesis.hs @@ -1,19 +1,24 @@ {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE NumericUnderscores #-} {-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Consensus.Node.Genesis ( + -- * 'GenesisConfig' GenesisConfig (..) + , LoEAndGDDConfig (..) + , disableGenesisConfig + , enableGenesisConfigDefault + -- * NodeKernel helpers , GenesisNodeKernelArgs (..) - , GenesisSwitch (..) - , defaultGenesisConfig , mkGenesisNodeKernelArgs , setGetLoEFragment ) where import Control.Monad (join) +import Data.Traversable (for) import Ouroboros.Consensus.Block import Ouroboros.Consensus.MiniProtocol.ChainSync.Client (CSJConfig (..), CSJEnabledConfig (..), @@ -28,29 +33,39 @@ import Ouroboros.Consensus.Util.IOLike import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import qualified Ouroboros.Network.AnchoredFragment as AF --- We have multiple other Genesis-related types of a similar shape ('LoE', LoP --- and CSJ configs), maybe unify? -data GenesisSwitch a = - GenesisDisabled - | GenesisEnabled !a +-- | Whether to en-/disable the Limit on Eagerness and the Genesis Density +-- Disconnector. +data LoEAndGDDConfig a = + LoEAndGDDEnabled !a + | LoEAndGDDDisabled deriving stock (Show, Functor, Foldable, Traversable) -- | Aggregating the various configs for Genesis-related subcomponents. data GenesisConfig = GenesisConfig { - gcsChainSyncLoPBucketConfig :: !ChainSyncLoPBucketConfig - , gcsCSJConfig :: !CSJConfig + gcChainSyncLoPBucketConfig :: !ChainSyncLoPBucketConfig + , gcCSJConfig :: !CSJConfig + , gcLoEAndGDDConfig :: !(LoEAndGDDConfig ()) } -- TODO justification/derivation from other parameters -defaultGenesisConfig :: GenesisConfig -defaultGenesisConfig = GenesisConfig { - gcsChainSyncLoPBucketConfig = ChainSyncLoPBucketEnabled ChainSyncLoPBucketEnabledConfig { +enableGenesisConfigDefault :: GenesisConfig +enableGenesisConfigDefault = GenesisConfig { + gcChainSyncLoPBucketConfig = ChainSyncLoPBucketEnabled ChainSyncLoPBucketEnabledConfig { csbcCapacity = 100_000 -- number of tokens , csbcRate = 500 -- tokens per second leaking, 1/2ms } - , gcsCSJConfig = CSJEnabled CSJEnabledConfig { + , gcCSJConfig = CSJEnabled CSJEnabledConfig { csjcJumpSize = 3 * 2160 * 20 -- mainnet forecast range } + , gcLoEAndGDDConfig = LoEAndGDDEnabled () + } + +-- | Disable all Genesis components, yielding Praos behavior. +disableGenesisConfig :: GenesisConfig +disableGenesisConfig = GenesisConfig { + gcChainSyncLoPBucketConfig = ChainSyncLoPBucketDisabled + , gcCSJConfig = CSJDisabled + , gcLoEAndGDDConfig = LoEAndGDDDisabled } -- | Genesis-related arguments needed by the NodeKernel initialization logic. @@ -59,31 +74,33 @@ data GenesisNodeKernelArgs m blk = GenesisNodeKernelArgs { -- action. We use this extra indirection to update this action after we -- opened the ChainDB (which happens before we initialize the NodeKernel). -- After that, this TVar will not be modified again. 
- gnkaGetLoEFragment :: !(StrictTVar m (ChainDB.GetLoEFragment m blk)) + gnkaGetLoEFragment :: !(LoEAndGDDConfig (StrictTVar m (ChainDB.GetLoEFragment m blk))) } -- | Create the initial 'GenesisNodeKernelArgs" (with a temporary -- 'ChainDB.GetLoEFragment' that will be replaced via 'setGetLoEFragment') and a -- function to update the 'ChainDbArgs' accordingly. mkGenesisNodeKernelArgs :: - forall m blk a. (IOLike m, GetHeader blk) - => GenesisSwitch a - -> m ( GenesisSwitch (GenesisNodeKernelArgs m blk) + forall m blk. (IOLike m, GetHeader blk) + => GenesisConfig + -> m ( GenesisNodeKernelArgs m blk , Complete ChainDbArgs m blk -> Complete ChainDbArgs m blk ) -mkGenesisNodeKernelArgs = \case - GenesisDisabled -> pure (GenesisDisabled, id) - GenesisEnabled{} -> do - varGetLoEFragment <- newTVarIO $ pure $ - -- Use the most conservative LoE fragment until 'setGetLoEFragment' is - -- called. - ChainDB.LoEEnabled $ AF.Empty AF.AnchorGenesis - let getLoEFragment = join $ readTVarIO varGetLoEFragment - updateChainDbArgs cfg = cfg { ChainDB.cdbsArgs = - (ChainDB.cdbsArgs cfg) { ChainDB.cdbsLoE = getLoEFragment } - } - gnka = GenesisEnabled $ GenesisNodeKernelArgs varGetLoEFragment - pure (gnka, updateChainDbArgs) +mkGenesisNodeKernelArgs gcfg = do + gnkaGetLoEFragment <- for (gcLoEAndGDDConfig gcfg) $ \() -> + newTVarIO $ pure $ + -- Use the most conservative LoE fragment until 'setGetLoEFragment' + -- is called. + ChainDB.LoEEnabled $ AF.Empty AF.AnchorGenesis + let updateChainDbArgs = case gnkaGetLoEFragment of + LoEAndGDDDisabled -> id + LoEAndGDDEnabled varGetLoEFragment -> \cfg -> + cfg { ChainDB.cdbsArgs = + (ChainDB.cdbsArgs cfg) { ChainDB.cdbsLoE = getLoEFragment } + } + where + getLoEFragment = join $ readTVarIO varGetLoEFragment + pure (GenesisNodeKernelArgs {gnkaGetLoEFragment}, updateChainDbArgs) -- | Set 'gnkaGetLoEFragment' to the actual logic for determining the current -- LoE fragment. @@ -92,10 +109,10 @@ setGetLoEFragment :: => STM m GSM.GsmState -> STM m (AnchoredFragment (Header blk)) -- ^ The LoE fragment. 
- -> GenesisNodeKernelArgs m blk + -> StrictTVar m (ChainDB.GetLoEFragment m blk) -> m () -setGetLoEFragment readGsmState readLoEFragment ctx = - atomically $ writeTVar (gnkaGetLoEFragment ctx) getLoEFragment +setGetLoEFragment readGsmState readLoEFragment varGetLoEFragment = + atomically $ writeTVar varGetLoEFragment getLoEFragment where getLoEFragment :: ChainDB.GetLoEFragment m blk getLoEFragment = atomically $ readGsmState >>= \case diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs index 591664d2be..0b16a1ae3d 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs @@ -67,8 +67,8 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client viewChainSyncState) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck (SomeHeaderInFutureCheck) -import Ouroboros.Consensus.Node.Genesis (GenesisNodeKernelArgs, - GenesisSwitch (..), setGetLoEFragment) +import Ouroboros.Consensus.Node.Genesis (GenesisNodeKernelArgs (..), + LoEAndGDDConfig (..), setGetLoEFragment) import Ouroboros.Consensus.Node.GSM (GsmNodeKernelArgs (..)) import qualified Ouroboros.Consensus.Node.GSM as GSM import Ouroboros.Consensus.Node.Run @@ -182,7 +182,7 @@ data NodeKernelArgs m addrNTN addrNTC blk = NodeKernelArgs { , peerSharingRng :: StdGen , publicPeerSelectionStateVar :: StrictSTM.StrictTVar m (PublicPeerSelectionState addrNTN) - , genesisArgs :: GenesisSwitch (GenesisNodeKernelArgs m blk) + , genesisArgs :: GenesisNodeKernelArgs m blk } initNodeKernel :: @@ -275,14 +275,14 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers ps_POLICY_PEER_SHARE_STICKY_TIME ps_POLICY_PEER_SHARE_MAX_PEERS - case genesisArgs of - GenesisDisabled -> pure () - GenesisEnabled ctx -> do + case gnkaGetLoEFragment genesisArgs of + LoEAndGDDDisabled -> pure () + LoEAndGDDEnabled varGetLoEFragment -> do varLoEFragment <- newTVarIO $ AF.Empty AF.AnchorGenesis setGetLoEFragment (readTVar varGsmState) (readTVar varLoEFragment) - ctx + varGetLoEFragment void $ forkLinkedWatcher registry "NodeKernel.GDD" $ gddWatcher diff --git a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs index 7e1b5abcfd..eb4b978a1f 100644 --- a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs +++ b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs @@ -74,7 +74,7 @@ import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck import qualified Ouroboros.Consensus.Network.NodeToNode as NTN import Ouroboros.Consensus.Node.ExitPolicy -import Ouroboros.Consensus.Node.Genesis (GenesisSwitch (..)) +import Ouroboros.Consensus.Node.Genesis import qualified Ouroboros.Consensus.Node.GSM as GSM import Ouroboros.Consensus.Node.InitStorage import Ouroboros.Consensus.Node.NetworkProtocolVersion @@ -1034,7 +1034,9 @@ runThreadNetwork systemTime ThreadNetworkArgs } , getUseBootstrapPeers = pure DontUseBootstrapPeers , publicPeerSelectionStateVar - , genesisArgs = GenesisDisabled + , genesisArgs = GenesisNodeKernelArgs { + gnkaGetLoEFragment 
= LoEAndGDDDisabled + } } nodeKernel <- initNodeKernel nodeKernelArgs From 627634f4e9aa3db3a12593abe3eb7a8adfe9d2be Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Mon, 13 May 2024 18:01:37 +0200 Subject: [PATCH 12/26] Simplify the honest peer shrinking function Modify the honest shrinking function by no longer speeding up the other schedules when an honest tick is deleted. This simplifies a lot of code in the `Shrinking` module, at the cost of no longer ensuring that shrunk schedules preserve the overall order of events. Additionally, we re-enable shrinking in CSJ tests Also: * Extend adversarial schedules when shrinking an honest one * Document the cases when we don't use shrinking --- .../Test/Consensus/Genesis/Tests/CSJ.hs | 7 +- .../Test/Consensus/Genesis/Tests/LoE.hs | 3 + .../Test/Consensus/Genesis/Tests/LoP.hs | 3 + .../PeerSimulator/Tests/LinkedThreads.hs | 1 + .../Consensus/PeerSimulator/Tests/Rollback.hs | 2 + .../Consensus/PeerSimulator/Tests/Timeouts.hs | 3 + .../Test/Consensus/PointSchedule/Shrinking.hs | 103 ++++++------------ .../PointSchedule/Shrinking/Tests.hs | 17 +-- 8 files changed, 52 insertions(+), 87 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs index d333eec6ee..c0a3c47dc9 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs @@ -22,6 +22,7 @@ import Test.Consensus.PeerSimulator.Trace (TraceEvent (..)) import Test.Consensus.PointSchedule import qualified Test.Consensus.PointSchedule.Peers as Peers import Test.Consensus.PointSchedule.Peers (Peers (..), peers') +import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Tasty import Test.Tasty.QuickCheck import Test.Util.Orphans.IOLike () @@ -88,11 +89,7 @@ prop_CSJ happy synchronized = , scEnableLoP = True } ) - ( -- NOTE: Shrinking makes the tests fail because the peers reject jumps - -- because their TP is G. This makes them into objectors and they then - -- start serving headers. - \_ _ -> [] - ) + shrinkPeerSchedules ( \gt StateView{svTrace} -> let -- The list of 'TraceDownloadedHeader' events that are not newer than diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs index c3cd74a925..0aecf4a182 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs @@ -45,6 +45,9 @@ tests = -- NOTE: Same as 'LoP.prop_delayAttack' with timeouts instead of LoP. prop_adversaryHitsTimeouts :: Bool -> Property prop_adversaryHitsTimeouts timeoutsEnabled = + -- Here we can't shrink because we exploit the properties of the point schedule to wait + -- at the end of the test for the adversaries to get disconnected, by adding an extra point. + -- If this point gets removed by the shrinker, we lose that property and the test becomes useless. 
noShrinking $ forAllGenesisTest ( do diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs index 0c861ebe9f..5e27804094 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs @@ -180,6 +180,9 @@ prop_serve mustTimeout = -- NOTE: Same as 'LoE.prop_adversaryHitsTimeouts' with LoP instead of timeouts. prop_delayAttack :: Bool -> Property prop_delayAttack lopEnabled = + -- Here we can't shrink because we exploit the properties of the point schedule to wait + -- at the end of the test for the adversaries to get disconnected, by adding an extra point. + -- If this point gets removed by the shrinker, we lose that property and the test becomes useless. noShrinking $ forAllGenesisTest ( do diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs index 9a79bbab6e..d17869d726 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs @@ -44,6 +44,7 @@ prop_chainSyncKillsBlockFetch = do pure $ gt $> schedule ) defaultSchedulerConfig + -- No shrinking because the schedule is tiny and hand-crafted (\_ _ -> []) ( \_ stateView@StateView {svTipBlock} -> svTipBlock == Nothing diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs index 9ee67577e3..cd327b0b5f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs @@ -57,6 +57,7 @@ prop_rollback = do defaultSchedulerConfig + -- No shrinking because the schedule is tiny and hand-crafted (\_ _ -> []) (\_ -> not . hashOnTrunk . AF.headHash . svSelectedChain) @@ -73,6 +74,7 @@ prop_cannotRollback = defaultSchedulerConfig + -- No shrinking because the schedule is tiny and hand-crafted (\_ _ -> []) (\_ -> hashOnTrunk . AF.headHash . svSelectedChain) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs index 25f64ca973..25911f2b23 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs @@ -44,6 +44,9 @@ prop_timeouts mustTimeout = do -- Timeouts are enabled by default defaultSchedulerConfig + -- Here we can't shrink because we exploit the properties of the point schedule to wait + -- at the end of the test for the adversaries to get disconnected, by adding an extra point. + -- If this point gets removed by the shrinker, we lose that property and the test becomes useless. 
    (\_ _ -> [])
    (\_ stateView ->
diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs
index a574077634..6abab846ce 100644
--- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs
+++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs
@@ -5,17 +5,16 @@ module Test.Consensus.PointSchedule.Shrinking (
     -- | Exported only for testing (that is, checking the properties of the function)
     shrinkByRemovingAdversaries
   , shrinkHonestPeer
+  , shrinkHonestPeers
   , shrinkPeerSchedules
   ) where

 import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime)
 import Data.Containers.ListUtils (nubOrd)
-import Data.Function ((&))
 import Data.Functor ((<&>))
-import Data.Map.Strict (Map)
 import qualified Data.Map.Strict as Map
-import Data.Maybe (mapMaybe, maybeToList)
+import Data.Maybe (mapMaybe)
 import Ouroboros.Network.AnchoredFragment (AnchoredFragment,
            AnchoredSeq (Empty), takeWhileOldest)
 import Test.Consensus.BlockTree (BlockTree (..), BlockTreeBranch (..),
@@ -27,7 +26,6 @@ import Test.Consensus.PointSchedule (GenesisTest (..),
 import Test.Consensus.PointSchedule.Peers (Peers (..))
 import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..))
 import Test.QuickCheck (shrinkList)
-import Test.Util.PartialAccessors
 import Test.Util.TestBlock (TestBlock, isAncestorOf,
            isStrictAncestorOf)

@@ -42,7 +40,7 @@ shrinkPeerSchedules ::
 shrinkPeerSchedules genesisTest _stateView =
   let trimmedBlockTree sch = trimBlockTree' sch (gtBlockTree genesisTest)
       shrunkAdversarialPeers =
-        shrinkAdversarialPeers shrinkPeerSchedule (gtSchedule genesisTest)
+        shrinkAdversarialPeers shrinkAdversarialPeer (gtSchedule genesisTest)
           <&> \shrunkSchedule ->
             genesisTest
               { gtSchedule = shrunkSchedule,
@@ -69,8 +67,8 @@ shrinkByRemovingAdversaries genesisTest _stateView =

 -- | Shrink a 'PeerSchedule' by removing ticks from it. The other ticks are kept
 -- unchanged.
-shrinkPeerSchedule :: (PeerSchedule blk) -> [PeerSchedule blk]
-shrinkPeerSchedule = shrinkList (const [])
+shrinkAdversarialPeer :: (PeerSchedule blk) -> [PeerSchedule blk]
+shrinkAdversarialPeer = shrinkList (const [])

 -- | Shrink the 'others' field of a 'Peers' structure by attempting to remove
 -- peers or by shrinking their values using the given shrinking function.
@@ -82,37 +80,36 @@ shrinkAdversarialPeers shrink Peers {honestPeers, adversarialPeers} =

 -- | Shrinks honest peers by removing ticks. Because we are manipulating
 -- 'PeerSchedule' at this point, there is no proper notion of a tick. Instead,
--- we remove points from the honest 'PeerSchedule', and move all other points
--- sooner, including those on the other schedules. We check that this operation
--- neither changes the final state of the honest peer, nor removes points from
--- the other schedules.
+-- we remove points from the honest 'PeerSchedule', and move all other points sooner.
+--
+-- We check that this operation does not change the final state of the honest peer,
+-- that is, it keeps the same final tip point, header point, and block point.
+--
+-- NOTE: This operation makes the honest peer end its schedule sooner, which *may*
+-- trigger disconnections when the timeout for MsgAwaitReply is reached. In those cases,
+-- it is probably more pertinent to disable this timeout in tests than to disable shrinking.
shrinkHonestPeers :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] -shrinkHonestPeers Peers {honestPeers, adversarialPeers} = - Map.toList honestPeers - & concatMap - ( \(n, schedule) -> - shrinkTheHonestPeer schedule (Map.delete n honestPeers) adversarialPeers - & map - ( \(schedule', otherHonestPeers', otherAdversarialPeers') -> - Peers - { honestPeers = Map.insert n schedule' otherHonestPeers', - adversarialPeers = otherAdversarialPeers' - } - ) - ) +shrinkHonestPeers Peers {honestPeers, adversarialPeers} = do + (k, honestSch) <- Map.toList honestPeers + let (lastHonest, _) = last honestSch + shrunk <- shrinkHonestPeer honestSch + pure $ Peers + { honestPeers = Map.insert k shrunk honestPeers + , adversarialPeers = fmap (extendAdversary lastHonest) adversarialPeers + } + where + -- Add an extra point at the end of the adversarial schedule if the honest one + -- was longer than it. Preserves the total duration of the simulation, so that + -- timeouts/LoP disconnections can still happen. + extendAdversary tLast = \case + [] -> [] + ps -> case last ps of + (t, p) | t < tLast -> ps ++ [(tLast, p)] + _ -> ps -shrinkTheHonestPeer :: - PeerSchedule blk -> - Map Int (PeerSchedule blk) -> - Map Int (PeerSchedule blk) -> - [(PeerSchedule blk, Map Int (PeerSchedule blk), Map Int (PeerSchedule blk))] -shrinkTheHonestPeer theSchedule otherHonestPeers otherAdversarialPeers = do - (at, speedUpBy) <- splits - maybeToList $ do - theSchedule' <- speedUpTheSchedule at speedUpBy theSchedule - otherHonestPeers' <- mapM (speedUpOtherSchedule at speedUpBy) otherHonestPeers - otherAdversarialPeers' <- mapM (speedUpOtherSchedule at speedUpBy) otherAdversarialPeers - pure (theSchedule', otherHonestPeers', otherAdversarialPeers') +shrinkHonestPeer :: PeerSchedule blk -> [PeerSchedule blk] +shrinkHonestPeer sch = + mapMaybe (speedUpTheSchedule sch) splits where -- | A list of non-zero time intervals between successive points of the honest schedule splits :: [(Time, DiffTime)] @@ -122,20 +119,7 @@ shrinkTheHonestPeer theSchedule otherHonestPeers otherAdversarialPeers = do then Nothing else Just (t1, diffTime t2 t1) ) - (zip theSchedule (drop 1 theSchedule)) - --- | For testing purposes only. Assumes there is exactly one honest peer and --- shrinks it. -shrinkHonestPeer :: PeersSchedule blk -> [PeersSchedule blk] -shrinkHonestPeer Peers {honestPeers, adversarialPeers} = - shrinkTheHonestPeer (getHonestPeer honestPeers) Map.empty adversarialPeers - & map - ( \(schedule', _, otherAdversarialPeers') -> - Peers - { honestPeers = Map.singleton 1 schedule', - adversarialPeers = otherAdversarialPeers' - } - ) + (zip sch (drop 1 sch)) -- | Speeds up _the_ schedule (that is, the one that we are actually trying to -- speed up) after `at` time, by `speedUpBy`. This "speeding up" is done by @@ -143,8 +127,8 @@ shrinkHonestPeer Peers {honestPeers, adversarialPeers} = -- they fall before `at`. We check that the operation doesn't change the final -- state of the peer, i.e. it doesn't remove all TP, HP, and BP in the sped up -- part. -speedUpTheSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) -speedUpTheSchedule at speedUpBy sch = +speedUpTheSchedule :: PeerSchedule blk -> (Time, DiffTime) -> Maybe (PeerSchedule blk) +speedUpTheSchedule sch (at, speedUpBy) = if stillValid then Just $ beforeSplit ++ spedUpSchedule else Nothing where (beforeSplit, afterSplit) = span ((< at) . 
fst) sch @@ -160,21 +144,6 @@ speedUpTheSchedule at speedUpBy sch = hasHP = any (\case (_, ScheduleHeaderPoint _) -> True; _ -> False) hasBP = any (\case (_, ScheduleBlockPoint _) -> True; _ -> False) --- | Speeds up the other schedules after `at` time, by `speedUpBy`. This --- "speeding up" is done by removing `speedUpBy` to all points after `at`. We --- check that the schedule had no points between `at` and `at + speedUpBy`. We --- also keep the last point where it is, so that the end time stays the same. -speedUpOtherSchedule :: Time -> DiffTime -> PeerSchedule blk -> Maybe (PeerSchedule blk) -speedUpOtherSchedule at speedUpBy sch = - if losesPoint then Nothing else Just $ beforeSplit ++ spedUpSchedule ++ lastPoint - where - (beforeSplit, afterSplit) = span ((< at) . fst) sch - spedUpSchedule = map (\(t, p) -> (addTime (-speedUpBy) t, p)) $ take (length afterSplit - 1) afterSplit - losesPoint = any ((< (addTime speedUpBy at)) . fst) afterSplit - lastPoint = case afterSplit of - [] -> [] - as -> [last as] - -- | Remove blocks from the given block tree that are not necessary for the -- given peer schedules. If entire branches are unused, they are removed. If the -- trunk is unused, then it remains as an empty anchored fragment. diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs index ed3b1a0003..311549a7be 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs @@ -12,7 +12,7 @@ import Test.Consensus.Genesis.Tests.Uniform (genUniformSchedulePoints) import Test.Consensus.PointSchedule (PeerSchedule, PeersSchedule, prettyPeersSchedule) import Test.Consensus.PointSchedule.Peers (Peers (..)) -import Test.Consensus.PointSchedule.Shrinking (shrinkHonestPeer) +import Test.Consensus.PointSchedule.Shrinking (shrinkHonestPeers) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) import Test.QuickCheck (Property, conjoin, counterexample) import Test.Tasty @@ -25,7 +25,6 @@ tests = [ testGroup "honest peer shrinking" [ testProperty "actually shortens the schedule" prop_shortens , testProperty "preserves the final state all peers" prop_preservesFinalStates - , testProperty "doesn't remove points of the adversarial schedule" prop_preserversAdversarial ] ] @@ -35,9 +34,6 @@ prop_shortens = checkShrinkProperty isShorterThan prop_preservesFinalStates :: Property prop_preservesFinalStates = checkShrinkProperty doesNotChangeFinalState -prop_preserversAdversarial :: Property -prop_preserversAdversarial = checkShrinkProperty doesNotRemoveAdversarialPoints - -- | Apparently, `unsnoc` hasn't been invented yet, so we'll do this manually lastM :: [a] -> Maybe a lastM [] = Nothing @@ -79,15 +75,6 @@ doesNotChangeFinalState original shrunk = lastBP :: PeerSchedule blk -> Maybe (SchedulePoint blk) lastBP sch = lastM $ mapMaybe (\case (_, p@(ScheduleBlockPoint _)) -> Just p ; _ -> Nothing) sch -doesNotRemoveAdversarialPoints :: Eq blk => PeersSchedule blk -> PeersSchedule blk -> Bool -doesNotRemoveAdversarialPoints original shrunk = - samePeers original shrunk - && (and $ zipWith - (\oldSch newSch -> fmap snd oldSch == fmap snd newSch) - (toList $ adversarialPeers original) - (toList $ adversarialPeers shrunk) - ) - checkShrinkProperty :: (PeersSchedule TestBlock -> PeersSchedule 
TestBlock -> Bool) -> Property checkShrinkProperty prop = forAllBlind @@ -103,5 +90,5 @@ checkShrinkProperty prop = ) (prop schedule shrunk) ) - (shrinkHonestPeer schedule) + (shrinkHonestPeers schedule) ) From a592c653ca4c9303bd9dee7c55ef81760751fd23 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Tue, 21 May 2024 19:25:26 +0000 Subject: [PATCH 13/26] Avoid reconstructing the candidateSuffixes in densityDisconnect --- .../Ouroboros/Consensus/Genesis/Governor.hs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs index 99df6870e4..94ce201076 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs @@ -403,8 +403,10 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe firstSlotAfterGenesisWindow = succWithOrigin loeIntersectionSlot + SlotNo sgen + -- This is performance sensitive. We used to call @takeWhileOldest@ here, + -- which would reconstruct much of the original fragment. dropBeyondGenesisWindow = - AF.takeWhileOldest ((< firstSlotAfterGenesisWindow) . blockSlot) + AF.dropWhileNewest ((>= firstSlotAfterGenesisWindow) . blockSlot) clippedFrags = Map.map dropBeyondGenesisWindow candidateSuffixes From 5eac5d363b7c85e4fd20f8efa2f82370cedeaaa1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Wed, 15 May 2024 10:49:16 +0000 Subject: [PATCH 14/26] Rename `PeersSchedule` into `PointSchedule` and make it a newtype Instead of a type alias. This could help catching bugs, but, more importantly, this paves the way to making it a data-type and adding more fields to it, although a lot of the fixes of this commit would then crumble immediately. 
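As a rough sketch of the shape of the change (the definition is the one
introduced in `Test.Consensus.PointSchedule` below; the call sites are
representative examples taken from this diff, not an exhaustive list):

    newtype PointSchedule blk = PointSchedule {
        unPointSchedule :: Peers (PeerSchedule blk)
      }

    -- Construction sites now wrap explicitly, e.g.
    --   PointSchedule $ peersOnlyHonest [(Time 0, scheduleTipPoint tipBlock)]
    -- and consumers unwrap explicitly, e.g.
    --   Map.size (adversarialPeers (unPointSchedule (gtSchedule genesisTest)))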
--- .../Test/Consensus/Genesis/Setup.hs | 2 +- .../Consensus/Genesis/Setup/Classifiers.hs | 8 +-- .../Test/Consensus/Genesis/Tests/CSJ.hs | 22 ++++---- .../Genesis/Tests/DensityDisconnect.hs | 6 +-- .../Test/Consensus/Genesis/Tests/LoE.hs | 6 +-- .../Test/Consensus/Genesis/Tests/LoP.hs | 19 ++++--- .../Test/Consensus/Genesis/Tests/Uniform.hs | 18 +++---- .../Test/Consensus/PeerSimulator/Run.hs | 7 +-- .../PeerSimulator/Tests/LinkedThreads.hs | 4 +- .../Consensus/PeerSimulator/Tests/Rollback.hs | 4 +- .../Consensus/PeerSimulator/Tests/Timeouts.hs | 4 +- .../Test/Consensus/PointSchedule.hs | 52 ++++++++++--------- .../Test/Consensus/PointSchedule/Shrinking.hs | 28 +++++----- .../PointSchedule/Shrinking/Tests.hs | 28 +++++----- 14 files changed, 107 insertions(+), 101 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup.hs index ae6eb10095..a1e661c264 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup.hs @@ -59,7 +59,7 @@ runGenesisTest schedulerConfig genesisTest = (recordingTracer, getTrace) <- recordingTracerM let tracer = if scDebug schedulerConfig then debugTracer else recordingTracer - traceLinesWith tracer $ prettyGenesisTest prettyPeersSchedule genesisTest + traceLinesWith tracer $ prettyGenesisTest prettyPointSchedule genesisTest rgtrStateView <- runPointSchedule schedulerConfig genesisTest =<< tracerTestBlock tracer traceWith tracer (condense rgtrStateView) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs index a7eb599293..f9a7af93a8 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs @@ -164,7 +164,7 @@ resultClassifiers GenesisTest{gtSchedule} RunGenesisTestResult{rgtrStateView} = StateView{svPeerSimulatorResults} = rgtrStateView adversaries :: [PeerId] - adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers gtSchedule + adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers $ unPointSchedule gtSchedule adversariesCount = fromIntegral $ length adversaries @@ -248,19 +248,19 @@ scheduleClassifiers GenesisTest{gtSchedule = schedule} = peerSch rollbacks :: Peers Bool - rollbacks = hasRollback <$> schedule + rollbacks = hasRollback <$> unPointSchedule schedule adversaryRollback = any id $ adversarialPeers rollbacks honestRollback = any id $ honestPeers rollbacks - allAdversariesEmpty = all id $ adversarialPeers $ null <$> schedule + allAdversariesEmpty = all id $ adversarialPeers $ null <$> unPointSchedule schedule isTrivial :: PeerSchedule TestBlock -> Bool isTrivial = \case [] -> True (t0, _):points -> all ((== t0) . 
fst) points - allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> schedule + allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> unPointSchedule schedule simpleHash :: HeaderHash block ~ TestHash => diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs index c0a3c47dc9..3e38e72f09 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs @@ -124,17 +124,19 @@ prop_CSJ happy synchronized = receivedHeadersAtMostOnceFromHonestPeers ) where - genDuplicatedHonestSchedule :: GenesisTest TestBlock () -> Gen (PeersSchedule TestBlock) + genDuplicatedHonestSchedule :: GenesisTest TestBlock () -> Gen (PointSchedule TestBlock) genDuplicatedHonestSchedule gt@GenesisTest {gtExtraHonestPeers} = do - Peers {honestPeers, adversarialPeers} <- genUniformSchedulePoints gt - pure $ - Peers.unionWithKey - (\_ _ _ -> error "should not happen") - ( peers' - (replicate (fromIntegral gtExtraHonestPeers + 1) (getHonestPeer honestPeers)) - [] - ) - (Peers Map.empty adversarialPeers) + ps@PointSchedule {unPointSchedule = Peers {honestPeers, adversarialPeers}} <- genUniformSchedulePoints gt + pure $ ps { + unPointSchedule = + Peers.unionWithKey + (\_ _ _ -> error "should not happen") + ( peers' + (replicate (fromIntegral gtExtraHonestPeers + 1) (getHonestPeer honestPeers)) + [] + ) + (Peers Map.empty adversarialPeers) + } isNewerThanJumpSizeFromTip :: GenesisTestFull TestBlock -> Header TestBlock -> Bool isNewerThanJumpSizeFromTip gt hdr = diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs index 7c6d9e75c8..c2618bf8f0 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs @@ -471,7 +471,7 @@ prop_densityDisconnectTriggersChainSel = ( \GenesisTest {gtBlockTree, gtSchedule} stateView@StateView {svTipBlock} -> let - othersCount = Map.size (adversarialPeers gtSchedule) + othersCount = Map.size (adversarialPeers $ unPointSchedule gtSchedule) exnCorrect = case exceptionsByComponent ChainSyncClient stateView of [fromException -> Just DensityTooLow] -> True [] | othersCount == 0 -> True @@ -490,7 +490,7 @@ prop_densityDisconnectTriggersChainSel = -- which should allow the GDD to realize that the chain -- is not dense enough, and that the whole of the honest -- chain should be selected. - lowDensitySchedule :: HasHeader blk => BlockTree blk -> Peers (PeerSchedule blk) + lowDensitySchedule :: HasHeader blk => BlockTree blk -> PointSchedule blk lowDensitySchedule tree = let trunkTip = getTrunkTip tree branch = getOnlyBranch tree @@ -498,7 +498,7 @@ prop_densityDisconnectTriggersChainSel = (AF.Empty _) -> Origin (_ AF.:> tipBlock) -> At tipBlock advTip = getOnlyBranchTip tree - in peers' + in PointSchedule $ peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain up to the intersection. 
[[(Time 0, scheduleTipPoint trunkTip), diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs index 0aecf4a182..215c7b8721 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs @@ -19,7 +19,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers, peers') +import Test.Consensus.PointSchedule.Peers (peers') import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, scheduleHeaderPoint, scheduleTipPoint) @@ -80,7 +80,7 @@ prop_adversaryHitsTimeouts timeoutsEnabled = in selectedCorrect && exceptionsCorrect ) where - delaySchedule :: HasHeader blk => BlockTree blk -> Peers (PeerSchedule blk) + delaySchedule :: HasHeader blk => BlockTree blk -> PointSchedule blk delaySchedule tree = let trunkTip = getTrunkTip tree branch = getOnlyBranch tree @@ -88,7 +88,7 @@ prop_adversaryHitsTimeouts timeoutsEnabled = (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock branchTip = getOnlyBranchTip tree - in peers' + in PointSchedule $ peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs index 5e27804094..d6b08caa57 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs @@ -22,8 +22,7 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers, peers', - peersOnlyHonest) +import Test.Consensus.PointSchedule.Peers (peers', peersOnlyHonest) import Test.Consensus.PointSchedule.Shrinking (shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, scheduleHeaderPoint, scheduleTipPoint) @@ -74,11 +73,11 @@ prop_wait mustTimeout = _ -> False ) where - dullSchedule :: (HasHeader blk) => DiffTime -> AnchoredFragment blk -> Peers (PeerSchedule blk) + dullSchedule :: (HasHeader blk) => DiffTime -> AnchoredFragment blk -> PointSchedule blk dullSchedule _ (AF.Empty _) = error "requires a non-empty block tree" dullSchedule timeout (_ AF.:> tipBlock) = let offset :: DiffTime = if mustTimeout then 1 else -1 - in peersOnlyHonest $ + in PointSchedule $ peersOnlyHonest $ [ (Time 0, scheduleTipPoint tipBlock), -- This last point does not matter, it is only here to leave the -- connection open (aka. 
keep the test running) long enough to @@ -105,10 +104,10 @@ prop_waitBehindForecastHorizon = _ -> False ) where - dullSchedule :: (HasHeader blk) => AnchoredFragment blk -> Peers (PeerSchedule blk) + dullSchedule :: (HasHeader blk) => AnchoredFragment blk -> PointSchedule blk dullSchedule (AF.Empty _) = error "requires a non-empty block tree" dullSchedule (_ AF.:> tipBlock) = - peersOnlyHonest $ + PointSchedule $ peersOnlyHonest $ [ (Time 0, scheduleTipPoint tipBlock), (Time 0, scheduleHeaderPoint tipBlock), (Time 11, scheduleBlockPoint tipBlock) @@ -166,10 +165,10 @@ prop_serve mustTimeout = -- \| Make a schedule serving the given fragment with regularity, one block -- every 'timeBetweenBlocks'. NOTE: We must do something at @Time 0@ -- otherwise the others times will be shifted such that the first one is 0. - makeSchedule :: (HasHeader blk) => AnchoredFragment blk -> Peers (PeerSchedule blk) + makeSchedule :: (HasHeader blk) => AnchoredFragment blk -> PointSchedule blk makeSchedule (AF.Empty _) = error "fragment must have at least one block" makeSchedule fragment@(_ AF.:> tipBlock) = - peersOnlyHonest $ + PointSchedule $ peersOnlyHonest $ (Time 0, scheduleTipPoint tipBlock) : ( flip concatMap (zip [1 ..] (AF.toOldestFirst fragment)) $ \(i, block) -> [ (Time (secondsRationalToDiffTime (i * timeBetweenBlocks)), scheduleHeaderPoint block), @@ -216,7 +215,7 @@ prop_delayAttack lopEnabled = in selectedCorrect && exceptionsCorrect ) where - delaySchedule :: (HasHeader blk) => BlockTree blk -> Peers (PeerSchedule blk) + delaySchedule :: (HasHeader blk) => BlockTree blk -> PointSchedule blk delaySchedule tree = let trunkTip = getTrunkTip tree branch = getOnlyBranch tree @@ -224,7 +223,7 @@ prop_delayAttack lopEnabled = (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock branchTip = getOnlyBranchTip tree - in peers' + in PointSchedule $ peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. 
[ (Time 0, scheduleTipPoint trunkTip) : case intersectM of diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index dce4642f06..709426588e 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -95,7 +95,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = immutableTipIsRecent ] where - advCount = Map.size (adversarialPeers (gtSchedule genesisTest)) + advCount = Map.size (adversarialPeers (unPointSchedule $ gtSchedule genesisTest)) immutableTipIsRecent = counterexample ("Age of the immutable tip: " ++ show immutableTipAge) $ @@ -129,7 +129,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = [] -> "No peers were disconnected" peers -> "Some peers were disconnected: " ++ intercalate ", " (condense <$> peers) - honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ getHonestPeer $ honestPeers $ gtSchedule genesisTest + honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ getHonestPeer $ honestPeers $ unPointSchedule $ gtSchedule genesisTest GenesisTest {gtBlockTree, gtGenesisWindow = GenesisWindow s, gtDelay = Delta d} = genesisTest @@ -168,7 +168,7 @@ prop_serveAdversarialBranches = forAllGenesisTest theProperty -genUniformSchedulePoints :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) +genUniformSchedulePoints :: GenesisTest TestBlock () -> QC.Gen (PointSchedule TestBlock) genUniformSchedulePoints gt = stToGen (uniformPoints pointsGeneratorParams (gtBlockTree gt)) where pointsGeneratorParams = PointsGeneratorParams @@ -222,11 +222,11 @@ prop_leashingAttackStalling = -- This is achieved by dropping random points from the schedule of each peer -- and by adding sufficient time at the end of a test to allow LoP and -- timeouts to disconnect adversaries. - genLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) + genLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PointSchedule TestBlock) genLeashingSchedule genesisTest = do - Peers honest advs0 <- ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest + Peers honest advs0 <- unPointSchedule . 
ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest advs <- mapM dropRandomPoints advs0 - pure $ Peers honest advs + pure $ PointSchedule $ Peers honest advs disableBoringTimeouts gt = gt { gtChainSyncTimeouts = (gtChainSyncTimeouts gt) @@ -277,9 +277,9 @@ prop_leashingAttackTimeLimited = where -- | A schedule which doesn't run past the last event of the honest peer - genTimeLimitedSchedule :: GenesisTest TestBlock () -> QC.Gen (PeersSchedule TestBlock) + genTimeLimitedSchedule :: GenesisTest TestBlock () -> QC.Gen (PointSchedule TestBlock) genTimeLimitedSchedule genesisTest = do - Peers honests advs0 <- genUniformSchedulePoints genesisTest + Peers honests advs0 <- unPointSchedule <$> genUniformSchedulePoints genesisTest let timeLimit = estimateTimeBound (gtChainSyncTimeouts genesisTest) (gtLoPBucketParams genesisTest) @@ -287,7 +287,7 @@ prop_leashingAttackTimeLimited = (Map.elems advs0) advs = fmap (takePointsUntil timeLimit) advs0 extendedHonests = extendScheduleUntil timeLimit <$> honests - pure $ Peers extendedHonests advs + pure $ PointSchedule $ Peers extendedHonests advs takePointsUntil limit = takeWhile ((<= limit) . fst) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index a8ff5e01ef..c88cc36e86 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -56,7 +56,8 @@ import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PeerSimulator.Trace import Test.Consensus.PointSchedule (BlockFetchTimeout, CSJParams (..), GenesisTest (..), GenesisTestFull, - LoPBucketParams (..), PeersSchedule, peersStatesRelative) + LoPBucketParams (..), PointSchedule (..), + peersStatesRelative) import Test.Consensus.PointSchedule.NodeState (NodeState) import Test.Consensus.PointSchedule.Peers (Peer (..), PeerId, getPeerIds) @@ -268,7 +269,7 @@ runScheduler :: IOLike m => Tracer m (TraceSchedulerEvent blk) -> StrictTVar m (Map PeerId (ChainSyncClientHandle m blk)) -> - PeersSchedule blk -> + PointSchedule blk -> Map PeerId (PeerResources m blk) -> NodeLifecycle blk m -> m (ChainDB m blk, StateViewTracers blk m) @@ -467,7 +468,7 @@ runPointSchedule :: m (StateView TestBlock) runPointSchedule schedulerConfig genesisTest tracer0 = withRegistry $ \registry -> do - peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds gtSchedule) + peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds $ unPointSchedule gtSchedule) lifecycle <- nodeLifecycle schedulerConfig genesisTest tracer registry peerSim (chainDb, stateViewTracers) <- runScheduler (Tracer $ traceWith tracer . 
TraceSchedulerEvent) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs index d17869d726..d1e4d8e2a5 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs @@ -62,12 +62,12 @@ prop_chainSyncKillsBlockFetch = do _ -> False ) where - dullSchedule :: GenesisTest blk () -> DiffTime -> PeersSchedule blk + dullSchedule :: GenesisTest blk () -> DiffTime -> PointSchedule blk dullSchedule GenesisTest {gtBlockTree} timeout = let (firstBlock, secondBlock) = case AF.toOldestFirst $ btTrunk gtBlockTree of b1 : b2 : _ -> (b1, b2) _ -> error "block tree must have two blocks" - in peersOnlyHonest $ + in PointSchedule $ peersOnlyHonest $ [ (Time 0, scheduleTipPoint secondBlock), (Time 0, scheduleHeaderPoint firstBlock), (Time (timeout + 1), scheduleBlockPoint firstBlock) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs index cd327b0b5f..dc27bbc176 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs @@ -84,7 +84,7 @@ prop_cannotRollback = -- chain of the given block tree. -- -- PRECONDITION: Block tree with at least one alternative chain. -rollbackSchedule :: AF.HasHeader blk => Int -> BlockTree blk -> PeersSchedule blk +rollbackSchedule :: AF.HasHeader blk => Int -> BlockTree blk -> PointSchedule blk rollbackSchedule n blockTree = let branch = case btBranches blockTree of [b] -> b @@ -95,7 +95,7 @@ rollbackSchedule n blockTree = , banalSchedulePoints trunkSuffix , banalSchedulePoints (btbSuffix branch) ] - in peersOnlyHonest $ zip (map (Time . (/30)) [0..]) schedulePoints + in PointSchedule $ peersOnlyHonest $ zip (map (Time . (/30)) [0..]) schedulePoints where banalSchedulePoints :: AnchoredFragment blk -> [SchedulePoint blk] banalSchedulePoints = concatMap banalSchedulePoints' . 
toOldestFirst diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs index 25911f2b23..3cdde2a1e0 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs @@ -59,11 +59,11 @@ prop_timeouts mustTimeout = do ) where - dullSchedule :: AF.HasHeader blk => DiffTime -> AF.AnchoredFragment blk -> PeersSchedule blk + dullSchedule :: AF.HasHeader blk => DiffTime -> AF.AnchoredFragment blk -> PointSchedule blk dullSchedule _ (AF.Empty _) = error "requires a non-empty block tree" dullSchedule timeout (_ AF.:> tipBlock) = let offset :: DiffTime = if mustTimeout then 1 else -1 - in peersOnlyHonest $ [ + in PointSchedule $ peersOnlyHonest $ [ (Time 0, scheduleTipPoint tipBlock), (Time 0, scheduleHeaderPoint tipBlock), (Time 0, scheduleBlockPoint tipBlock), diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs index 3171bbf826..4c4520039f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs @@ -30,7 +30,7 @@ module Test.Consensus.PointSchedule ( , GenesisWindow (..) , LoPBucketParams (..) , PeerSchedule - , PeersSchedule + , PointSchedule (..) , PointsGeneratorParams (..) , RunGenesisTestResult (..) , enrichedWith @@ -42,7 +42,7 @@ module Test.Consensus.PointSchedule ( , peersStates , peersStatesRelative , prettyGenesisTest - , prettyPeersSchedule + , prettyPointSchedule , stToGen , uniformPoints ) where @@ -93,14 +93,14 @@ import Test.Util.TersePrinting (terseFragment) import Test.Util.TestBlock (TestBlock) import Text.Printf (printf) -prettyPeersSchedule :: +prettyPointSchedule :: forall blk. (CondenseList (NodeState blk)) => - PeersSchedule blk -> + PointSchedule blk -> [String] -prettyPeersSchedule peers = - [ "honest peers: " ++ show (Map.size (honestPeers peers)) - , "adversaries: " ++ show (Map.size (adversarialPeers peers)) +prettyPointSchedule peers = + [ "honest peers: " ++ show (Map.size (honestPeers $ unPointSchedule peers)) + , "adversaries: " ++ show (Map.size (adversarialPeers $ unPointSchedule peers)) ] ++ zipWith3 (\number time peerState -> @@ -156,12 +156,12 @@ peerStates Peer {name, value = schedulePoints} = -- | Convert several @SinglePeer@ schedules to a common 'NodeState' schedule. -- -- The resulting schedule contains all the peers. Items are sorted by time. -peersStates :: PeersSchedule blk -> [(Time, Peer (NodeState blk))] -peersStates peers = foldr (mergeOn fst) [] (peerStates <$> toList (peersList peers)) +peersStates :: PointSchedule blk -> [(Time, Peer (NodeState blk))] +peersStates peers = foldr (mergeOn fst) [] (peerStates <$> toList (peersList $ unPointSchedule peers)) -- | Same as 'peersStates' but returns the duration of a state instead of the -- absolute time at which it starts holding. 
-peersStatesRelative :: PeersSchedule blk -> [(DiffTime, Peer (NodeState blk))] +peersStatesRelative :: PointSchedule blk -> [(DiffTime, Peer (NodeState blk))] peersStatesRelative peers = let (starts, states) = unzip $ peersStates peers durations = snd (mapAccumL (\ prev start -> (start, diffTime start prev)) (Time 0) (drop 1 starts)) ++ [0.1] @@ -173,11 +173,13 @@ type PeerSchedule blk = [(Time, SchedulePoint blk)] peerScheduleBlocks :: (PeerSchedule blk) -> [blk] peerScheduleBlocks = mapMaybe (withOriginToMaybe . schedulePointToBlock . snd) -type PeersSchedule blk = Peers (PeerSchedule blk) +newtype PointSchedule blk = PointSchedule { + unPointSchedule :: Peers (PeerSchedule blk) + } -- | List of all blocks appearing in the schedules. -peerSchedulesBlocks :: PeersSchedule blk -> [blk] -peerSchedulesBlocks = concatMap (peerScheduleBlocks . value) . toList . peersList +peerSchedulesBlocks :: PointSchedule blk -> [blk] +peerSchedulesBlocks = concatMap (peerScheduleBlocks . value) . toList . peersList . unPointSchedule ---------------------------------------------------------------------------------------------------- -- Schedule generators @@ -193,11 +195,11 @@ longRangeAttack :: (StatefulGen g m, AF.HasHeader blk) => BlockTree blk -> g -> - m (PeersSchedule blk) + m (PointSchedule blk) longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do honest <- peerScheduleFromTipPoints g honParams [(IsTrunk, [AF.length btTrunk - 1])] btTrunk [] adv <- peerScheduleFromTipPoints g advParams [(IsBranch, [AF.length (btbFull branch) - 1])] btTrunk [btbFull branch] - pure (peers' [honest] [adv]) + pure $ PointSchedule $ peers' [honest] [adv] where honParams = defaultPeerScheduleParams {pspHeaderDelayInterval = (0.3, 0.4)} advParams = defaultPeerScheduleParams {pspTipDelayInterval = (0, 0.1)} @@ -217,7 +219,7 @@ uniformPoints :: PointsGeneratorParams -> BlockTree blk -> g -> - m (PeersSchedule blk) + m (PointSchedule blk) uniformPoints PointsGeneratorParams {pgpExtraHonestPeers, pgpDowntime} = case pgpDowntime of NoDowntime -> uniformPointsWithExtraHonestPeers pgpExtraHonestPeers DowntimeWithSecurityParam k -> uniformPointsWithExtraHonestPeersAndDowntime pgpExtraHonestPeers k @@ -233,7 +235,7 @@ uniformPointsWithExtraHonestPeers :: Int -> BlockTree blk -> g -> - m (PeersSchedule blk) + m (PointSchedule blk) uniformPointsWithExtraHonestPeers extraHonestPeers BlockTree {btTrunk, btBranches} @@ -243,7 +245,7 @@ uniformPointsWithExtraHonestPeers honests <- replicateM (extraHonestPeers + 1) $ mkSchedule [(IsTrunk, [honestTip0 .. 
AF.length btTrunk - 1])] [] advs <- takeBranches btBranches - pure (peers' honests advs) + pure $ PointSchedule $ peers' honests advs where takeBranches = \case [] -> pure [] @@ -355,7 +357,7 @@ uniformPointsWithExtraHonestPeersAndDowntime :: SecurityParam -> BlockTree blk -> g -> - m (PeersSchedule blk) + m (PointSchedule blk) uniformPointsWithExtraHonestPeersAndDowntime extraHonestPeers (SecurityParam k) @@ -372,7 +374,7 @@ uniformPointsWithExtraHonestPeersAndDowntime mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] advs <- takeBranches pauseSlot btBranches let (honests', advs') = syncTips honests advs - pure (peers' honests' advs') + pure $ PointSchedule $ peers' honests' advs' where takeBranches pause = \case [] -> pure [] @@ -470,7 +472,7 @@ data GenesisTest blk schedule = GenesisTest gtSchedule :: schedule } -type GenesisTestFull blk = GenesisTest blk (PeersSchedule blk) +type GenesisTestFull blk = GenesisTest blk (PointSchedule blk) -- | All the data describing the result of a test data RunGenesisTestResult = RunGenesisTestResult @@ -540,9 +542,9 @@ duplicateLastPoint d xs = let (t, p) = last xs in xs ++ [(addTime d t, p)] -ensureScheduleDuration :: GenesisTest blk a -> PeersSchedule blk -> PeersSchedule blk -ensureScheduleDuration gt peers = - duplicateLastPoint endingDelay <$> peers +ensureScheduleDuration :: GenesisTest blk a -> PointSchedule blk -> PointSchedule blk +ensureScheduleDuration gt ps = + PointSchedule $ duplicateLastPoint endingDelay <$> unPointSchedule ps where endingDelay = let cst = gtChainSyncTimeouts gt @@ -553,4 +555,4 @@ ensureScheduleDuration gt peers = , busyTimeout bft , streamingTimeout bft ]) - peerCount = length (peersList peers) + peerCount = length (peersList $ unPointSchedule ps) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs index 6abab846ce..f4fb4ef54f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs @@ -21,7 +21,7 @@ import Test.Consensus.BlockTree (BlockTree (..), BlockTreeBranch (..), addBranch', mkTrunk) import Test.Consensus.PeerSimulator.StateView (StateView) import Test.Consensus.PointSchedule (GenesisTest (..), - GenesisTestFull, PeerSchedule, PeersSchedule, + GenesisTestFull, PeerSchedule, PointSchedule (..), peerSchedulesBlocks) import Test.Consensus.PointSchedule.Peers (Peers (..)) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) @@ -29,7 +29,7 @@ import Test.QuickCheck (shrinkList) import Test.Util.TestBlock (TestBlock, isAncestorOf, isStrictAncestorOf) --- | Shrink a 'Peers PeerSchedule'. This does not affect the honest peer; it +-- | Shrink a 'PointSchedule'. This does not affect the honest peer; it -- does, however, attempt to remove other peers or ticks of other peers. The -- block tree is trimmed to keep only parts that are necessary for the shrunk -- schedule. 
@@ -40,11 +40,11 @@ shrinkPeerSchedules :: shrinkPeerSchedules genesisTest _stateView = let trimmedBlockTree sch = trimBlockTree' sch (gtBlockTree genesisTest) shrunkAdversarialPeers = - shrinkAdversarialPeers shrinkAdversarialPeer (gtSchedule genesisTest) + shrinkAdversarialPeers shrinkAdversarialPeer (unPointSchedule $ gtSchedule genesisTest) <&> \shrunkSchedule -> genesisTest - { gtSchedule = shrunkSchedule, - gtBlockTree = trimmedBlockTree shrunkSchedule + { gtSchedule = PointSchedule shrunkSchedule, + gtBlockTree = trimmedBlockTree $ PointSchedule shrunkSchedule } shrunkHonestPeers = shrinkHonestPeers @@ -53,7 +53,7 @@ shrinkPeerSchedules genesisTest _stateView = <&> \shrunkSchedule -> genesisTest {gtSchedule = shrunkSchedule} in shrunkAdversarialPeers ++ shrunkHonestPeers --- | Shrink a 'Peers PeerSchedule' by removing adversaries. This does not affect +-- | Shrink a 'PointSchedule' by removing adversaries. This does not affect -- the honest peer; and it does not remove ticks from the schedules of the -- remaining adversaries. shrinkByRemovingAdversaries :: @@ -61,13 +61,13 @@ shrinkByRemovingAdversaries :: StateView TestBlock -> [GenesisTestFull TestBlock] shrinkByRemovingAdversaries genesisTest _stateView = - shrinkAdversarialPeers (const []) (gtSchedule genesisTest) <&> \shrunkSchedule -> - let trimmedBlockTree = trimBlockTree' shrunkSchedule (gtBlockTree genesisTest) - in (genesisTest{gtSchedule = shrunkSchedule, gtBlockTree = trimmedBlockTree}) + shrinkAdversarialPeers (const []) (unPointSchedule $ gtSchedule genesisTest) <&> \shrunkSchedule -> + let trimmedBlockTree = trimBlockTree' (PointSchedule shrunkSchedule) (gtBlockTree genesisTest) + in (genesisTest{gtSchedule = PointSchedule shrunkSchedule, gtBlockTree = trimmedBlockTree}) -- | Shrink a 'PeerSchedule' by removing ticks from it. The other ticks are kept -- unchanged. -shrinkAdversarialPeer :: (PeerSchedule blk) -> [PeerSchedule blk] +shrinkAdversarialPeer :: PeerSchedule blk -> [PeerSchedule blk] shrinkAdversarialPeer = shrinkList (const []) -- | Shrink the 'others' field of a 'Peers' structure by attempting to remove @@ -88,12 +88,12 @@ shrinkAdversarialPeers shrink Peers {honestPeers, adversarialPeers} = -- NOTE: This operation makes the honest peer to end its schedule sooner, which *may* -- trigger disconnections when the timeout for MsgAwaitReply is reached. In those cases, -- it is probably more pertinent to disable this timeout in tests than to disable shrinking. -shrinkHonestPeers :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] -shrinkHonestPeers Peers {honestPeers, adversarialPeers} = do +shrinkHonestPeers :: PointSchedule blk -> [PointSchedule blk] +shrinkHonestPeers PointSchedule {unPointSchedule = Peers {honestPeers, adversarialPeers}} = do (k, honestSch) <- Map.toList honestPeers let (lastHonest, _) = last honestSch shrunk <- shrinkHonestPeer honestSch - pure $ Peers + pure $ PointSchedule $ Peers { honestPeers = Map.insert k shrunk honestPeers , adversarialPeers = fmap (extendAdversary lastHonest) adversarialPeers } @@ -147,7 +147,7 @@ speedUpTheSchedule sch (at, speedUpBy) = -- | Remove blocks from the given block tree that are not necessary for the -- given peer schedules. If entire branches are unused, they are removed. If the -- trunk is unused, then it remains as an empty anchored fragment. 
-trimBlockTree' :: PeersSchedule TestBlock -> BlockTree TestBlock -> BlockTree TestBlock +trimBlockTree' :: PointSchedule TestBlock -> BlockTree TestBlock -> BlockTree TestBlock trimBlockTree' = keepOnlyAncestorsOf . peerSchedulesBlocks -- | Given some blocks and a block tree, keep only the prefix of the block tree diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs index 311549a7be..e85087205f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs @@ -9,8 +9,8 @@ import Data.Map (keys) import Data.Maybe (mapMaybe) import Test.Consensus.Genesis.Setup (genChains) import Test.Consensus.Genesis.Tests.Uniform (genUniformSchedulePoints) -import Test.Consensus.PointSchedule (PeerSchedule, PeersSchedule, - prettyPeersSchedule) +import Test.Consensus.PointSchedule (PeerSchedule, PointSchedule (..), + prettyPointSchedule) import Test.Consensus.PointSchedule.Peers (Peers (..)) import Test.Consensus.PointSchedule.Shrinking (shrinkHonestPeers) import Test.Consensus.PointSchedule.SinglePeer (SchedulePoint (..)) @@ -40,22 +40,24 @@ lastM [] = Nothing lastM [a] = Just a lastM (_:ps) = lastM ps -samePeers :: PeersSchedule blk -> PeersSchedule blk -> Bool -samePeers sch1 sch2 = (keys $ adversarialPeers sch1) == (keys $ adversarialPeers sch2) +samePeers :: PointSchedule blk -> PointSchedule blk -> Bool +samePeers sch1 sch2 = + (keys $ adversarialPeers $ unPointSchedule sch1) + == (keys $ adversarialPeers $ unPointSchedule sch2) -- | Checks whether at least one peer schedule in the second given peers schedule -- is shorter than its corresponding one in the fist given peers schedule. “Shorter” -- here means that it executes in less time. 
-isShorterThan :: PeersSchedule blk -> PeersSchedule blk -> Bool +isShorterThan :: PointSchedule blk -> PointSchedule blk -> Bool isShorterThan original shrunk = samePeers original shrunk && (or $ zipWith (\oldSch newSch -> (fst <$> lastM newSch) < (fst <$> lastM oldSch)) - (toList original) - (toList shrunk) + (toList $ unPointSchedule original) + (toList $ unPointSchedule shrunk) ) -doesNotChangeFinalState :: Eq blk => PeersSchedule blk -> PeersSchedule blk -> Bool +doesNotChangeFinalState :: Eq blk => PointSchedule blk -> PointSchedule blk -> Bool doesNotChangeFinalState original shrunk = samePeers original shrunk && (and $ zipWith @@ -64,8 +66,8 @@ doesNotChangeFinalState original shrunk = lastHP oldSch == lastHP newSch && lastBP oldSch == lastBP newSch ) - (toList original) - (toList shrunk) + (toList $ unPointSchedule original) + (toList $ unPointSchedule shrunk) ) where lastTP :: PeerSchedule blk -> Maybe (SchedulePoint blk) @@ -75,7 +77,7 @@ doesNotChangeFinalState original shrunk = lastBP :: PeerSchedule blk -> Maybe (SchedulePoint blk) lastBP sch = lastM $ mapMaybe (\case (_, p@(ScheduleBlockPoint _)) -> Just p ; _ -> Nothing) sch -checkShrinkProperty :: (PeersSchedule TestBlock -> PeersSchedule TestBlock -> Bool) -> Property +checkShrinkProperty :: (PointSchedule TestBlock -> PointSchedule TestBlock -> Bool) -> Property checkShrinkProperty prop = forAllBlind (genChains (choose (1, 4)) >>= genUniformSchedulePoints) @@ -84,9 +86,9 @@ checkShrinkProperty prop = (\shrunk -> counterexample ( "Original schedule:\n" - ++ unlines (map (" " ++) $ prettyPeersSchedule schedule) + ++ unlines (map (" " ++) $ prettyPointSchedule schedule) ++ "\nShrunk schedule:\n" - ++ unlines (map (" " ++) $ prettyPeersSchedule shrunk) + ++ unlines (map (" " ++) $ prettyPointSchedule shrunk) ) (prop schedule shrunk) ) From ab0da0679a8161cf27e1cefee213ebbb2682241d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Mon, 20 May 2024 11:40:59 +0000 Subject: [PATCH 15/26] Skip List/Map conversions in GDD governor --- .../Genesis/Tests/DensityDisconnect.hs | 16 +++---- .../Test/Consensus/PeerSimulator/Trace.hs | 19 ++++---- .../Ouroboros/Consensus/Genesis/Governor.hs | 46 +++++++++---------- 3 files changed, 39 insertions(+), 42 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs index c2618bf8f0..fefeb18af9 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs @@ -86,7 +86,7 @@ data StaticCandidates = StaticCandidates { k :: SecurityParam, sgen :: GenesisWindow, - suffixes :: Map PeerId (AnchoredFragment (Header TestBlock)), + suffixes :: [(PeerId, AnchoredFragment (Header TestBlock))], tips :: Map PeerId (Tip TestBlock), loeFrag :: AnchoredFragment (Header TestBlock) } @@ -111,17 +111,17 @@ staticCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} = } where (loeFrag, suffixes) = - sharedCandidatePrefix curChain (toHeaders <$> candidates) + sharedCandidatePrefix curChain (second toHeaders <$> candidates) selections = selection <$> branches selection branch = AF.takeOldest (AF.length (btbPrefix branch) + fromIntegral (maxRollbacks gtSecurityParam)) (btbFull branch) - tips = branchTip <$> candidates + tips = branchTip <$> Map.fromList 
candidates - candidates :: Map PeerId (AnchoredFragment TestBlock) - candidates = Map.fromList (zip (HonestPeer 1 : enumerateAdversaries) chains) + candidates :: [(PeerId, AnchoredFragment TestBlock)] + candidates = zip (HonestPeer 1 : enumerateAdversaries) chains chains = btTrunk gtBlockTree : (btbFull <$> branches) @@ -132,7 +132,7 @@ staticCandidates GenesisTest {gtSecurityParam, gtGenesisWindow, gtBlockTree} = prop_densityDisconnectStatic :: Property prop_densityDisconnectStatic = forAll gen $ \ StaticCandidates {k, sgen, suffixes, loeFrag} -> do - let (disconnect, _) = densityDisconnect sgen k (mkState <$> suffixes) suffixes loeFrag + let (disconnect, _) = densityDisconnect sgen k (mkState <$> Map.fromList suffixes) suffixes loeFrag counterexample "it should disconnect some node" (not (null disconnect)) .&&. counterexample "it should not disconnect the honest peers" @@ -223,7 +223,7 @@ data UpdateEvent = UpdateEvent { -- | Peers that have been disconnected in the current step , killed :: Set PeerId -- | The GDD data - , bounds :: Map PeerId (DensityBounds TestBlock) + , bounds :: [(PeerId, DensityBounds TestBlock)] -- | The current chains , tree :: BlockTree (Header TestBlock) , loeFrag :: AnchoredFragment (Header TestBlock) @@ -381,7 +381,7 @@ evolveBranches EvolvingPeers {k, sgen, peers = initialPeers, fullTree} = csLatestSlot = Just (AF.headSlot csCandidate) } -- Run GDD. - (loeFrag, suffixes) = sharedCandidatePrefix curChain candidates + (loeFrag, suffixes) = sharedCandidatePrefix curChain (Map.toList candidates) (killedNow, bounds) = first Set.fromList $ densityDisconnect sgen k states suffixes loeFrag event = UpdateEvent { target, diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs index 06b03ef95f..45711215ac 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs @@ -21,8 +21,8 @@ module Test.Consensus.PeerSimulator.Trace ( ) where import Control.Tracer (Tracer (Tracer), contramap, traceWith) +import Data.Bifunctor (second) import Data.List (intersperse) -import qualified Data.Map as Map import Data.Time.Clock (DiffTime, diffTimeToPicoseconds) import Ouroboros.Consensus.Block (GenesisWindow (..), Header, Point, WithOrigin (NotOrigin, Origin), succWithOrigin) @@ -448,9 +448,9 @@ traceBlockFetchClientTerminationEventTestBlockWith pid tracer = \case where trace = traceUnitWith tracer ("BlockFetchClient " ++ condense pid) -prettyDensityBounds :: Map.Map PeerId (DensityBounds TestBlock) -> [String] +prettyDensityBounds :: [(PeerId, DensityBounds TestBlock)] -> [String] prettyDensityBounds bounds = - showPeers (showBounds <$> bounds) + showPeers (second showBounds <$> bounds) where showBounds DensityBounds {clippedFragment, offersMoreThanK, lowerBound, upperBound, hasBlockAfter, latestSlot, idling} = show lowerBound ++ "/" ++ show upperBound ++ "[" ++ more ++ "], " ++ @@ -475,8 +475,8 @@ prettyDensityBounds bounds = showIdling | idling = ", idling" | otherwise = "" - showPeers :: Map.Map PeerId String -> [String] - showPeers = fmap (\ (peer, v) -> " " ++ condense peer ++ ": " ++ v) . 
Map.toList +showPeers :: [(PeerId, String)] -> [String] +showPeers = map (\ (peer, v) -> " " ++ condense peer ++ ": " ++ v) -- * Other utilities terseGDDEvent :: TraceGDDEvent PeerId TestBlock -> String @@ -487,15 +487,15 @@ terseGDDEvent = \case " Selection: " ++ terseHFragment curChain, " Candidates:" ] ++ - showPeers (tersePoint . castPoint . AF.headPoint <$> candidates) ++ + showPeers (second (tersePoint . castPoint . AF.headPoint) <$> candidates) ++ [ " Candidate suffixes (bounds):" ] ++ - showPeers (terseHFragment . clippedFragment <$> bounds) ++ + showPeers (second (terseHFragment . clippedFragment) <$> bounds) ++ [" Density bounds:"] ++ prettyDensityBounds bounds ++ [" New candidate tips:"] ++ - showPeers (tersePoint . castPoint <$> Map.map AF.headPoint candidateSuffixes) ++ + showPeers (second (tersePoint . castPoint . AF.headPoint) <$> candidateSuffixes) ++ [ " Losing peers: " ++ show losingPeers, " Setting loeFrag: " ++ terseAnchor (AF.castAnchor loeHead) @@ -508,9 +508,6 @@ terseGDDEvent = \case winEnd = winStart + sgen - 1 SlotNo winStart = succWithOrigin (AF.anchorToSlotNo loeHead) - showPeers :: Map.Map PeerId String -> [String] - showPeers = fmap (\ (peer, v) -> " " ++ condense peer ++ ": " ++ v) . Map.toList - prettyTime :: Time -> String prettyTime (Time time) = let ps = diffTimeToPicoseconds time diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs index 94ce201076..ca6f4dd4bc 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs @@ -37,11 +37,13 @@ module Ouroboros.Consensus.Genesis.Governor ( import Control.Monad (guard, when) import Control.Tracer (Tracer, traceWith) +import Data.Bifunctor (second) import Data.Containers.ListUtils (nubOrd) import Data.Foldable (for_) +import Data.Functor.Compose (Compose (..)) import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map -import Data.Maybe (maybeToList) +import Data.Maybe (mapMaybe, maybeToList) import Data.Word (Word64) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config (TopLevelConfig, configLedger, @@ -188,7 +190,7 @@ evaluateGDD cfg tracer stateView = do (loeFrag, candidateSuffixes) = sharedCandidatePrefix curChain candidates - candidates = csCandidate <$> states + candidates = Map.toList (csCandidate <$> states) msgen :: Maybe GenesisWindow -- This could also use 'runWithCachedSummary' if deemed desirable. @@ -235,15 +237,17 @@ evaluateGDD cfg tracer stateView = do sharedCandidatePrefix :: GetHeader blk => AnchoredFragment (Header blk) -> - Map peer (AnchoredFragment (Header blk)) -> - (AnchoredFragment (Header blk), Map peer (AnchoredFragment (Header blk))) + [(peer, AnchoredFragment (Header blk))] -> + (AnchoredFragment (Header blk), [(peer, AnchoredFragment (Header blk))]) sharedCandidatePrefix curChain candidates = - stripCommonPrefix (AF.anchor curChain) immutableTipSuffixes + second getCompose $ + stripCommonPrefix (AF.anchor curChain) $ + Compose immutableTipSuffixes where immutableTip = AF.anchorPoint curChain - splitAfterImmutableTip frag = - snd <$> AF.splitAfterPoint frag immutableTip + splitAfterImmutableTip (peer, frag) = + (,) peer . 
snd <$> AF.splitAfterPoint frag immutableTip immutableTipSuffixes = -- If a ChainSync client's candidate forks off before the @@ -252,7 +256,7 @@ sharedCandidatePrefix curChain candidates = -- 'InvalidIntersection' within that ChainSync client, so it's -- sound to pre-emptively discard their candidate from this -- 'Map' via 'mapMaybe'. - Map.mapMaybe splitAfterImmutableTip candidates + mapMaybe splitAfterImmutableTip candidates data DensityBounds blk = DensityBounds { @@ -296,21 +300,20 @@ densityDisconnect :: => GenesisWindow -> SecurityParam -> Map peer (ChainSyncState blk) - -> Map peer (AnchoredFragment (Header blk)) + -> [(peer, AnchoredFragment (Header blk))] -> AnchoredFragment (Header blk) - -> ([peer], Map peer (DensityBounds blk)) + -> ([peer], [(peer, DensityBounds blk)]) densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixes loeFrag = (losingPeers, densityBounds) where - densityBounds = Map.fromList $ do - (peer, clippedFragment) <- Map.toList clippedFrags + densityBounds = do + (peer, candidateSuffix) <- candidateSuffixes + let clippedFragment = dropBeyondGenesisWindow candidateSuffix state <- maybeToList (states Map.!? peer) -- Skip peers that haven't sent any headers yet. -- They should be disconnected by timeouts instead. latestSlot <- maybeToList (csLatestSlot state) - let candidateSuffix = candidateSuffixes Map.! peer - - idling = csIdling state + let idling = csIdling state -- Is there a block after the end of the Genesis window? hasBlockAfter = @@ -347,7 +350,7 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe pure (peer, DensityBounds {clippedFragment, offersMoreThanK, lowerBound, upperBound, hasBlockAfter, latestSlot, idling}) - losingPeers = nubOrd $ Map.toList densityBounds >>= \ + losingPeers = nubOrd $ densityBounds >>= \ (peer0 , DensityBounds { clippedFragment = frag0 , lowerBound = lb0 , upperBound = ub0 @@ -359,7 +362,7 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe -- from happening. if ub0 == 0 then pure peer0 else do (_peer1, DensityBounds {clippedFragment = frag1, offersMoreThanK, lowerBound = lb1 }) <- - Map.toList densityBounds + densityBounds -- Don't disconnect peer0 if it sent no headers after the intersection yet -- and it is not idling. -- @@ -408,9 +411,6 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe dropBeyondGenesisWindow = AF.dropWhileNewest ((>= firstSlotAfterGenesisWindow) . 
blockSlot) - clippedFrags = - Map.map dropBeyondGenesisWindow candidateSuffixes - -- Note [Chain disagreement] -- ~~~~~~~~~~~~~~~~~~~~~~~~~ -- @@ -436,10 +436,10 @@ densityDisconnect (GenesisWindow sgen) (SecurityParam k) states candidateSuffixe data TraceGDDEvent peer blk = TraceGDDEvent { - bounds :: Map peer (DensityBounds blk), + bounds :: [(peer, DensityBounds blk)], curChain :: AnchoredFragment (Header blk), - candidates :: Map peer (AnchoredFragment (Header blk)), - candidateSuffixes :: Map peer (AnchoredFragment (Header blk)), + candidates :: [(peer, AnchoredFragment (Header blk))], + candidateSuffixes :: [(peer, AnchoredFragment (Header blk))], losingPeers :: [peer], loeHead :: AF.Anchor (Header blk), sgen :: GenesisWindow From 2acc4b38f65aec8d1b922dc7b59229d65d1f8551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Fri, 24 May 2024 14:00:50 +0000 Subject: [PATCH 16/26] Only get the successors of the immutable tip when reprocessing chain sel for LoE --- .../Consensus/Storage/ChainDB/Impl/ChainSel.hs | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs index 156472c296..bbe9473b6c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs @@ -314,12 +314,16 @@ chainSelSync cdb@CDB{..} ChainSelReprocessLoEBlocks = lift cdbLoE >>= \case <*> Query.getCurrentChain cdb let succsOf' = Set.toList . succsOf . pointHash . castPoint - chainPoints = AF.anchorPoint chain : (blockPoint <$> AF.toOldestFirst chain) - loeHashes = succsOf' =<< chainPoints - loeHeaders <- lift (mapM (VolatileDB.getKnownBlockComponent cdbVolatileDB GetHeader) loeHashes) + loeHashes = succsOf' (AF.anchorPoint chain) + firstHeader = either (const Nothing) Just $ AF.last chain + -- We avoid the VolatileDB for the headers we already have in the chain + getHeaderFromHash hash = + case firstHeader of + Just header | headerHash header == hash -> pure header + _ -> VolatileDB.getKnownBlockComponent cdbVolatileDB GetHeader hash + loeHeaders <- lift (mapM getHeaderFromHash loeHashes) for_ loeHeaders $ \hdr -> - unless (AF.withinFragmentBounds (blockPoint hdr) chain) $ do - void (chainSelectionForBlock cdb BlockCache.empty hdr noPunishment) + void (chainSelectionForBlock cdb BlockCache.empty hdr noPunishment) chainSelSync cdb@CDB {..} (ChainSelAddBlock BlockToAdd { blockToAdd = b, .. 
}) = do (isMember, invalid, curChain) <- lift $ atomically $ (,,) From e882d90de0df24a088e5265715c7b054d7721491 Mon Sep 17 00:00:00 2001 From: Alexander Esgen Date: Fri, 31 May 2024 15:56:18 +0200 Subject: [PATCH 17/26] Add changelog fragments --- .../20240531_155304_alexander.esgen_milestone_13.md | 12 ++++++++++++ .../20240531_155125_alexander.esgen_milestone_13.md | 4 ++++ 2 files changed, 16 insertions(+) create mode 100644 ouroboros-consensus-diffusion/changelog.d/20240531_155304_alexander.esgen_milestone_13.md create mode 100644 ouroboros-consensus/changelog.d/20240531_155125_alexander.esgen_milestone_13.md diff --git a/ouroboros-consensus-diffusion/changelog.d/20240531_155304_alexander.esgen_milestone_13.md b/ouroboros-consensus-diffusion/changelog.d/20240531_155304_alexander.esgen_milestone_13.md new file mode 100644 index 0000000000..97c0335310 --- /dev/null +++ b/ouroboros-consensus-diffusion/changelog.d/20240531_155304_alexander.esgen_milestone_13.md @@ -0,0 +1,12 @@ +### Breaking + +- Integrated all Genesis components into the NodeKernel. In particular, + `RunNodeArgs` now has a new field + + ```haskell + rnGenesisConfig :: GenesisConfig + ``` + + This can be set to `Ouroboros.Consensus.Node.Genesis.disableGenesisConfig` to + keep the Praos behavior, or to `enableGenesisConfigDefault` to enable Genesis + with preliminary parameter choices. diff --git a/ouroboros-consensus/changelog.d/20240531_155125_alexander.esgen_milestone_13.md b/ouroboros-consensus/changelog.d/20240531_155125_alexander.esgen_milestone_13.md new file mode 100644 index 0000000000..a10734d1f0 --- /dev/null +++ b/ouroboros-consensus/changelog.d/20240531_155125_alexander.esgen_milestone_13.md @@ -0,0 +1,4 @@ +### Breaking + +- Internal refactorings in the CSJ and GDD. +- Improvements to the LoE. From cd032b10d29ef924ebd292182613736e1a8f690f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Tue, 21 May 2024 17:15:28 +0000 Subject: [PATCH 18/26] Rework `followsLoEFrag` into `trimToLoE` This commit brings several LoE-related improvements, namely: - The precondition of `followsLoEFrag` stipulating that the given candidate fragment had to intersect with the LoE fragment was often violated in the code, because the candidate fragments were anchored at the tip of the selection. - `followsLoEFrag` used to accept or reject fragments depending on whether they were LoE-compliant or not. The new version trims them to their longest LoE-compliant prefix. Following this change, `followsLoEFrag` has been renamed to `trimToLoE`. It does not filter candidates out anymore. - This last change makes `computeLoEMaxExtra` redundant. We get rid of it entirely and clean up the functions `maximalCandidates` and `extendWithSuccessors` accordingly.
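To make the new trimming behaviour concrete before the diff below, here is a minimal sketch of the same idea expressed directly on anchored fragments, whereas the patch itself implements it on `ChainDiff`s in `trimToLoE`. The name `trimCandidateToLoE`, the bare `Word64` for the security parameter, and the fallback for non-intersecting fragments are illustrative assumptions, not part of the patch:

```haskell
import           Data.Maybe (fromMaybe)
import           Data.Word (Word64)
import           Ouroboros.Network.AnchoredFragment (AnchoredFragment)
import qualified Ouroboros.Network.AnchoredFragment as AF
import           Ouroboros.Network.Block (HasHeader)

-- | Longest prefix of the candidate that respects the LoE: it may extend at
-- most @k@ blocks past the LoE tip and must otherwise stay within the LoE
-- fragment. Both fragments are assumed to intersect.
trimCandidateToLoE ::
     HasHeader blk
  => Word64                -- ^ the security parameter @k@
  -> AnchoredFragment blk  -- ^ LoE fragment
  -> AnchoredFragment blk  -- ^ candidate fragment
  -> AnchoredFragment blk
trimCandidateToLoE k loe cand =
    case AF.intersect cand loe of
      -- Not expected here; the real code treats this as a precondition violation.
      Nothing -> cand
      Just (candPrefix, _loePrefix, candSuffix, loeSuffix)
        | AF.null loeSuffix ->
            -- The candidate reaches the LoE tip: keep at most @k@ more blocks.
            fromMaybe cand (AF.join candPrefix (AF.takeOldest (fromIntegral k) candSuffix))
        | otherwise ->
            -- The candidate diverges from (or stops short of) the LoE tip:
            -- keep only the part up to the intersection.
            candPrefix
```

The two branches mirror the two bullet points of the new Haddock below: at most `k` blocks past the LoE tip when the candidate contains it, and only the common prefix otherwise.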
--- .../Storage/ChainDB/Impl/ChainSel.hs | 139 ++++++++---------- .../Consensus/Storage/ChainDB/Impl/Paths.hs | 22 ++- 2 files changed, 69 insertions(+), 92 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs index bbe9473b6c..9965743b4f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs @@ -35,11 +35,10 @@ import Data.List.NonEmpty (NonEmpty) import qualified Data.List.NonEmpty as NE import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map -import Data.Maybe (isJust, isNothing) +import Data.Maybe (fromJust, isJust, isNothing) import Data.Maybe.Strict (StrictMaybe (..), strictMaybeToMaybe) import Data.Set (Set) import qualified Data.Set as Set -import Data.Word (Word64) import GHC.Stack (HasCallStack) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config @@ -176,7 +175,9 @@ initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid suffixesAfterI :: [NonEmpty (HeaderHash blk)] suffixesAfterI = Paths.maximalCandidates succsOf limit (AF.anchorToPoint i) where - limit = k <$ loE + limit = case loE of + LoEDisabled -> Nothing + LoEEnabled () -> Just k constructChain :: NonEmpty (HeaderHash blk) @@ -579,25 +580,17 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do return tipPoint -- The block fits onto the end of our current chain - | pointHash tipPoint == headerPrevHash hdr - -- TODO could be optimized if necessary/easy enough - , let newBlockFrag = curChain AF.:> hdr - , Just maxExtra <- computeLoEMaxExtra loeFrag newBlockFrag -> do + | pointHash tipPoint == headerPrevHash hdr -> do -- ### Add to current chain traceWith addBlockTracer (TryAddToCurrentChain p) - addToCurrentChain succsOf' curChainAndLedger loeFrag maxExtra + addToCurrentChain succsOf' curChainAndLedger loeFrag -- The block is reachable from the current selection -- and it doesn't fit after the current selection - | Just diff <- Paths.isReachable lookupBlockInfo' curChain p - -- TODO could be optimized if necessary/easy enough - , let curChain' = - AF.mapAnchoredFragment (castHeaderFields . getHeaderFields) curChain - , Just newBlockFrag <- Diff.apply curChain' diff - , Just maxExtra <- computeLoEMaxExtra loeFrag newBlockFrag -> do + | Just diff <- Paths.isReachable lookupBlockInfo' curChain p -> do -- ### Switch to a fork traceWith addBlockTracer (TrySwitchToAFork p diff) - switchToAFork succsOf' lookupBlockInfo' curChainAndLedger loeFrag maxExtra diff + switchToAFork succsOf' lookupBlockInfo' curChainAndLedger loeFrag diff -- We cannot reach the block from the current selection | otherwise -> do @@ -651,12 +644,10 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- ^ The current chain and ledger -> LoE (AnchoredFragment (Header blk)) -- ^ LoE fragment - -> LoE Word64 - -- ^ How many extra blocks to select after @b@ at most. 
-> m (Point blk) - addToCurrentChain succsOf curChainAndLedger loeFrag maxExtra = do + addToCurrentChain succsOf curChainAndLedger loeFrag = do -- Extensions of @B@ that do not exceed the LoE - let suffixesAfterB = Paths.maximalCandidates succsOf maxExtra (realPointToPoint p) + let suffixesAfterB = Paths.maximalCandidates succsOf Nothing (realPointToPoint p) -- Fragments that are anchored at @curHead@, i.e. suffixes of the -- current chain. @@ -675,10 +666,10 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do return $ AF.fromOldestFirst curHead (hdr : hdrs) let chainDiffs = NE.nonEmpty - $ map Diff.extend - $ filter (followsLoEFrag loeFrag) - $ NE.filter (preferAnchoredCandidate (bcfg chainSelEnv) curChain) - candidates + $ filter (preferAnchoredCandidate (bcfg chainSelEnv) curChain . Diff.getSuffix) + $ fmap (trimToLoE loeFrag curChainAndLedger) + $ fmap Diff.extend + $ NE.toList candidates -- All candidates are longer than the current chain, so they will be -- preferred over it, /unless/ the block we just added is an EBB, -- which has the same 'BlockNo' as the block before it, so when @@ -709,16 +700,45 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do curTip = castPoint $ AF.headPoint curChain curHead = AF.headAnchor curChain - -- Either frag extends loe or loe extends frag + -- | Trim the given candidate fragment to respect the LoE. -- - -- PRECONDITION: @AF.withinFragmentBounds (AF.anchorPoint frag) loe@ - followsLoEFrag :: LoE (AnchoredFragment (Header blk)) - -> AnchoredFragment (Header blk) - -> Bool - followsLoEFrag LoEDisabled _ = True - followsLoEFrag (LoEEnabled loe) frag = - AF.withinFragmentBounds (AF.headPoint loe) frag - || AF.withinFragmentBounds (AF.headPoint frag) loe + -- The returned fragment is such that: + -- + -- - It is a prefix of the given fragment. + -- - If it contains the tip of the LoE fragment, then it contains at most + -- @k@ block after it. + -- - If it does not contain the tip of the LoE fragment, then it is included + -- in the LoE fragment. + -- + -- The fragment is represented by the current chain and a diff with that + -- current chain. It is tempting to only consider the suffix of the diff, + -- but that would be incorrect, because the diff might not intersect with + -- the LoE fragment, because the diff suffix is anchored somewhere on the + -- current chain and LoE frag's tip might be older than that anchor. + -- + -- PRECONDITIONS: + -- + -- 1. The given 'ChainDiff' can apply on top of the given 'ChainAndLedger'. + -- 2. The LoE fragment intersects with the current selection. 
+ trimToLoE :: + LoE (AnchoredFragment (Header blk)) -> + ChainAndLedger blk -> + ChainDiff (Header blk) -> + ChainDiff (Header blk) + trimToLoE LoEDisabled _ diff = diff + trimToLoE (LoEEnabled loe) curChain diff = + case Diff.apply (VF.validatedFragment curChain) diff of + Nothing -> error "trimToLoE: precondition 1 violated: the given 'ChainDiff' must apply on top of the given 'ChainAndLedger'" + Just cand -> + case AF.intersect cand loe of + Nothing -> error "trimToLoE: precondition 2 violated: the LoE fragment must intersect with the current selection" + Just (candPrefix, _, candSuffix, loeSuffix) -> + let trimmedCandSuffix = AF.takeOldest (fromIntegral k) candSuffix + trimmedCand = + if AF.null loeSuffix + then fromJust $ AF.join candPrefix trimmedCandSuffix + else candPrefix + in Diff.diff (VF.validatedFragment curChain) trimmedCand -- | We have found a 'ChainDiff' through the VolatileDB connecting the new -- block to the current chain. We'll call the intersection/anchor @x@. @@ -734,29 +754,29 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- ^ The current chain (anchored at @i@) and ledger -> LoE (AnchoredFragment (Header blk)) -- ^ LoE fragment - -> LoE Word64 - -- ^ How many extra blocks to select after @b@ at most. -> ChainDiff (HeaderFields blk) -- ^ Header fields for @(x,b]@ -> m (Point blk) - switchToAFork succsOf lookupBlockInfo curChainAndLedger loeFrag maxExtra diff = do + switchToAFork succsOf lookupBlockInfo curChainAndLedger loeFrag diff = do -- We use a cache to avoid reading the headers from disk multiple -- times in case they're part of multiple forks that go through @b@. let initCache = Map.singleton (headerHash hdr) hdr chainDiffs <- - fmap (filter (followsLoEFrag loeFrag . Diff.getSuffix)) - - -- 4. Filter out candidates that are not preferred over the current + -- 5. Filter out candidates that are not preferred over the current -- chain. -- -- The suffixes all fork off from the current chain within @k@ -- blocks, so it satisfies the precondition of 'preferCandidate'. - . fmap + fmap ( filter ( preferAnchoredCandidate (bcfg chainSelEnv) curChain . Diff.getSuffix ) ) + -- 4. Trim fragments so that they follow the LoE, that is, they + -- extend the LoE or are extended by the LoE. Filter them out + -- otherwise. + . fmap (fmap (trimToLoE loeFrag curChainAndLedger)) -- 3. Translate the 'HeaderFields' to 'Header' by reading the -- headers from disk. . flip evalStateT initCache @@ -766,7 +786,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- for those candidates. . NE.filter (not . Diff.rollbackExceedsSuffix) -- 1. Extend the diff with candidates fitting on @B@ and not exceeding the LoE - . Paths.extendWithSuccessors succsOf lookupBlockInfo maxExtra + . Paths.extendWithSuccessors succsOf lookupBlockInfo $ diff case NE.nonEmpty chainDiffs of @@ -786,45 +806,6 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do curChain = VF.validatedFragment curChainAndLedger curTip = castPoint $ AF.headPoint curChain - -- | How many extra blocks to select at most after the tip of @newBlockFrag@ - -- according to the LoE. - -- - -- There are two cases to consider: - -- - -- 1. If @newBlockFrag@ and @loeFrag@ are on the same chain, then we cannot - -- select more than @loeLimit@ blocks after @loeFrag@. - -- - -- 2. If @newBlockFrag@ and @loeFrag@ are on different chains, then we - -- cannot select more than @loeLimit@ blocks after their intersection. 
- -- - -- In any case, 'Nothing' is returned if @newBlockFrag@ extends beyond - -- what LoE allows. - computeLoEMaxExtra :: - (HasHeader x, HeaderHash x ~ HeaderHash blk) - => LoE (AnchoredFragment (Header blk)) - -- ^ The fragment with the LoE as its tip, with the same anchor as - -- @curChain@. - -> AnchoredFragment x - -- ^ The fragment with the new block @b@ as its tip, with the same - -- anchor as @curChain@. - -> Maybe (LoE Word64) - computeLoEMaxExtra (LoEEnabled loeFrag) newBlockFrag = - -- Both fragments are on the same chain - if loeSuffixLength == 0 || rollback == 0 then - if rollback > k + loeSuffixLength - then Nothing - else Just $ LoEEnabled $ k + loeSuffixLength - rollback - else - if rollback > k - then Nothing - else Just $ LoEEnabled $ k - rollback - where - d = Diff.diff newBlockFrag loeFrag - rollback = Diff.getRollback d - loeSuffixLength = fromIntegral $ AF.length (Diff.getSuffix d) - computeLoEMaxExtra LoEDisabled _ = - Just LoEDisabled - mkSelectionChangedInfo :: AnchoredFragment (Header blk) -- ^ old chain -> AnchoredFragment (Header blk) -- ^ new chain diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs index 77f6c99ae5..7b876e06d9 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Paths.hs @@ -64,19 +64,18 @@ type LookupBlockInfo blk = HeaderHash blk -> Maybe (VolatileDB.BlockInfo blk) -- NOTE: it is possible that no candidates are found, but don't forget that -- the chain (fragment) ending with @B@ is also a potential candidate. -- --- If ChainSel is using the LoE, the value passed in @lenLimit@ will be used --- to truncate the candidates so that no more than @k@ blocks can be selected --- beyond the LoE fragment. maximalCandidates :: forall blk. (ChainHash blk -> Set (HeaderHash blk)) -- ^ @filterByPredecessor@ - -> LoE Word64 -- ^ Max length of any candidate + -> Maybe Word64 -- ^ Optional max length of any candidate, used during initial + -- chain selection when LoE is enabled. -> Point blk -- ^ @B@ -> [NonEmpty (HeaderHash blk)] -- ^ Each element in the list is a list of hashes from which we can -- construct a fragment anchored at the point @B@. -maximalCandidates succsOf loeLimit b = mapMaybe (NE.nonEmpty . applyLoE) $ go (pointHash b) +maximalCandidates succsOf sizeLimit b = + mapMaybe (NE.nonEmpty . trimToSizeLimit) $ go (pointHash b) where go :: ChainHash blk -> [[HeaderHash blk]] go mbHash = case Set.toList $ succsOf mbHash of @@ -85,11 +84,9 @@ maximalCandidates succsOf loeLimit b = mapMaybe (NE.nonEmpty . applyLoE) $ go (p | next <- succs , candidate <- go (BlockHash next) ] - applyLoE - | LoEEnabled limit <- loeLimit - = take (fromIntegral limit) - | otherwise - = id + trimToSizeLimit = case sizeLimit of + Just limit -> take (fromIntegral limit) + Nothing -> id -- | Extend the 'ChainDiff' with the successors found by 'maximalCandidates'. -- @@ -105,17 +102,16 @@ extendWithSuccessors :: forall blk. 
HasHeader blk => (ChainHash blk -> Set (HeaderHash blk)) -> LookupBlockInfo blk - -> LoE Word64 -- ^ Max extra length for any suffix -> ChainDiff (HeaderFields blk) -> NonEmpty (ChainDiff (HeaderFields blk)) -extendWithSuccessors succsOf lookupBlockInfo loeLimit diff = +extendWithSuccessors succsOf lookupBlockInfo diff = case NE.nonEmpty extensions of Nothing -> diff NE.:| [] Just extensions' -> extensions' where extensions = [ foldl' Diff.append diff (lookupHeaderFields <$> candHashes) - | candHashes <- maximalCandidates succsOf loeLimit (castPoint (Diff.getTip diff)) + | candHashes <- maximalCandidates succsOf Nothing (castPoint (Diff.getTip diff)) ] lookupHeaderFields :: HeaderHash blk -> HeaderFields blk From 6b44bfb8093abb4cc2729538fcb0254a35deed70 Mon Sep 17 00:00:00 2001 From: Nicolas BACQUEY Date: Thu, 23 May 2024 10:37:24 +0200 Subject: [PATCH 19/26] Refactor `PointSchedule` to support test end time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit refactors the `PointSchedule` type from a `newtype` of `Peers (PeerSchedule blk)` to a datatype containing this schedule as well as an additional field `psMinEndTime`. This field describes a minimal absolute time that the test must reach. If all ticks are executed before this time is reached, an extra delay is inserted. This allows getting rid of all the places in which we added extra ticks only for the purpose of making the test run longer. At the cost of slightly complicating the implementation of point schedules, this makes the semantics much clearer. The fact that `PointSchedule` is now a datatype makes it easy to later add other fields, for instance a field stating the initial tip points, instead of having to add zero-duration ticks at the beginning to set those up, or a field describing the origin of the absolute clock, instead of shifting the tick times.
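As a rough usage sketch (not taken from this patch), a test that previously had to append a dummy final tick just to keep the connection open past a timeout can now state that intent via `psMinEndTime`. `peersOnlyHonest` and `scheduleTipPoint` are existing test-suite helpers; the function name and the one-second offset are made up for illustration:

```haskell
import           Control.Monad.Class.MonadTime.SI (DiffTime, Time (Time))
import           Ouroboros.Network.Block (HasHeader)
import           Test.Consensus.PointSchedule (PointSchedule (..))
import           Test.Consensus.PointSchedule.Peers (peersOnlyHonest)
import           Test.Consensus.PointSchedule.SinglePeer (scheduleTipPoint)

-- | A schedule whose only event is announcing the tip at time 0, but whose
-- simulation is nevertheless kept alive until shortly after 'timeout', so
-- that timeout- or LoP-based disconnections still have a chance to fire.
tipOnlySchedule :: HasHeader blk => DiffTime -> blk -> PointSchedule blk
tipOnlySchedule timeout tipBlock =
  PointSchedule
    { psSchedule   = peersOnlyHonest [(Time 0, scheduleTipPoint tipBlock)]
      -- Previously this required a trailing dummy point at @timeout + 1@.
    , psMinEndTime = Time (timeout + 1)
    }
```

Compare this with the `prop_wait` and `prop_timeouts` hunks further down, which perform exactly this simplification.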
Co-authored-by: Nicolas “Niols” Jeannerod --- .../Consensus/Genesis/Setup/Classifiers.hs | 8 +- .../Test/Consensus/Genesis/Tests/CSJ.hs | 4 +- .../Genesis/Tests/DensityDisconnect.hs | 4 +- .../Test/Consensus/Genesis/Tests/LoE.hs | 10 ++- .../Test/Consensus/Genesis/Tests/LoP.hs | 36 ++++---- .../Test/Consensus/Genesis/Tests/Uniform.hs | 26 +++--- .../Test/Consensus/PeerSimulator/Run.hs | 49 ++++++----- .../PeerSimulator/Tests/LinkedThreads.hs | 11 +-- .../Consensus/PeerSimulator/Tests/Rollback.hs | 2 +- .../Consensus/PeerSimulator/Tests/Timeouts.hs | 15 ++-- .../Test/Consensus/PeerSimulator/Trace.hs | 9 ++ .../Test/Consensus/PointSchedule.hs | 51 +++++++----- .../Test/Consensus/PointSchedule/Shrinking.hs | 83 +++++++++++-------- .../PointSchedule/Shrinking/Tests.hs | 31 +++---- 14 files changed, 188 insertions(+), 151 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs index f9a7af93a8..fb4a59b1f1 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Setup/Classifiers.hs @@ -164,7 +164,7 @@ resultClassifiers GenesisTest{gtSchedule} RunGenesisTestResult{rgtrStateView} = StateView{svPeerSimulatorResults} = rgtrStateView adversaries :: [PeerId] - adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers $ unPointSchedule gtSchedule + adversaries = fmap AdversarialPeer $ Map.keys $ adversarialPeers $ psSchedule gtSchedule adversariesCount = fromIntegral $ length adversaries @@ -248,19 +248,19 @@ scheduleClassifiers GenesisTest{gtSchedule = schedule} = peerSch rollbacks :: Peers Bool - rollbacks = hasRollback <$> unPointSchedule schedule + rollbacks = hasRollback <$> psSchedule schedule adversaryRollback = any id $ adversarialPeers rollbacks honestRollback = any id $ honestPeers rollbacks - allAdversariesEmpty = all id $ adversarialPeers $ null <$> unPointSchedule schedule + allAdversariesEmpty = all id $ adversarialPeers $ null <$> psSchedule schedule isTrivial :: PeerSchedule TestBlock -> Bool isTrivial = \case [] -> True (t0, _):points -> all ((== t0) . 
fst) points - allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> unPointSchedule schedule + allAdversariesTrivial = all id $ adversarialPeers $ isTrivial <$> psSchedule schedule simpleHash :: HeaderHash block ~ TestHash => diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs index 3e38e72f09..9d74df135c 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/CSJ.hs @@ -126,9 +126,9 @@ prop_CSJ happy synchronized = where genDuplicatedHonestSchedule :: GenesisTest TestBlock () -> Gen (PointSchedule TestBlock) genDuplicatedHonestSchedule gt@GenesisTest {gtExtraHonestPeers} = do - ps@PointSchedule {unPointSchedule = Peers {honestPeers, adversarialPeers}} <- genUniformSchedulePoints gt + ps@PointSchedule {psSchedule = Peers {honestPeers, adversarialPeers}} <- genUniformSchedulePoints gt pure $ ps { - unPointSchedule = + psSchedule = Peers.unionWithKey (\_ _ _ -> error "should not happen") ( peers' diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs index fefeb18af9..0b0ddcd80f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs @@ -471,7 +471,7 @@ prop_densityDisconnectTriggersChainSel = ( \GenesisTest {gtBlockTree, gtSchedule} stateView@StateView {svTipBlock} -> let - othersCount = Map.size (adversarialPeers $ unPointSchedule gtSchedule) + othersCount = Map.size (adversarialPeers $ psSchedule gtSchedule) exnCorrect = case exceptionsByComponent ChainSyncClient stateView of [fromException -> Just DensityTooLow] -> True [] | othersCount == 0 -> True @@ -498,7 +498,7 @@ prop_densityDisconnectTriggersChainSel = (AF.Empty _) -> Origin (_ AF.:> tipBlock) -> At tipBlock advTip = getOnlyBranchTip tree - in PointSchedule $ peers' + in mkPointSchedule $ peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain up to the intersection. [[(Time 0, scheduleTipPoint trunkTip), diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs index 215c7b8721..10f2f777e3 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs @@ -88,7 +88,7 @@ prop_adversaryHitsTimeouts timeoutsEnabled = (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock branchTip = getOnlyBranchTip tree - in PointSchedule $ peers' + psSchedule = peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of @@ -107,11 +107,13 @@ prop_adversaryHitsTimeouts timeoutsEnabled = -- intersection early, then waits more than the short wait timeout. 
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of -- the alternate branch forks from `Origin` - Nothing -> [(Time 11, scheduleTipPoint branchTip)] + Nothing -> [] -- the alternate branch forks from `intersect` Just intersect -> [ (Time 0, scheduleHeaderPoint intersect), - (Time 0, scheduleBlockPoint intersect), - (Time 11, scheduleBlockPoint intersect) + (Time 0, scheduleBlockPoint intersect) ] ] + -- We want to wait more than the short wait timeout + psMinEndTime = Time 11 + in PointSchedule {psSchedule, psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs index d6b08caa57..552a10696d 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs @@ -77,14 +77,10 @@ prop_wait mustTimeout = dullSchedule _ (AF.Empty _) = error "requires a non-empty block tree" dullSchedule timeout (_ AF.:> tipBlock) = let offset :: DiffTime = if mustTimeout then 1 else -1 - in PointSchedule $ peersOnlyHonest $ - [ (Time 0, scheduleTipPoint tipBlock), - -- This last point does not matter, it is only here to leave the - -- connection open (aka. keep the test running) long enough to - -- pass the timeout by 'offset'. - (Time (timeout + offset), scheduleHeaderPoint tipBlock), - (Time (timeout + offset), scheduleBlockPoint tipBlock) - ] + in PointSchedule + { psSchedule = peersOnlyHonest [(Time 0, scheduleTipPoint tipBlock)] + , psMinEndTime = Time $ timeout + offset + } prop_waitBehindForecastHorizon :: Property prop_waitBehindForecastHorizon = @@ -107,11 +103,13 @@ prop_waitBehindForecastHorizon = dullSchedule :: (HasHeader blk) => AnchoredFragment blk -> PointSchedule blk dullSchedule (AF.Empty _) = error "requires a non-empty block tree" dullSchedule (_ AF.:> tipBlock) = - PointSchedule $ peersOnlyHonest $ - [ (Time 0, scheduleTipPoint tipBlock), - (Time 0, scheduleHeaderPoint tipBlock), - (Time 11, scheduleBlockPoint tipBlock) - ] + PointSchedule + { psSchedule = peersOnlyHonest $ + [ (Time 0, scheduleTipPoint tipBlock) + , (Time 0, scheduleHeaderPoint tipBlock) + ] + , psMinEndTime = Time 11 + } -- | Simple test where we serve all the chain at regular intervals, but just -- slow enough to lose against the LoP bucket. @@ -168,7 +166,7 @@ prop_serve mustTimeout = makeSchedule :: (HasHeader blk) => AnchoredFragment blk -> PointSchedule blk makeSchedule (AF.Empty _) = error "fragment must have at least one block" makeSchedule fragment@(_ AF.:> tipBlock) = - PointSchedule $ peersOnlyHonest $ + mkPointSchedule $ peersOnlyHonest $ (Time 0, scheduleTipPoint tipBlock) : ( flip concatMap (zip [1 ..] (AF.toOldestFirst fragment)) $ \(i, block) -> [ (Time (secondsRationalToDiffTime (i * timeBetweenBlocks)), scheduleHeaderPoint block), @@ -223,7 +221,7 @@ prop_delayAttack lopEnabled = (AF.Empty _) -> Nothing (_ AF.:> tipBlock) -> Just tipBlock branchTip = getOnlyBranchTip tree - in PointSchedule $ peers' + psSchedule = peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain. [ (Time 0, scheduleTipPoint trunkTip) : case intersectM of @@ -242,11 +240,13 @@ prop_delayAttack lopEnabled = -- past the intersection, and wait for LoP bucket. 
[ (Time 0, scheduleTipPoint branchTip) : case intersectM of -- the alternate branch forks from `Origin` - Nothing -> [(Time 11, scheduleTipPoint branchTip)] + Nothing -> [] -- the alternate branch forks from `intersect` Just intersect -> [ (Time 0, scheduleHeaderPoint intersect), - (Time 0, scheduleBlockPoint intersect), - (Time 11, scheduleBlockPoint intersect) + (Time 0, scheduleBlockPoint intersect) ] ] + -- Wait for LoP bucket to empty + psMinEndTime = Time 11 + in PointSchedule {psSchedule, psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index 709426588e..7ebdd0a84e 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -95,7 +95,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = immutableTipIsRecent ] where - advCount = Map.size (adversarialPeers (unPointSchedule $ gtSchedule genesisTest)) + advCount = Map.size (adversarialPeers (psSchedule $ gtSchedule genesisTest)) immutableTipIsRecent = counterexample ("Age of the immutable tip: " ++ show immutableTipAge) $ @@ -129,7 +129,7 @@ theProperty genesisTest stateView@StateView{svSelectedChain} = [] -> "No peers were disconnected" peers -> "Some peers were disconnected: " ++ intercalate ", " (condense <$> peers) - honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ getHonestPeer $ honestPeers $ unPointSchedule $ gtSchedule genesisTest + honestTipSlot = At $ blockSlot $ snd $ last $ mapMaybe fromBlockPoint $ getHonestPeer $ honestPeers $ psSchedule $ gtSchedule genesisTest GenesisTest {gtBlockTree, gtGenesisWindow = GenesisWindow s, gtDelay = Delta d} = genesisTest @@ -224,9 +224,9 @@ prop_leashingAttackStalling = -- timeouts to disconnect adversaries. genLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PointSchedule TestBlock) genLeashingSchedule genesisTest = do - Peers honest advs0 <- unPointSchedule . ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest - advs <- mapM dropRandomPoints advs0 - pure $ PointSchedule $ Peers honest advs + ps@PointSchedule{psSchedule = sch} <- ensureScheduleDuration genesisTest <$> genUniformSchedulePoints genesisTest + advs <- mapM dropRandomPoints $ adversarialPeers sch + pure $ ps {psSchedule = sch {adversarialPeers = advs}} disableBoringTimeouts gt = gt { gtChainSyncTimeouts = (gtChainSyncTimeouts gt) @@ -279,26 +279,20 @@ prop_leashingAttackTimeLimited = -- | A schedule which doesn't run past the last event of the honest peer genTimeLimitedSchedule :: GenesisTest TestBlock () -> QC.Gen (PointSchedule TestBlock) genTimeLimitedSchedule genesisTest = do - Peers honests advs0 <- unPointSchedule <$> genUniformSchedulePoints genesisTest + Peers honests advs0 <- psSchedule <$> genUniformSchedulePoints genesisTest let timeLimit = estimateTimeBound (gtChainSyncTimeouts genesisTest) (gtLoPBucketParams genesisTest) (getHonestPeer honests) (Map.elems advs0) advs = fmap (takePointsUntil timeLimit) advs0 - extendedHonests = extendScheduleUntil timeLimit <$> honests - pure $ PointSchedule $ Peers extendedHonests advs + pure $ PointSchedule + { psSchedule = Peers honests advs + , psMinEndTime = timeLimit + } takePointsUntil limit = takeWhile ((<= limit) . 
fst) - extendScheduleUntil - :: Time -> [(Time, SchedulePoint TestBlock)] -> [(Time, SchedulePoint TestBlock)] - extendScheduleUntil t [] = [(t, ScheduleTipPoint Origin)] - extendScheduleUntil t xs = - let (t', p) = last xs - in if t < t' then xs - else xs ++ [(t, p)] - disableBoringTimeouts gt = gt { gtChainSyncTimeouts = (gtChainSyncTimeouts gt) { canAwaitTimeout = Nothing diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index c88cc36e86..586f7776ca 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -14,6 +14,7 @@ import Control.Monad (foldM, forM, void) import Control.Monad.Class.MonadTime (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (..), nullTracer, traceWith) +import Data.Coerce (coerce) import Data.Foldable (for_) import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) @@ -56,7 +57,7 @@ import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PeerSimulator.Trace import Test.Consensus.PointSchedule (BlockFetchTimeout, CSJParams (..), GenesisTest (..), GenesisTestFull, - LoPBucketParams (..), PointSchedule (..), + LoPBucketParams (..), PointSchedule (..), peersStates, peersStatesRelative) import Test.Consensus.PointSchedule.NodeState (NodeState) import Test.Consensus.PointSchedule.Peers (Peer (..), PeerId, @@ -203,6 +204,23 @@ startBlockFetchConnectionThread BlockFetch.runBlockFetchServer tracer srPeerId tracers bfrServer serverChannel pure (clientThread, serverThread) +-- | Wait for the given duration, but if the duration is longer than the minimum +-- duration in the live cycle, shutdown the node and restart it after the delay. +smartDelay :: + (MonadDelay m) => + NodeLifecycle blk m -> + LiveNode blk m -> + DiffTime -> + m (LiveNode blk m) +smartDelay NodeLifecycle {nlMinDuration, nlStart, nlShutdown} node duration + | Just minInterval <- nlMinDuration, duration > minInterval = do + results <- nlShutdown node + threadDelay duration + nlStart results +smartDelay _ node duration = do + threadDelay duration + pure node + -- | The 'Tick' contains a state update for a specific peer. -- If the peer has not terminated by protocol rules, this will update its TMVar -- with the new state, thereby unblocking the handler that's currently waiting @@ -223,25 +241,11 @@ dispatchTick tracer varHandles peers lifecycle node (number, (duration, Peer pid Just PeerResources {prUpdateState} -> do traceNewTick atomically (prUpdateState state) - newNode <- checkDowntime + newNode <- smartDelay lifecycle node duration traceWith (lnStateTracer newNode) () pure newNode Nothing -> error "“The impossible happened,” as GHC would say." 
where - checkDowntime - | Just minInterval <- nlMinDuration - , duration > minInterval - = do - results <- nlShutdown node - threadDelay duration - nlStart results - | otherwise - = do - threadDelay duration - pure node - - NodeLifecycle {nlMinDuration, nlStart, nlShutdown} = lifecycle - traceNewTick :: m () traceNewTick = do currentChain <- atomically $ ChainDB.getCurrentChain (lnChainDb node) @@ -273,10 +277,17 @@ runScheduler :: Map PeerId (PeerResources m blk) -> NodeLifecycle blk m -> m (ChainDB m blk, StateViewTracers blk m) -runScheduler tracer varHandles ps peers lifecycle@NodeLifecycle {nlStart} = do +runScheduler tracer varHandles ps@PointSchedule{psMinEndTime} peers lifecycle@NodeLifecycle {nlStart} = do node0 <- nlStart LiveIntervalResult {lirActive = Map.keysSet peers, lirPeerResults = []} traceWith tracer TraceBeginningOfTime - LiveNode {lnChainDb, lnStateViewTracers} <- foldM tick node0 (zip [0..] (peersStatesRelative ps)) + nodeEnd <- foldM tick node0 (zip [0..] (peersStatesRelative ps)) + let extraDelay = case take 1 $ reverse $ peersStates ps of + [(t, _)] -> if t < psMinEndTime + then Just $ diffTime psMinEndTime t + else Nothing + _ -> Just $ coerce psMinEndTime + LiveNode{lnChainDb, lnStateViewTracers} <- + maybe (pure nodeEnd) (smartDelay lifecycle nodeEnd) extraDelay traceWith tracer TraceEndOfTime pure (lnChainDb, lnStateViewTracers) where @@ -468,7 +479,7 @@ runPointSchedule :: m (StateView TestBlock) runPointSchedule schedulerConfig genesisTest tracer0 = withRegistry $ \registry -> do - peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds $ unPointSchedule gtSchedule) + peerSim <- makePeerSimulatorResources tracer gtBlockTree (NonEmpty.fromList $ getPeerIds $ psSchedule gtSchedule) lifecycle <- nodeLifecycle schedulerConfig genesisTest tracer registry peerSim (chainDb, stateViewTracers) <- runScheduler (Tracer $ traceWith tracer . 
TraceSchedulerEvent) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs index d1e4d8e2a5..a94c69a968 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs @@ -21,8 +21,8 @@ import Test.Consensus.PeerSimulator.Run (defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule import Test.Consensus.PointSchedule.Peers (peersOnlyHonest) -import Test.Consensus.PointSchedule.SinglePeer (scheduleBlockPoint, - scheduleHeaderPoint, scheduleTipPoint) +import Test.Consensus.PointSchedule.SinglePeer (scheduleHeaderPoint, + scheduleTipPoint) import Test.QuickCheck import Test.Tasty import Test.Tasty.QuickCheck @@ -67,8 +67,9 @@ prop_chainSyncKillsBlockFetch = do let (firstBlock, secondBlock) = case AF.toOldestFirst $ btTrunk gtBlockTree of b1 : b2 : _ -> (b1, b2) _ -> error "block tree must have two blocks" - in PointSchedule $ peersOnlyHonest $ + psSchedule = peersOnlyHonest $ [ (Time 0, scheduleTipPoint secondBlock), - (Time 0, scheduleHeaderPoint firstBlock), - (Time (timeout + 1), scheduleBlockPoint firstBlock) + (Time 0, scheduleHeaderPoint firstBlock) ] + psMinEndTime = Time $ timeout + 1 + in PointSchedule {psSchedule, psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs index dc27bbc176..17509dc458 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs @@ -95,7 +95,7 @@ rollbackSchedule n blockTree = , banalSchedulePoints trunkSuffix , banalSchedulePoints (btbSuffix branch) ] - in PointSchedule $ peersOnlyHonest $ zip (map (Time . (/30)) [0..]) schedulePoints + in mkPointSchedule $ peersOnlyHonest $ zip (map (Time . (/30)) [0..]) schedulePoints where banalSchedulePoints :: AnchoredFragment blk -> [SchedulePoint blk] banalSchedulePoints = concatMap banalSchedulePoints' . toOldestFirst diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs index 3cdde2a1e0..0c594c67e3 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs @@ -63,12 +63,11 @@ prop_timeouts mustTimeout = do dullSchedule _ (AF.Empty _) = error "requires a non-empty block tree" dullSchedule timeout (_ AF.:> tipBlock) = let offset :: DiffTime = if mustTimeout then 1 else -1 - in PointSchedule $ peersOnlyHonest $ [ - (Time 0, scheduleTipPoint tipBlock), - (Time 0, scheduleHeaderPoint tipBlock), - (Time 0, scheduleBlockPoint tipBlock), - -- This last point does not matter, it is only here to leave the - -- connection open (aka. keep the test running) long enough to - -- pass the timeout by 'offset'. 
- (Time (timeout + offset), scheduleTipPoint tipBlock) + psSchedule = peersOnlyHonest $ [ + (Time 0, scheduleTipPoint tipBlock), + (Time 0, scheduleHeaderPoint tipBlock), + (Time 0, scheduleBlockPoint tipBlock) ] + -- This keeps the test running long enough to pass the timeout by 'offset'. + psMinEndTime = Time $ timeout + offset + in PointSchedule {psSchedule, psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs index 45711215ac..f003dfe447 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Trace.hs @@ -64,6 +64,8 @@ data TraceSchedulerEvent blk TraceBeginningOfTime | -- | Right after running the last tick of the schedule. TraceEndOfTime + | -- | An extra optional delay to keep the simulation running + TraceExtraDelay DiffTime | -- | When beginning a new tick. Contains the tick number (counting from -- @0@), the duration of the tick, the states, the current chain, the -- candidate fragment, and the jumping states. @@ -196,6 +198,13 @@ traceSchedulerEventTestBlockWith setTickTime tracer0 _tracer = \case [ "╶──────────────────────────────────────────────────────────────────────────────╴", "Finished running point schedule" ] + TraceExtraDelay delay -> do + time <- getMonotonicTime + traceLinesWith tracer0 + [ "┌──────────────────────────────────────────────────────────────────────────────┐", + "└─ " ++ prettyTime time, + "Waiting an extra delay to keep the simulation running for: " ++ prettyTime (Time delay) + ] TraceNewTick number duration (Peer pid state) currentChain mCandidateFrag jumpingStates -> do time <- getMonotonicTime setTickTime time diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs index 4c4520039f..aa4ec8f315 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs @@ -37,6 +37,7 @@ module Test.Consensus.PointSchedule ( , ensureScheduleDuration , genesisNodeState , longRangeAttack + , mkPointSchedule , peerSchedulesBlocks , peerStates , peersStates @@ -52,15 +53,13 @@ import Control.Monad (replicateM) import Control.Monad.Class.MonadTime.SI (Time (Time), addTime, diffTime) import Control.Monad.ST (ST) -import Data.Foldable (toList) import Data.Functor (($>)) import Data.List (mapAccumL, partition, scanl') import qualified Data.Map.Strict as Map import Data.Maybe (catMaybes, fromMaybe, mapMaybe) import Data.Time (DiffTime) import Data.Word (Word64) -import Ouroboros.Consensus.Block.Abstract (WithOrigin (..), - withOriginToMaybe) +import Ouroboros.Consensus.Block.Abstract (withOriginToMaybe) import Ouroboros.Consensus.Ledger.SupportsProtocol (GenesisWindow (..)) import Ouroboros.Consensus.Network.NodeToNode (ChainSyncTimeout (..)) @@ -99,8 +98,9 @@ prettyPointSchedule :: PointSchedule blk -> [String] prettyPointSchedule peers = - [ "honest peers: " ++ show (Map.size (honestPeers $ unPointSchedule peers)) - , "adversaries: " ++ show (Map.size (adversarialPeers $ unPointSchedule peers)) + [ "honest peers: " ++ show (Map.size (honestPeers $ psSchedule peers)) + , "adversaries: " ++ show (Map.size (adversarialPeers $ psSchedule peers)) + , "minimal 
duration: " ++ show (psMinEndTime peers) ] ++ zipWith3 (\number time peerState -> @@ -157,7 +157,8 @@ peerStates Peer {name, value = schedulePoints} = -- -- The resulting schedule contains all the peers. Items are sorted by time. peersStates :: PointSchedule blk -> [(Time, Peer (NodeState blk))] -peersStates peers = foldr (mergeOn fst) [] (peerStates <$> toList (peersList $ unPointSchedule peers)) +peersStates PointSchedule{psSchedule} = + foldr (mergeOn fst) [] (peerStates <$> peersList psSchedule) -- | Same as 'peersStates' but returns the duration of a state instead of the -- absolute time at which it starts holding. @@ -173,13 +174,21 @@ type PeerSchedule blk = [(Time, SchedulePoint blk)] peerScheduleBlocks :: (PeerSchedule blk) -> [blk] peerScheduleBlocks = mapMaybe (withOriginToMaybe . schedulePointToBlock . snd) -newtype PointSchedule blk = PointSchedule { - unPointSchedule :: Peers (PeerSchedule blk) +data PointSchedule blk = PointSchedule { + -- | The actual point schedule + psSchedule :: Peers (PeerSchedule blk), + -- | Minimum duration for the simulation of this point schedule. + -- If no point in the schedule is larger than 'psMinEndTime', + -- the simulation will still run until this time is reached. + psMinEndTime :: Time } +mkPointSchedule :: Peers (PeerSchedule blk) -> PointSchedule blk +mkPointSchedule sch = PointSchedule sch $ Time 0 + -- | List of all blocks appearing in the schedules. -peerSchedulesBlocks :: PointSchedule blk -> [blk] -peerSchedulesBlocks = concatMap (peerScheduleBlocks . value) . toList . peersList . unPointSchedule +peerSchedulesBlocks :: Peers (PeerSchedule blk) -> [blk] +peerSchedulesBlocks = concatMap (peerScheduleBlocks . value) . peersList ---------------------------------------------------------------------------------------------------- -- Schedule generators @@ -199,7 +208,7 @@ longRangeAttack :: longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do honest <- peerScheduleFromTipPoints g honParams [(IsTrunk, [AF.length btTrunk - 1])] btTrunk [] adv <- peerScheduleFromTipPoints g advParams [(IsBranch, [AF.length (btbFull branch) - 1])] btTrunk [btbFull branch] - pure $ PointSchedule $ peers' [honest] [adv] + pure $ mkPointSchedule $ peers' [honest] [adv] where honParams = defaultPeerScheduleParams {pspHeaderDelayInterval = (0.3, 0.4)} advParams = defaultPeerScheduleParams {pspTipDelayInterval = (0, 0.1)} @@ -245,7 +254,7 @@ uniformPointsWithExtraHonestPeers honests <- replicateM (extraHonestPeers + 1) $ mkSchedule [(IsTrunk, [honestTip0 .. 
AF.length btTrunk - 1])] [] advs <- takeBranches btBranches - pure $ PointSchedule $ peers' honests advs + pure $ mkPointSchedule $ peers' honests advs where takeBranches = \case [] -> pure [] @@ -374,7 +383,7 @@ uniformPointsWithExtraHonestPeersAndDowntime mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] advs <- takeBranches pauseSlot btBranches let (honests', advs') = syncTips honests advs - pure $ PointSchedule $ peers' honests' advs' + pure $ mkPointSchedule $ peers' honests' advs' where takeBranches pause = \case [] -> pure [] @@ -535,16 +544,12 @@ stToGen gen = do seed :: QCGen <- arbitrary pure (runSTGen_ seed gen) -duplicateLastPoint - :: DiffTime -> [(Time, SchedulePoint blk)] -> [(Time, SchedulePoint blk)] -duplicateLastPoint d [] = [(Time d, ScheduleTipPoint Origin)] -duplicateLastPoint d xs = - let (t, p) = last xs - in xs ++ [(addTime d t, p)] - ensureScheduleDuration :: GenesisTest blk a -> PointSchedule blk -> PointSchedule blk -ensureScheduleDuration gt ps = - PointSchedule $ duplicateLastPoint endingDelay <$> unPointSchedule ps +ensureScheduleDuration gt PointSchedule{psSchedule, psMinEndTime} = + PointSchedule + { psSchedule + , psMinEndTime = max psMinEndTime (Time endingDelay) + } where endingDelay = let cst = gtChainSyncTimeouts gt @@ -555,4 +560,4 @@ ensureScheduleDuration gt ps = , busyTimeout bft , streamingTimeout bft ]) - peerCount = length (peersList $ unPointSchedule ps) + peerCount = length (peersList psSchedule) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs index f4fb4ef54f..820f844b22 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs @@ -12,6 +12,7 @@ module Test.Consensus.PointSchedule.Shrinking ( import Control.Monad.Class.MonadTime.SI (DiffTime, Time, addTime, diffTime) import Data.Containers.ListUtils (nubOrd) +import Data.Foldable (toList) import Data.Functor ((<&>)) import qualified Data.Map.Strict as Map import Data.Maybe (mapMaybe) @@ -29,41 +30,65 @@ import Test.QuickCheck (shrinkList) import Test.Util.TestBlock (TestBlock, isAncestorOf, isStrictAncestorOf) --- | Shrink a 'PointSchedule'. This does not affect the honest peer; it --- does, however, attempt to remove other peers or ticks of other peers. The --- block tree is trimmed to keep only parts that are necessary for the shrunk --- schedule. +-- | Shrink a 'PointSchedule'. We use a different logic to shrink honest and +-- adversarial peers. For adversarial peers, we just remove arbitrary points, +-- or peers altogether. For honest peers, we "speed up" the schedule by merging +-- adjacent points. +-- The block tree is trimmed to keep only parts that are necessary for the shrunk +-- schedules. 
shrinkPeerSchedules :: GenesisTestFull TestBlock -> StateView TestBlock -> [GenesisTestFull TestBlock] -shrinkPeerSchedules genesisTest _stateView = - let trimmedBlockTree sch = trimBlockTree' sch (gtBlockTree genesisTest) +shrinkPeerSchedules genesisTest@GenesisTest{gtBlockTree, gtSchedule} _stateView = + let PointSchedule {psSchedule} = gtSchedule + simulationDuration = duration gtSchedule + trimmedBlockTree sch = trimBlockTree' sch gtBlockTree shrunkAdversarialPeers = - shrinkAdversarialPeers shrinkAdversarialPeer (unPointSchedule $ gtSchedule genesisTest) + shrinkAdversarialPeers shrinkAdversarialPeer psSchedule <&> \shrunkSchedule -> genesisTest - { gtSchedule = PointSchedule shrunkSchedule, - gtBlockTree = trimmedBlockTree $ PointSchedule shrunkSchedule + { gtSchedule = PointSchedule + { psSchedule = shrunkSchedule + , psMinEndTime = simulationDuration + } + , gtBlockTree = trimmedBlockTree shrunkSchedule } shrunkHonestPeers = shrinkHonestPeers - (gtSchedule genesisTest) - -- No need to update the tree here, shrinking the honest peers never discards blocks - <&> \shrunkSchedule -> genesisTest {gtSchedule = shrunkSchedule} + psSchedule + -- No need to update the tree here, shrinking the honest peers never discards blocks + <&> \shrunkSchedule -> genesisTest + { gtSchedule = PointSchedule + { psSchedule = shrunkSchedule + , psMinEndTime = simulationDuration + } + } in shrunkAdversarialPeers ++ shrunkHonestPeers -- | Shrink a 'PointSchedule' by removing adversaries. This does not affect --- the honest peer; and it does not remove ticks from the schedules of the +-- the honest peers; and it does not remove ticks from the schedules of the -- remaining adversaries. shrinkByRemovingAdversaries :: GenesisTestFull TestBlock -> StateView TestBlock -> [GenesisTestFull TestBlock] -shrinkByRemovingAdversaries genesisTest _stateView = - shrinkAdversarialPeers (const []) (unPointSchedule $ gtSchedule genesisTest) <&> \shrunkSchedule -> - let trimmedBlockTree = trimBlockTree' (PointSchedule shrunkSchedule) (gtBlockTree genesisTest) - in (genesisTest{gtSchedule = PointSchedule shrunkSchedule, gtBlockTree = trimmedBlockTree}) +shrinkByRemovingAdversaries genesisTest@GenesisTest{gtSchedule, gtBlockTree} _stateView = + shrinkAdversarialPeers (const []) (psSchedule gtSchedule) <&> \shrunkSchedule -> + let + trimmedBlockTree = trimBlockTree' shrunkSchedule gtBlockTree + simulationDuration = duration gtSchedule + in genesisTest + { gtSchedule = PointSchedule + { psSchedule = shrunkSchedule + , psMinEndTime = simulationDuration + } + , gtBlockTree = trimmedBlockTree + } + +duration :: PointSchedule blk -> Time +duration PointSchedule {psSchedule, psMinEndTime} = + maximum $ psMinEndTime : [ t | sch <- toList psSchedule, (t, _) <- take 1 (reverse sch) ] -- | Shrink a 'PeerSchedule' by removing ticks from it. The other ticks are kept -- unchanged. @@ -82,30 +107,20 @@ shrinkAdversarialPeers shrink Peers {honestPeers, adversarialPeers} = -- 'PeerSchedule' at this point, there is no proper notion of a tick. Instead, -- we remove points from the honest 'PeerSchedule', and move all other points sooner. -- --- We check that this operation does not changes the final state of the honest peer, +-- We check that this operation does not changes the final state of the honest peers, -- that is, it keeps the same final tip point, header point, and block point. 
-- --- NOTE: This operation makes the honest peer to end its schedule sooner, which *may* +-- NOTE: This operation makes the honest peers end their schedule sooner, which *may* -- trigger disconnections when the timeout for MsgAwaitReply is reached. In those cases, -- it is probably more pertinent to disable this timeout in tests than to disable shrinking. -shrinkHonestPeers :: PointSchedule blk -> [PointSchedule blk] -shrinkHonestPeers PointSchedule {unPointSchedule = Peers {honestPeers, adversarialPeers}} = do +shrinkHonestPeers :: Peers (PeerSchedule blk) -> [Peers (PeerSchedule blk)] +shrinkHonestPeers Peers {honestPeers, adversarialPeers} = do (k, honestSch) <- Map.toList honestPeers - let (lastHonest, _) = last honestSch shrunk <- shrinkHonestPeer honestSch - pure $ PointSchedule $ Peers + pure $ Peers { honestPeers = Map.insert k shrunk honestPeers - , adversarialPeers = fmap (extendAdversary lastHonest) adversarialPeers + , adversarialPeers } - where - -- Add an extra point at the end of the adversarial schedule if the honest one - -- was longer than it. Preserves the total duration of the simulation, so that - -- timeouts/LoP disconnections can still happen. - extendAdversary tLast = \case - [] -> [] - ps -> case last ps of - (t, p) | t < tLast -> ps ++ [(tLast, p)] - _ -> ps shrinkHonestPeer :: PeerSchedule blk -> [PeerSchedule blk] shrinkHonestPeer sch = @@ -147,7 +162,7 @@ speedUpTheSchedule sch (at, speedUpBy) = -- | Remove blocks from the given block tree that are not necessary for the -- given peer schedules. If entire branches are unused, they are removed. If the -- trunk is unused, then it remains as an empty anchored fragment. -trimBlockTree' :: PointSchedule TestBlock -> BlockTree TestBlock -> BlockTree TestBlock +trimBlockTree' :: Peers (PeerSchedule TestBlock) -> BlockTree TestBlock -> BlockTree TestBlock trimBlockTree' = keepOnlyAncestorsOf . peerSchedulesBlocks -- | Given some blocks and a block tree, keep only the prefix of the block tree diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs index e85087205f..d715375a3f 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs @@ -1,4 +1,5 @@ {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} -- | Test properties of the shrinking functions @@ -40,24 +41,24 @@ lastM [] = Nothing lastM [a] = Just a lastM (_:ps) = lastM ps -samePeers :: PointSchedule blk -> PointSchedule blk -> Bool +samePeers :: Peers (PeerSchedule blk) -> Peers (PeerSchedule blk) -> Bool samePeers sch1 sch2 = - (keys $ adversarialPeers $ unPointSchedule sch1) - == (keys $ adversarialPeers $ unPointSchedule sch2) + (keys $ adversarialPeers sch1) + == (keys $ adversarialPeers sch2) -- | Checks whether at least one peer schedule in the second given peers schedule -- is shorter than its corresponding one in the fist given peers schedule. “Shorter” -- here means that it executes in less time. 
-isShorterThan :: PointSchedule blk -> PointSchedule blk -> Bool +isShorterThan :: Peers (PeerSchedule blk) -> Peers (PeerSchedule blk) -> Bool isShorterThan original shrunk = samePeers original shrunk && (or $ zipWith (\oldSch newSch -> (fst <$> lastM newSch) < (fst <$> lastM oldSch)) - (toList $ unPointSchedule original) - (toList $ unPointSchedule shrunk) + (toList original) + (toList shrunk) ) -doesNotChangeFinalState :: Eq blk => PointSchedule blk -> PointSchedule blk -> Bool +doesNotChangeFinalState :: Eq blk => Peers (PeerSchedule blk) -> Peers (PeerSchedule blk) -> Bool doesNotChangeFinalState original shrunk = samePeers original shrunk && (and $ zipWith @@ -66,8 +67,8 @@ doesNotChangeFinalState original shrunk = lastHP oldSch == lastHP newSch && lastBP oldSch == lastBP newSch ) - (toList $ unPointSchedule original) - (toList $ unPointSchedule shrunk) + (toList original) + (toList shrunk) ) where lastTP :: PeerSchedule blk -> Maybe (SchedulePoint blk) @@ -77,20 +78,20 @@ doesNotChangeFinalState original shrunk = lastBP :: PeerSchedule blk -> Maybe (SchedulePoint blk) lastBP sch = lastM $ mapMaybe (\case (_, p@(ScheduleBlockPoint _)) -> Just p ; _ -> Nothing) sch -checkShrinkProperty :: (PointSchedule TestBlock -> PointSchedule TestBlock -> Bool) -> Property +checkShrinkProperty :: (Peers (PeerSchedule TestBlock) -> Peers (PeerSchedule TestBlock) -> Bool) -> Property checkShrinkProperty prop = forAllBlind (genChains (choose (1, 4)) >>= genUniformSchedulePoints) - (\schedule -> + (\sch@PointSchedule{psSchedule, psMinEndTime} -> conjoin $ map (\shrunk -> counterexample ( "Original schedule:\n" - ++ unlines (map (" " ++) $ prettyPointSchedule schedule) + ++ unlines (map (" " ++) $ prettyPointSchedule sch) ++ "\nShrunk schedule:\n" - ++ unlines (map (" " ++) $ prettyPointSchedule shrunk) + ++ unlines (map (" " ++) $ prettyPointSchedule $ PointSchedule shrunk psMinEndTime) ) - (prop schedule shrunk) + (prop psSchedule shrunk) ) - (shrinkHonestPeers schedule) + (shrinkHonestPeers psSchedule) ) From 6500072ad6bad73445e296800443515e3458cedb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Mon, 27 May 2024 18:27:28 +0000 Subject: [PATCH 20/26] Introduce a collection of chainsync handles that synchronizes a map and a queue --- .../Ouroboros/Consensus/NodeKernel.hs | 19 ++--- .../Consensus/PeerSimulator/CSJInvariants.hs | 10 +-- .../Test/Consensus/PeerSimulator/ChainSync.hs | 12 ++-- .../Consensus/PeerSimulator/NodeLifecycle.hs | 4 +- .../Test/Consensus/PeerSimulator/Resources.hs | 7 +- .../Test/Consensus/PeerSimulator/Run.hs | 23 +++--- .../MiniProtocol/ChainSync/Client.hs | 12 ++-- .../MiniProtocol/ChainSync/Client/Jumping.hs | 58 ++++++++------- .../MiniProtocol/ChainSync/Client/State.hs | 70 ++++++++++++++++++- 9 files changed, 145 insertions(+), 70 deletions(-) diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs index 0b16a1ae3d..351ed37333 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs @@ -63,8 +63,9 @@ import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Mempool import qualified Ouroboros.Consensus.MiniProtocol.BlockFetch.ClientInterface as BlockFetchClientInterface import 
Ouroboros.Consensus.MiniProtocol.ChainSync.Client - (ChainSyncClientHandle (..), ChainSyncState (..), - viewChainSyncState) + (ChainSyncClientHandle (..), + ChainSyncClientHandleCollection (..), ChainSyncState (..), + newChainSyncClientHandleCollection, viewChainSyncState) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck (SomeHeaderInFutureCheck) import Ouroboros.Consensus.Node.Genesis (GenesisNodeKernelArgs (..), @@ -142,7 +143,7 @@ data NodeKernel m addrNTN addrNTC blk = NodeKernel { , getGsmState :: STM m GSM.GsmState -- | The kill handle and exposed state for each ChainSync client. - , getChainSyncHandles :: StrictTVar m (Map (ConnectionId addrNTN) (ChainSyncClientHandle m blk)) + , getChainSyncHandles :: ChainSyncClientHandleCollection (ConnectionId addrNTN) m blk -- | Read the current peer sharing registry, used for interacting with -- the PeerSharing protocol @@ -244,7 +245,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers <&> \wd (_headers, lst) -> GSM.getDurationUntilTooOld wd (getTipSlot lst) , GSM.equivalent = (==) `on` (AF.headPoint . fst) - , GSM.getChainSyncStates = fmap cschState <$> readTVar varChainSyncHandles + , GSM.getChainSyncStates = fmap cschState <$> cschcMap varChainSyncHandles , GSM.getCurrentSelection = do headers <- ChainDB.getCurrentChain chainDB extLedgerState <- ChainDB.getCurrentLedger chainDB @@ -256,7 +257,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers , GSM.writeGsmState = \gsmState -> do atomicallyWithMonotonicTime $ \time -> do writeTVar varGsmState gsmState - handles <- readTVar varChainSyncHandles + handles <- cschcMap varChainSyncHandles traverse_ (($ time) . ($ gsmState) . cschGsmCallback) handles , GSM.isHaaSatisfied = do readTVar varOutboundConnectionsState <&> \case @@ -291,7 +292,7 @@ initNodeKernel args@NodeKernelArgs { registry, cfg, tracers chainDB (readTVar varGsmState) -- TODO GDD should only consider (big) ledger peers - (readTVar varChainSyncHandles) + (cschcMap varChainSyncHandles) varLoEFragment void $ forkLinkedThread registry "NodeKernel.blockForging" $ @@ -347,7 +348,7 @@ data InternalState m addrNTN addrNTC blk = IS { , chainDB :: ChainDB m blk , blockFetchInterface :: BlockFetchConsensusInterface (ConnectionId addrNTN) (Header blk) blk m , fetchClientRegistry :: FetchClientRegistry (ConnectionId addrNTN) (Header blk) blk m - , varChainSyncHandles :: StrictTVar m (Map (ConnectionId addrNTN) (ChainSyncClientHandle m blk)) + , varChainSyncHandles :: ChainSyncClientHandleCollection (ConnectionId addrNTN) m blk , varGsmState :: StrictTVar m GSM.GsmState , mempool :: Mempool m blk , peerSharingRegistry :: PeerSharingRegistry addrNTN m @@ -375,7 +376,7 @@ initInternalState NodeKernelArgs { tracers, chainDB, registry, cfg gsmMarkerFileView newTVarIO gsmState - varChainSyncHandles <- newTVarIO mempty + varChainSyncHandles <- atomically newChainSyncClientHandleCollection mempool <- openMempool registry (chainDBLedgerInterface chainDB) (configLedger cfg) @@ -386,7 +387,7 @@ initInternalState NodeKernelArgs { tracers, chainDB, registry, cfg fetchClientRegistry <- newFetchClientRegistry let getCandidates :: STM m (Map (ConnectionId addrNTN) (AnchoredFragment (Header blk))) - getCandidates = viewChainSyncState varChainSyncHandles csCandidate + getCandidates = viewChainSyncState (cschcMap varChainSyncHandles) csCandidate slotForgeTimeOracle <- BlockFetchClientInterface.initSlotForgeTimeOracle cfg chainDB let readFetchMode = BlockFetchClientInterface.readFetchModeDefault diff --git 
a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs index 7b83e04101..0d66e1f154 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/CSJInvariants.hs @@ -19,7 +19,7 @@ import Data.Typeable (Typeable) import Ouroboros.Consensus.Block (Point, StandardHash, castPoint) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State as CSState import Ouroboros.Consensus.Util.IOLike (Exception, MonadSTM (STM), - MonadThrow (throwIO), StrictTVar, readTVar) + MonadThrow (throwIO), readTVar) import Ouroboros.Consensus.Util.STM (Watcher (..)) -------------------------------------------------------------------------------- @@ -109,10 +109,10 @@ readAndView :: forall m peer blk. ( MonadSTM m ) => - StrictTVar m (Map peer (CSState.ChainSyncClientHandle m blk)) -> + STM m (Map peer (CSState.ChainSyncClientHandle m blk)) -> STM m (View peer blk) -readAndView handles = - traverse (fmap idealiseState . readTVar . CSState.cschJumping) =<< readTVar handles +readAndView readHandles = + traverse (fmap idealiseState . readTVar . CSState.cschJumping) =<< readHandles where -- Idealise the state of a ChainSync peer with respect to ChainSync jumping. -- In particular, we get rid of non-comparable information such as the TVars @@ -170,7 +170,7 @@ watcher :: Typeable blk, StandardHash blk ) => - StrictTVar m (Map peer (CSState.ChainSyncClientHandle m blk)) -> + STM m (Map peer (CSState.ChainSyncClientHandle m blk)) -> Watcher m (View peer blk) (View peer blk) watcher handles = Watcher diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs index 2197bea732..5678b4d7c9 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/ChainSync.hs @@ -13,7 +13,6 @@ module Test.Consensus.PeerSimulator.ChainSync ( import Control.Exception (SomeException) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (Tracer), nullTracer, traceWith) -import Data.Map.Strict (Map) import Data.Proxy (Proxy (..)) import Network.TypedProtocol.Codec (AnyMessage) import Ouroboros.Consensus.Block (Header, Point) @@ -21,15 +20,16 @@ import Ouroboros.Consensus.Config (TopLevelConfig (..)) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client - (CSJConfig (..), ChainDbView, ChainSyncClientHandle, - ChainSyncLoPBucketConfig, ChainSyncStateView (..), - Consensus, bracketChainSyncClient, chainSyncClient) + (CSJConfig (..), ChainDbView, + ChainSyncClientHandleCollection, ChainSyncLoPBucketConfig, + ChainSyncStateView (..), Consensus, bracketChainSyncClient, + chainSyncClient) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck import Ouroboros.Consensus.Node.GsmState (GsmState (Syncing)) import Ouroboros.Consensus.Util (ShowProxy) import Ouroboros.Consensus.Util.IOLike (Exception (fromException), - IOLike, MonadCatch (try), StrictTVar) + IOLike, MonadCatch (try)) 
import Ouroboros.Network.Block (Tip) import Ouroboros.Network.Channel (Channel) import Ouroboros.Network.ControlMessage (ControlMessage (..)) @@ -124,7 +124,7 @@ runChainSyncClient :: -- ^ Configuration for ChainSync Jumping StateViewTracers blk m -> -- ^ Tracers used to record information for the future 'StateView'. - StrictTVar m (Map PeerId (ChainSyncClientHandle m blk)) -> + ChainSyncClientHandleCollection PeerId m blk -> -- ^ A TVar containing a map of states for each peer. This -- function will (via 'bracketChainSyncClient') register and de-register a -- TVar for the state of the peer. diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs index 3d6ea7d04e..993d1b1263 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs @@ -19,6 +19,8 @@ import Data.Set (Set) import qualified Data.Set as Set import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config (TopLevelConfig (..)) +import Ouroboros.Consensus.MiniProtocol.ChainSync.Client + (ChainSyncClientHandleCollection (..)) import Ouroboros.Consensus.Storage.ChainDB.API import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.Impl as ChainDB @@ -204,7 +206,7 @@ lifecycleStop resources LiveNode {lnStateViewTracers, lnCopyToImmDb, lnPeers} = releaseAll lrRegistry -- Reset the resources in TVars that were allocated by the simulator atomically $ do - modifyTVar psrHandles (const mempty) + cschcRemoveAllHandles psrHandles case lrLoEVar of LoEEnabled var -> modifyTVar var (const (AF.Empty AF.AnchorGenesis)) LoEDisabled -> pure () diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Resources.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Resources.hs index c4fe394a60..a594d9059c 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Resources.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Resources.hs @@ -25,7 +25,8 @@ import Data.Traversable (for) import Ouroboros.Consensus.Block (WithOrigin (Origin)) import Ouroboros.Consensus.Block.Abstract (Header, Point (..)) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client - (ChainSyncClientHandle) + (ChainSyncClientHandleCollection, + newChainSyncClientHandleCollection) import Ouroboros.Consensus.Util.IOLike (IOLike, MonadSTM (STM), StrictTVar, readTVar, uncheckedNewTVarM, writeTVar) import qualified Ouroboros.Network.AnchoredFragment as AF @@ -115,7 +116,7 @@ data PeerSimulatorResources m blk = -- | Handles to interact with the ChainSync client of each peer. -- See 'ChainSyncClientHandle' for more details. - psrHandles :: StrictTVar m (Map PeerId (ChainSyncClientHandle m TestBlock)) + psrHandles :: ChainSyncClientHandleCollection PeerId m TestBlock } -- | Create 'ChainSyncServerHandlers' for our default implementation using 'NodeState'. 
@@ -233,5 +234,5 @@ makePeerSimulatorResources tracer blockTree peers = do resources <- for peers $ \ peerId -> do peerResources <- makePeerResources tracer blockTree peerId pure (peerId, peerResources) - psrHandles <- uncheckedNewTVarM mempty + psrHandles <- atomically newChainSyncClientHandleCollection pure PeerSimulatorResources {psrPeers = Map.fromList $ toList resources, psrHandles} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index 586f7776ca..63c8897e33 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -26,7 +26,9 @@ import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client (CSJConfig (..), CSJEnabledConfig (..), ChainDbView, - ChainSyncClientHandle, ChainSyncLoPBucketConfig (..), + ChainSyncClientHandle, + ChainSyncClientHandleCollection (..), + ChainSyncLoPBucketConfig (..), ChainSyncLoPBucketEnabledConfig (..), viewChainSyncState) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.Node.GsmState as GSM @@ -147,7 +149,7 @@ startChainSyncConnectionThread :: ChainSyncLoPBucketConfig -> CSJConfig -> StateViewTracers blk m -> - StrictTVar m (Map PeerId (ChainSyncClientHandle m blk)) -> + ChainSyncClientHandleCollection PeerId m blk -> m (Thread m (), Thread m ()) startChainSyncConnectionThread registry @@ -230,7 +232,7 @@ smartDelay _ node duration = do dispatchTick :: forall m blk. IOLike m => Tracer m (TraceSchedulerEvent blk) -> - StrictTVar m (Map PeerId (ChainSyncClientHandle m blk)) -> + STM m (Map PeerId (ChainSyncClientHandle m blk)) -> Map PeerId (PeerResources m blk) -> NodeLifecycle blk m -> LiveNode blk m -> @@ -250,7 +252,7 @@ dispatchTick tracer varHandles peers lifecycle node (number, (duration, Peer pid traceNewTick = do currentChain <- atomically $ ChainDB.getCurrentChain (lnChainDb node) (csState, jumpingStates) <- atomically $ do - m <- readTVar varHandles + m <- varHandles csState <- traverse (readTVar . CSClient.cschState) (m Map.!? pid) jumpingStates <- forM (Map.toList m) $ \(peer, h) -> do st <- readTVar (CSClient.cschJumping h) @@ -272,7 +274,7 @@ dispatchTick tracer varHandles peers lifecycle node (number, (duration, Peer pid runScheduler :: IOLike m => Tracer m (TraceSchedulerEvent blk) -> - StrictTVar m (Map PeerId (ChainSyncClientHandle m blk)) -> + STM m (Map PeerId (ChainSyncClientHandle m blk)) -> PointSchedule blk -> Map PeerId (PeerResources m blk) -> NodeLifecycle blk m -> @@ -314,7 +316,7 @@ mkStateTracer :: m (Tracer m ()) mkStateTracer schedulerConfig GenesisTest {gtBlockTree} PeerSimulatorResources {psrHandles, psrPeers} chainDb | scTraceState schedulerConfig - , let getCandidates = viewChainSyncState psrHandles CSClient.csCandidate + , let getCandidates = viewChainSyncState (cschcMap psrHandles) CSClient.csCandidate getCurrentChain = ChainDB.getCurrentChain chainDb getPoints = traverse readTVar (srCurrentState . 
prShared <$> psrPeers) = peerSimStateDiagramSTMTracerDebug gtBlockTree getCurrentChain getCandidates getPoints @@ -335,7 +337,7 @@ startNode :: startNode schedulerConfig genesisTest interval = do let handles = psrHandles lrPeerSim - getCandidates = viewChainSyncState handles CSClient.csCandidate + getCandidates = viewChainSyncState (cschcMap handles) CSClient.csCandidate fetchClientRegistry <- newFetchClientRegistry let chainDbView = CSClient.defaultChainDbView lnChainDb activePeers = Map.restrictKeys (psrPeers lrPeerSim) (lirActive liveResult) @@ -384,10 +386,11 @@ startNode schedulerConfig genesisTest interval = do (mkGDDTracerTestBlock lrTracer) lnChainDb (pure GSM.Syncing) -- TODO actually run GSM - (readTVar handles) + (cschcMap handles) var - void $ forkLinkedWatcher lrRegistry "CSJ invariants watcher" $ CSJInvariants.watcher handles + void $ forkLinkedWatcher lrRegistry "CSJ invariants watcher" $ + CSJInvariants.watcher (cschcMap handles) where LiveResources {lrRegistry, lrTracer, lrConfig, lrPeerSim, lrLoEVar} = resources @@ -483,7 +486,7 @@ runPointSchedule schedulerConfig genesisTest tracer0 = lifecycle <- nodeLifecycle schedulerConfig genesisTest tracer registry peerSim (chainDb, stateViewTracers) <- runScheduler (Tracer $ traceWith tracer . TraceSchedulerEvent) - (psrHandles peerSim) + (cschcMap (psrHandles peerSim)) gtSchedule (psrPeers peerSim) lifecycle diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs index 8a08bff1d6..aeeaa0acd4 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs @@ -64,10 +64,12 @@ module Ouroboros.Consensus.MiniProtocol.ChainSync.Client ( , TraceChainSyncClientEvent (..) -- * State shared with other components , ChainSyncClientHandle (..) + , ChainSyncClientHandleCollection (..) , ChainSyncState (..) , ChainSyncStateView (..) , Jumping.noJumping , chainSyncStateFor + , newChainSyncClientHandleCollection , noIdling , noLoPBucket , viewChainSyncState @@ -228,11 +230,11 @@ newtype Our a = Our { unOur :: a } -- data from 'ChainSyncState'. viewChainSyncState :: IOLike m => - StrictTVar m (Map peer (ChainSyncClientHandle m blk)) -> + STM m (Map peer (ChainSyncClientHandle m blk)) -> (ChainSyncState blk -> a) -> STM m (Map peer a) viewChainSyncState varHandles f = - Map.map f <$> (traverse (readTVar . cschState) =<< readTVar varHandles) + Map.map f <$> (traverse (readTVar . cschState) =<< varHandles) -- | Convenience function for reading the 'ChainSyncState' for a single peer -- from a nested set of TVars. @@ -326,7 +328,7 @@ bracketChainSyncClient :: ) => Tracer m (TraceChainSyncClientEvent blk) -> ChainDbView m blk - -> StrictTVar m (Map peer (ChainSyncClientHandle m blk)) + -> ChainSyncClientHandleCollection peer m blk -- ^ The kill handle and states for each peer, we need the whole map because we -- (de)register nodes (@peer@). 
-> STM m GsmState @@ -399,8 +401,8 @@ bracketChainSyncClient insertHandle = atomicallyWithMonotonicTime $ \time -> do initialGsmState <- getGsmState updateLopBucketConfig lopBucket initialGsmState time - modifyTVar varHandles $ Map.insert peer handle - deleteHandle = atomically $ modifyTVar varHandles $ Map.delete peer + cschcAddHandle varHandles peer handle + deleteHandle = atomically $ cschcRemoveHandle varHandles peer bracket_ insertHandle deleteHandle $ f Jumping.noJumping withCSJCallbacks lopBucket csHandleState (CSJEnabled csjEnabledConfig) f = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs index e1281a8416..3b774db859 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs @@ -164,10 +164,10 @@ module Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping ( import Cardano.Slotting.Slot (SlotNo (..), WithOrigin (..)) import Control.Monad (forM, forM_, when) +import Data.Foldable (toList) import Data.List (sortOn) -import Data.Map.Strict (Map) -import qualified Data.Map.Strict as Map import Data.Maybe (catMaybes, fromMaybe) +import qualified Data.Sequence.Strict as Seq import GHC.Generics (Generic) import Ouroboros.Consensus.Block (HasHeader (getHeaderFields), Header, Point (..), castPoint, pointSlot, succWithOrigin) @@ -175,6 +175,7 @@ import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State (ChainSyncClientHandle (..), + ChainSyncClientHandleCollection (..), ChainSyncJumpingJumperState (..), ChainSyncJumpingState (..), ChainSyncState (..), DisengagedInitState (..), DynamoInitState (..), @@ -257,16 +258,16 @@ mkJumping peerContext = Jumping -- -- Invariants: -- --- - If 'handlesVar' is not empty, then there is exactly one dynamo in it. --- - There is at most one objector in 'handlesVar'. --- - If there exist 'FoundIntersection' jumpers in 'handlesVar', then there +-- - If 'handlesCol is not empty, then there is exactly one dynamo in it. +-- - There is at most one objector in 'handlesCol. +-- - If there exist 'FoundIntersection' jumpers in 'handlesCol, then there -- is an objector and the intersection of the objector with the dynamo is -- at least as old as the oldest intersection of the `FoundIntersection` jumpers -- with the dynamo. data ContextWith peerField handleField m peer blk = Context { peer :: !peerField, handle :: !handleField, - handlesVar :: !(StrictTVar m (Map peer (ChainSyncClientHandle m blk))), + handlesCol :: !(ChainSyncClientHandleCollection peer m blk), jumpSize :: !SlotNo } @@ -276,12 +277,12 @@ type Context = ContextWith () () -- | A peer-specific context for ChainSync jumping. This is a 'ContextWith' -- pointing on the handler of the peer in question. -- --- Invariant: The binding from 'peer' to 'handle' is present in 'handlesVar'. +-- Invariant: The binding from 'peer' to 'handle' is present in 'handlesCol'. type PeerContext m peer blk = ContextWith peer (ChainSyncClientHandle m blk) m peer blk makeContext :: MonadSTM m => - StrictTVar m (Map peer (ChainSyncClientHandle m blk)) -> + ChainSyncClientHandleCollection peer m blk -> SlotNo -> -- ^ The size of jumps, in number of slots. 
STM m (Context m peer blk) @@ -427,8 +428,8 @@ onRollForward context point = setJumps (Just jumpInfo) = do writeTVar (cschJumping (handle context)) $ Dynamo DynamoStarted $ pointSlot $ AF.headPoint $ jTheirFragment jumpInfo - handles <- readTVar (handlesVar context) - forM_ (Map.elems handles) $ \h -> + handles <- cschcSeq (handlesCol context) + forM_ handles $ \(_, h) -> readTVar (cschJumping h) >>= \case Jumper nextJumpVar Happy{} -> writeTVar nextJumpVar (Just jumpInfo) _ -> pure () @@ -660,11 +661,11 @@ updateJumpInfo context jumpInfo = -- of the dynamo, or 'Nothing' if there is none. getDynamo :: (MonadSTM m) => - StrictTVar m (Map peer (ChainSyncClientHandle m blk)) -> + ChainSyncClientHandleCollection peer m blk -> STM m (Maybe (ChainSyncClientHandle m blk)) -getDynamo handlesVar = do - handles <- Map.elems <$> readTVar handlesVar - findM (\handle -> isDynamo <$> readTVar (cschJumping handle)) handles +getDynamo handlesCol = do + handles <- cschcSeq handlesCol + fmap snd <$> findM (\(_, handle) -> isDynamo <$> readTVar (cschJumping handle)) handles where isDynamo Dynamo{} = True isDynamo _ = False @@ -705,8 +706,7 @@ newJumper jumpInfo jumperState = do -- that peer. If there is no dynamo, the peer starts as dynamo; otherwise, it -- starts as a jumper. registerClient :: - ( Ord peer, - LedgerSupportsProtocol blk, + ( LedgerSupportsProtocol blk, IOLike m ) => Context m peer blk -> @@ -716,7 +716,7 @@ registerClient :: (StrictTVar m (ChainSyncJumpingState m blk) -> ChainSyncClientHandle m blk) -> STM m (PeerContext m peer blk) registerClient context peer csState mkHandle = do - csjState <- getDynamo (handlesVar context) >>= \case + csjState <- getDynamo (handlesCol context) >>= \case Nothing -> do fragment <- csCandidate <$> readTVar csState pure $ Dynamo DynamoStarted $ pointSlot $ AF.anchorPoint fragment @@ -725,7 +725,7 @@ registerClient context peer csState mkHandle = do newJumper mJustInfo (Happy FreshJumper Nothing) cschJumping <- newTVar csjState let handle = mkHandle cschJumping - modifyTVar (handlesVar context) $ Map.insert peer handle + cschcAddHandle (handlesCol context) peer handle pure $ context {peer, handle} -- | Unregister a client from a 'PeerContext'; this might trigger the election @@ -738,7 +738,7 @@ unregisterClient :: PeerContext m peer blk -> STM m () unregisterClient context = do - modifyTVar (handlesVar context) $ Map.delete (peer context) + cschcRemoveHandle (handlesCol context) (peer context) let context' = stripContext context readTVar (cschJumping (handle context)) >>= \case Disengaged{} -> pure () @@ -756,7 +756,7 @@ electNewDynamo :: Context m peer blk -> STM m () electNewDynamo context = do - peerStates <- Map.toList <$> readTVar (handlesVar context) + peerStates <- cschcSeq (handlesCol context) mDynamo <- findNonDisengaged peerStates case mDynamo of Nothing -> pure () @@ -781,22 +781,20 @@ electNewDynamo context = do isDisengaged Disengaged{} = True isDisengaged _ = False -findM :: Monad m => (a -> m Bool) -> [a] -> m (Maybe a) -findM _ [] = pure Nothing -findM p (x : xs) = p x >>= \case - True -> pure (Just x) - False -> findM p xs +findM :: (Foldable f, Monad m) => (a -> m Bool) -> f a -> m (Maybe a) +findM p = + foldr (\x mb -> p x >>= \case True -> pure (Just x); False -> mb) (pure Nothing) -- | Find the objector in a context, if there is one. 
findObjector :: (MonadSTM m) => Context m peer blk -> STM m (Maybe (ObjectorInitState, JumpInfo blk, Point (Header blk), ChainSyncClientHandle m blk)) -findObjector context = do - readTVar (handlesVar context) >>= go . Map.toList +findObjector context = + cschcSeq (handlesCol context) >>= go where - go [] = pure Nothing - go ((_, handle):xs) = + go Seq.Empty = pure Nothing + go ((_, handle) Seq.:<| xs) = readTVar (cschJumping handle) >>= \case Objector initState goodJump badPoint -> pure $ Just (initState, goodJump, badPoint, handle) @@ -809,7 +807,7 @@ electNewObjector :: Context m peer blk -> STM m () electNewObjector context = do - peerStates <- Map.toList <$> readTVar (handlesVar context) + peerStates <- toList <$> cschcSeq (handlesCol context) dissentingJumpers <- collectDissentingJumpers peerStates let sortedJumpers = sortOn (pointSlot . fst) dissentingJumpers case sortedJumpers of diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs index c75e3d3530..cba965fb96 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/State.hs @@ -9,6 +9,7 @@ module Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State ( ChainSyncClientHandle (..) + , ChainSyncClientHandleCollection (..) , ChainSyncJumpingJumperState (..) , ChainSyncJumpingState (..) , ChainSyncState (..) @@ -17,10 +18,15 @@ module Ouroboros.Consensus.MiniProtocol.ChainSync.Client.State ( , JumpInfo (..) , JumperInitState (..) , ObjectorInitState (..) + , newChainSyncClientHandleCollection ) where import Cardano.Slotting.Slot (SlotNo, WithOrigin) import Data.Function (on) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Sequence.Strict (StrictSeq) +import qualified Data.Sequence.Strict as Seq import Data.Typeable (Proxy (..), typeRep) import GHC.Generics (Generic) import Ouroboros.Consensus.Block (HasHeader, Header, Point) @@ -29,7 +35,7 @@ import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) import Ouroboros.Consensus.Node.GsmState (GsmState) import Ouroboros.Consensus.Util.IOLike (IOLike, NoThunks (..), STM, - StrictTVar, Time) + StrictTVar, Time, modifyTVar, newTVar, readTVar) import Ouroboros.Network.AnchoredFragment (AnchoredFragment, headPoint) @@ -95,6 +101,68 @@ deriving anyclass instance ( NoThunks (Header blk) ) => NoThunks (ChainSyncClientHandle m blk) +-- | A collection of ChainSync client handles for the peers of this node. +-- +-- Sometimes we want to see the collection as a Map, and sometimes as a sequence. +-- The implementation keeps both views in sync. 
+data ChainSyncClientHandleCollection peer m blk = ChainSyncClientHandleCollection { + -- | A map containing the handles for the peers in the collection + cschcMap :: !(STM m (Map peer (ChainSyncClientHandle m blk))) + -- | A sequence containing the handles for the peers in the collection + , cschcSeq :: !(STM m (StrictSeq (peer, ChainSyncClientHandle m blk))) + -- | Add the handle for the given peer to the collection + -- PRECONDITION: The peer is not already in the collection + , cschcAddHandle :: !(peer -> ChainSyncClientHandle m blk -> STM m ()) + -- | Remove the handle for the given peer from the collection + , cschcRemoveHandle :: !(peer -> STM m ()) + -- | Moves the handle for the given peer to the end of the sequence + , cschcRotateHandle :: !(peer -> STM m ()) + -- | Remove all the handles from the collection + , cschcRemoveAllHandles :: !(STM m ()) + } + deriving stock (Generic) + +deriving anyclass instance ( + IOLike m, + HasHeader blk, + LedgerSupportsProtocol blk, + NoThunks (STM m ()), + NoThunks (Header blk), + NoThunks (STM m (Map peer (ChainSyncClientHandle m blk))), + NoThunks (STM m (StrictSeq (peer, ChainSyncClientHandle m blk))) + ) => NoThunks (ChainSyncClientHandleCollection peer m blk) + +newChainSyncClientHandleCollection :: + ( Ord peer, + IOLike m, + LedgerSupportsProtocol blk, + NoThunks peer + ) + => STM m (ChainSyncClientHandleCollection peer m blk) +newChainSyncClientHandleCollection = do + handlesMap <- newTVar mempty + handlesSeq <- newTVar mempty + + return ChainSyncClientHandleCollection { + cschcMap = readTVar handlesMap + , cschcSeq = readTVar handlesSeq + , cschcAddHandle = \peer handle -> do + modifyTVar handlesMap (Map.insert peer handle) + modifyTVar handlesSeq (Seq.|> (peer, handle)) + , cschcRemoveHandle = \peer -> do + modifyTVar handlesMap (Map.delete peer) + modifyTVar handlesSeq $ \s -> + let (xs, ys) = Seq.spanl ((/= peer) . fst) s + in xs Seq.>< Seq.drop 1 ys + , cschcRotateHandle = \peer -> + modifyTVar handlesSeq $ \s -> + let (xs, ys) = Seq.spanl ((/= peer) . fst) s + in xs Seq.>< Seq.drop 1 ys Seq.>< Seq.take 1 ys + , cschcRemoveAllHandles = do + modifyTVar handlesMap (const mempty) + modifyTVar handlesSeq (const mempty) + } + data DynamoInitState blk = -- | The dynamo has not yet started jumping and we first need to jump to the -- given jump info to set the intersection of the ChainSync server. From 72ac137a7d579068350d28b61987b440916c6a87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Mon, 27 May 2024 19:08:54 +0000 Subject: [PATCH 21/26] Implement a call to rotate dynamos in CSJ --- .../MiniProtocol/ChainSync/Client/Jumping.hs | 157 +++++++++++++----- 1 file changed, 119 insertions(+), 38 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs index 3b774db859..35f76ce2be 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs @@ -75,6 +75,13 @@ -- when the client should pause, download headers, or ask about agreement with -- a given point (jumping). See the 'Jumping' type for more details. -- +-- Interactions with the BlockFetch logic +-- -------------------------------------- +-- +-- When syncing, the BlockFetch logic will fetch blocks from the dynamo. 
If the +-- dynamo is responding too slowly, the BlockFetch logic can ask to change the +-- dynamo with a call to 'rotateDynamo'. +-- -- Interactions with the Limit on Patience -- --------------------------------------- -- @@ -101,15 +108,15 @@ -- -- > j ╔════════╗ -- > ╭────────── ║ Dynamo ║ ◀─────────╮ --- > │ ╚════════╝ │f --- > ▼ ▲ │ --- > ┌────────────┐ │ k ┌──────────┐ --- > │ Disengaged │ ◀───────────│────────── │ Objector │ --- > └────────────┘ ╭─────│────────── └──────────┘ --- > │ │ ▲ ▲ │ --- > g│ │e b │ │ │ --- > │ │ ╭─────╯ i│ │c --- > ╭╌╌╌╌╌╌╌▼╌╌╌╌╌╌╌╌╌╌╌╌╌│╌╌╌╌╌╌╌╌╌╌│╌▼╌╌╌╮ +-- > │ ╭──╚════════╝ │f +-- > ▼ │ ▲ │ +-- > ┌────────────┐ │ │ k ┌──────────┐ +-- > │ Disengaged │ ◀─│─────────│────────── │ Objector │ +-- > └────────────┘ │ ╭─────│────────── └──────────┘ +-- > │ │ │ ▲ ▲ │ +-- > l│ g│ │e b │ │ │ +-- > │ │ │ ╭─────╯ i│ │c +-- > ╭╌╌╌▼╌╌╌▼╌╌╌╌╌╌╌╌╌╌╌╌╌│╌╌╌╌╌╌╌╌╌╌│╌▼╌╌╌╮ -- > ┆ ╔═══════╗ a ┌──────┐ d ┌─────┐ | -- > ┆ ║ Happy ║ ───▶ │ LFI* │ ───▶ │ FI* │ | -- > ┆ ╚═══════╝ ◀─╮ └──────┘ └─────┘ | @@ -148,6 +155,10 @@ -- If dynamo or objector claim to have no more headers, they are disengaged -- (j|k). -- +-- The BlockFetch logic can ask to change the dynamo if it is not serving blocks +-- fast enough. If there are other non-disengaged peers the dynamo is demoted to +-- a jumper (l) and a new dynamo is elected. +-- module Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping ( Context , ContextWith (..) @@ -155,18 +166,22 @@ module Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping ( , JumpInstruction (..) , JumpResult (..) , Jumping (..) + , getDynamo , makeContext , mkJumping , noJumping , registerClient + , rotateDynamo , unregisterClient ) where import Cardano.Slotting.Slot (SlotNo (..), WithOrigin (..)) -import Control.Monad (forM, forM_, when) +import Control.Monad (forM, forM_, void, when) import Data.Foldable (toList) import Data.List (sortOn) +import qualified Data.Map as Map import Data.Maybe (catMaybes, fromMaybe) +import Data.Sequence.Strict (StrictSeq) import qualified Data.Sequence.Strict as Seq import GHC.Generics (Generic) import Ouroboros.Consensus.Block (HasHeader (getHeaderFields), Header, @@ -460,7 +475,7 @@ onRollBackward context slot = Dynamo _ lastJumpSlot | slot < lastJumpSlot -> do disengage (handle context) - electNewDynamo (stripContext context) + void $ electNewDynamo (stripContext context) | otherwise -> pure () -- | This function is called when we receive a 'MsgAwaitReply' message. 
@@ -478,7 +493,7 @@ onAwaitReply context = readTVar (cschJumping (handle context)) >>= \case Dynamo{} -> do disengage (handle context) - electNewDynamo (stripContext context) + void $ electNewDynamo (stripContext context) Objector{} -> do disengage (handle context) electNewObjector (stripContext context) @@ -511,7 +526,7 @@ processJumpResult context jumpResult = updateChainSyncState (handle context) jumpInfo RejectedJump JumpToGoodPoint{} -> do startDisengaging (handle context) - electNewDynamo (stripContext context) + void $ electNewDynamo (stripContext context) -- Not interesting in the dynamo state AcceptedJump JumpTo{} -> pure () @@ -662,10 +677,10 @@ updateJumpInfo context jumpInfo = getDynamo :: (MonadSTM m) => ChainSyncClientHandleCollection peer m blk -> - STM m (Maybe (ChainSyncClientHandle m blk)) + STM m (Maybe (peer, ChainSyncClientHandle m blk)) getDynamo handlesCol = do handles <- cschcSeq handlesCol - fmap snd <$> findM (\(_, handle) -> isDynamo <$> readTVar (cschJumping handle)) handles + findM (\(_, handle) -> isDynamo <$> readTVar (cschJumping handle)) handles where isDynamo Dynamo{} = True isDynamo _ = False @@ -720,7 +735,7 @@ registerClient context peer csState mkHandle = do Nothing -> do fragment <- csCandidate <$> readTVar csState pure $ Dynamo DynamoStarted $ pointSlot $ AF.anchorPoint fragment - Just handle -> do + Just (_, handle) -> do mJustInfo <- readTVar (cschJumpInfo handle) newJumper mJustInfo (Happy FreshJumper Nothing) cschJumping <- newTVar csjState @@ -744,7 +759,52 @@ unregisterClient context = do Disengaged{} -> pure () Jumper{} -> pure () Objector{} -> electNewObjector context' - Dynamo{} -> electNewDynamo context' + Dynamo{} -> void $ electNewDynamo context' + +-- | Elects a new dynamo by demoting the given dynamo to a jumper, moving the +-- peer to the end of the queue of chain sync handles and electing a new dynamo. +-- +-- It does nothing if there is no other engaged peer to elect or if the given +-- peer is not the dynamo. +-- +-- Yields the new dynamo, if there is one. +rotateDynamo :: + ( Ord peer, + LedgerSupportsProtocol blk, + MonadSTM m + ) => + Context m peer blk -> + peer -> + STM m (Maybe (peer, ChainSyncClientHandle m blk)) +rotateDynamo context peer = do + handles <- cschcMap (handlesCol context) + case handles Map.!? peer of + Nothing -> + -- Do not re-elect a dynamo if the peer has been disconnected. + getDynamo (handlesCol context) + Just oldDynHandle -> + readTVar (cschJumping oldDynHandle) >>= \case + Dynamo{} -> do + cschcRotateHandle (handlesCol context) peer + peerStates <- cschcSeq (handlesCol context) + mEngaged <- findNonDisengaged peerStates + case mEngaged of + Nothing -> + -- There are no engaged peers. This case cannot happen, as the + -- dynamo is always engaged. + error "rotateDynamo: no engaged peer found" + Just (newDynamoId, newDynHandle) + | newDynamoId == peer -> + -- The old dynamo is the only engaged peer left. + pure $ Just (newDynamoId, newDynHandle) + | otherwise -> do + newJumper Nothing (Happy FreshJumper Nothing) + >>= writeTVar (cschJumping oldDynHandle) + promoteToDynamo peerStates newDynamoId newDynHandle + pure $ Just (newDynamoId, newDynHandle) + _ -> + -- Do not re-elect a dynamo if the peer is not the dynamo. + getDynamo (handlesCol context) -- | Choose an unspecified new non-idling dynamo and demote all other peers to -- jumpers. 
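
For intuition, the queue rotation that 'rotateDynamo' performs via 'cschcRotateHandle' amounts to the following pure sketch over the sequence view of the handle collection ('rotate' is a hypothetical standalone name; its body is lifted from the collection's implementation in the previous patch):

    import           Data.Sequence.Strict (StrictSeq)
    import qualified Data.Sequence.Strict as Seq

    -- Sketch only: move the entry keyed by @peer@ (if present) to the end of
    -- the sequence, keeping the relative order of all other entries, exactly
    -- as 'cschcRotateHandle' does for the sequence view of the collection.
    rotate :: Eq peer => peer -> StrictSeq (peer, handle) -> StrictSeq (peer, handle)
    rotate peer s =
      let (xs, ys) = Seq.spanl ((/= peer) . fst) s
      in  xs Seq.>< Seq.drop 1 ys Seq.>< Seq.take 1 ys

For example, rotating peer "b" in a sequence [("a",hA), ("b",hB), ("c",hC)] yields [("a",hA), ("c",hC), ("b",hB)], so a demoted dynamo ends up last in line when a new dynamo is elected.
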
@@ -754,32 +814,53 @@ electNewDynamo :: LedgerSupportsProtocol blk ) => Context m peer blk -> - STM m () + STM m (Maybe (peer, ChainSyncClientHandle m blk)) electNewDynamo context = do peerStates <- cschcSeq (handlesCol context) mDynamo <- findNonDisengaged peerStates case mDynamo of - Nothing -> pure () + Nothing -> pure Nothing Just (dynId, dynamo) -> do - fragment <- csCandidate <$> readTVar (cschState dynamo) - mJumpInfo <- readTVar (cschJumpInfo dynamo) - -- If there is no jump info, the dynamo must be just starting and - -- there is no need to set the intersection of the ChainSync server. - let dynamoInitState = maybe DynamoStarted DynamoStarting mJumpInfo - writeTVar (cschJumping dynamo) $ - Dynamo dynamoInitState $ pointSlot $ AF.headPoint fragment - -- Demote all other peers to jumpers - forM_ peerStates $ \(peer, st) -> - when (peer /= dynId) $ do - jumpingState <- readTVar (cschJumping st) - when (not (isDisengaged jumpingState)) $ - newJumper mJumpInfo (Happy FreshJumper Nothing) - >>= writeTVar (cschJumping st) - where - findNonDisengaged = - findM $ \(_, st) -> not . isDisengaged <$> readTVar (cschJumping st) - isDisengaged Disengaged{} = True - isDisengaged _ = False + promoteToDynamo peerStates dynId dynamo + pure $ Just (dynId, dynamo) + +-- | Promote the given peer to dynamo and demote all other peers to jumpers. +promoteToDynamo :: + ( MonadSTM m, + Eq peer, + LedgerSupportsProtocol blk + ) => + StrictSeq (peer, ChainSyncClientHandle m blk) -> + peer -> + ChainSyncClientHandle m blk -> + STM m () +promoteToDynamo peerStates dynId dynamo = do + fragment <- csCandidate <$> readTVar (cschState dynamo) + mJumpInfo <- readTVar (cschJumpInfo dynamo) + -- If there is no jump info, the dynamo must be just starting and + -- there is no need to set the intersection of the ChainSync server. + let dynamoInitState = maybe DynamoStarted DynamoStarting mJumpInfo + writeTVar (cschJumping dynamo) $ + Dynamo dynamoInitState $ pointSlot $ AF.headPoint fragment + -- Demote all other peers to jumpers + forM_ peerStates $ \(peer, st) -> + when (peer /= dynId) $ do + jumpingState <- readTVar (cschJumping st) + when (not (isDisengaged jumpingState)) $ + newJumper mJumpInfo (Happy FreshJumper Nothing) + >>= writeTVar (cschJumping st) + +-- | Find a non-disengaged peer in the given sequence +findNonDisengaged :: + (MonadSTM m) => + StrictSeq (peer, ChainSyncClientHandle m blk) -> + STM m (Maybe (peer, ChainSyncClientHandle m blk)) +findNonDisengaged = + findM $ \(_, st) -> not . 
isDisengaged <$> readTVar (cschJumping st) + +isDisengaged :: ChainSyncJumpingState m blk -> Bool +isDisengaged Disengaged{} = True +isDisengaged _ = False findM :: (Foldable f, Monad m) => (a -> m Bool) -> f a -> m (Maybe a) findM p = From fe04f3e00dff6338bf8dddbfd3b22647f3ac5b82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Facundo=20Dom=C3=ADnguez?= Date: Wed, 26 Jun 2024 12:39:26 -0300 Subject: [PATCH 22/26] Comment formatting Co-authored-by: Nicolas Jeannerod --- .../Consensus/MiniProtocol/ChainSync/Client/Jumping.hs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs index 35f76ce2be..42b5eee7f9 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs @@ -273,9 +273,9 @@ mkJumping peerContext = Jumping -- -- Invariants: -- --- - If 'handlesCol is not empty, then there is exactly one dynamo in it. --- - There is at most one objector in 'handlesCol. --- - If there exist 'FoundIntersection' jumpers in 'handlesCol, then there +-- - If 'handlesCol' is not empty, then there is exactly one dynamo in it. +-- - There is at most one objector in 'handlesCol'. +-- - If there exist 'FoundIntersection' jumpers in 'handlesCol', then there -- is an objector and the intersection of the objector with the dynamo is -- at least as old as the oldest intersection of the `FoundIntersection` jumpers -- with the dynamo. From 7259d9254e6d978fded9edd3390e1925f08a7d66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Thu, 27 Jun 2024 10:14:30 +0000 Subject: [PATCH 23/26] Rename `varHandles` to `readHandles` for consistency --- .../Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs index aeeaa0acd4..2c4eae9481 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs @@ -233,8 +233,8 @@ viewChainSyncState :: STM m (Map peer (ChainSyncClientHandle m blk)) -> (ChainSyncState blk -> a) -> STM m (Map peer a) -viewChainSyncState varHandles f = - Map.map f <$> (traverse (readTVar . cschState) =<< varHandles) +viewChainSyncState readHandles f = + Map.map f <$> (traverse (readTVar . cschState) =<< readHandles) -- | Convenience function for reading the 'ChainSyncState' for a single peer -- from a nested set of TVars. 
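
As a usage sketch (not part of any patch in this series; 'candidateFragments' is a hypothetical helper name), callers of 'viewChainSyncState' are now expected to pass an STM action — typically the 'cschcMap' view of the handle collection — where they previously passed a 'StrictTVar' holding the map:

    import           Data.Map.Strict (Map)
    import           Ouroboros.Consensus.Block (Header)
    import           Ouroboros.Consensus.MiniProtocol.ChainSync.Client
                         (ChainSyncClientHandleCollection (..),
                          ChainSyncState (..), viewChainSyncState)
    import           Ouroboros.Consensus.Util.IOLike (IOLike, MonadSTM (STM))
    import           Ouroboros.Network.AnchoredFragment (AnchoredFragment)

    -- Sketch only: project the candidate fragment of every connected peer out
    -- of the shared handle collection, mirroring how the node kernel and the
    -- GDD governor consume 'cschcMap' after this series.
    candidateFragments ::
         IOLike m
      => ChainSyncClientHandleCollection peer m blk
      -> STM m (Map peer (AnchoredFragment (Header blk)))
    candidateFragments handles =
      viewChainSyncState (cschcMap handles) csCandidate
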
From 72a5077edd144fb9aa7581cf10c50e3fa253679a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Thu, 27 Jun 2024 10:16:00 +0000 Subject: [PATCH 24/26] Mention that the objector also gets demoted --- .../Consensus/MiniProtocol/ChainSync/Client/Jumping.hs | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs index 42b5eee7f9..83cf1bb815 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/Jumping.hs @@ -156,8 +156,9 @@ -- (j|k). -- -- The BlockFetch logic can ask to change the dynamo if it is not serving blocks --- fast enough. If there are other non-disengaged peers the dynamo is demoted to --- a jumper (l) and a new dynamo is elected. +-- fast enough. If there are other non-disengaged peers, the dynamo (and the +-- objector if there is one) is demoted to a jumper (l+g) and a new dynamo is +-- elected. -- module Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping ( Context @@ -761,8 +762,9 @@ unregisterClient context = do Objector{} -> electNewObjector context' Dynamo{} -> void $ electNewDynamo context' --- | Elects a new dynamo by demoting the given dynamo to a jumper, moving the --- peer to the end of the queue of chain sync handles and electing a new dynamo. +-- | Elects a new dynamo by demoting the given dynamo (and the objector if there +-- is one) to a jumper, moving the peer to the end of the queue of chain sync +-- handles and electing a new dynamo. -- -- It does nothing if there is no other engaged peer to elect or if the given -- peer is not the dynamo. 
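
A minimal sketch of how the BlockFetch side can consume 'rotateDynamo' (the actual integration is not shown in this excerpt; 'demoteSlowDynamo' is a hypothetical name, and the decision of when to call it is assumed to live in the BlockFetch logic):

    import           Ouroboros.Consensus.Ledger.SupportsProtocol
                         (LedgerSupportsProtocol)
    import           Ouroboros.Consensus.MiniProtocol.ChainSync.Client.Jumping
                         (Context, rotateDynamo)
    import           Ouroboros.Consensus.Util.IOLike (IOLike, MonadSTM (atomically))

    -- Sketch only: ask CSJ to demote a dynamo that BlockFetch considers too
    -- slow. Returns the dynamo in place after the call: the newly elected one,
    -- or the current one if the given peer was not the dynamo.
    demoteSlowDynamo ::
         (Ord peer, LedgerSupportsProtocol blk, IOLike m)
      => Context m peer blk
      -> peer
      -> m (Maybe peer)
    demoteSlowDynamo context slowPeer =
      atomically $ fmap fst <$> rotateDynamo context slowPeer
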
From 6559b9f7998b817b1680715fcfbbf698824ce780 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Wed, 19 Jun 2024 14:20:57 +0200 Subject: [PATCH 25/26] Specify the order in which to start the peers --- .../Genesis/Tests/DensityDisconnect.hs | 8 +- .../Test/Consensus/Genesis/Tests/LoE.hs | 3 +- .../Test/Consensus/Genesis/Tests/LoP.hs | 13 ++- .../Test/Consensus/Genesis/Tests/Uniform.hs | 1 + .../Test/Consensus/PeerSimulator/Run.hs | 13 ++- .../PeerSimulator/Tests/LinkedThreads.hs | 2 +- .../Consensus/PeerSimulator/Tests/Rollback.hs | 6 +- .../Consensus/PeerSimulator/Tests/Timeouts.hs | 2 +- .../Test/Consensus/PointSchedule.hs | 80 +++++++++++++------ .../Test/Consensus/PointSchedule/Shrinking.hs | 5 +- .../PointSchedule/Shrinking/Tests.hs | 4 +- 11 files changed, 98 insertions(+), 39 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs index 0b0ddcd80f..0d33d72462 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/DensityDisconnect.hs @@ -498,7 +498,8 @@ prop_densityDisconnectTriggersChainSel = (AF.Empty _) -> Origin (_ AF.:> tipBlock) -> At tipBlock advTip = getOnlyBranchTip tree - in mkPointSchedule $ peers' + in PointSchedule { + psSchedule = peers' -- Eagerly serve the honest tree, but after the adversary has -- advertised its chain up to the intersection. [[(Time 0, scheduleTipPoint trunkTip), @@ -513,4 +514,7 @@ prop_densityDisconnectTriggersChainSel = (Time 0, ScheduleBlockPoint intersect), (Time 1, scheduleHeaderPoint advTip), (Time 1, scheduleBlockPoint advTip) - ]] + ]], + psStartOrder = [], + psMinEndTime = Time 0 + } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs index 10f2f777e3..f4930a8aaf 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoE.hs @@ -2,7 +2,6 @@ {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TypeApplications #-} {-# LANGUAGE ViewPatterns #-} module Test.Consensus.Genesis.Tests.LoE (tests) where @@ -116,4 +115,4 @@ prop_adversaryHitsTimeouts timeoutsEnabled = ] -- We want to wait more than the short wait timeout psMinEndTime = Time 11 - in PointSchedule {psSchedule, psMinEndTime} + in PointSchedule {psSchedule, psStartOrder = [], psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs index 552a10696d..1b1bbacc03 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/LoP.hs @@ -79,6 +79,7 @@ prop_wait mustTimeout = let offset :: DiffTime = if mustTimeout then 1 else -1 in PointSchedule { psSchedule = peersOnlyHonest [(Time 0, scheduleTipPoint tipBlock)] + , psStartOrder = [] , psMinEndTime = Time $ timeout + offset } @@ -108,6 +109,7 @@ prop_waitBehindForecastHorizon = [ (Time 0, 
scheduleTipPoint tipBlock) , (Time 0, scheduleHeaderPoint tipBlock) ] + , psStartOrder = [] , psMinEndTime = Time 11 } @@ -166,13 +168,18 @@ prop_serve mustTimeout = makeSchedule :: (HasHeader blk) => AnchoredFragment blk -> PointSchedule blk makeSchedule (AF.Empty _) = error "fragment must have at least one block" makeSchedule fragment@(_ AF.:> tipBlock) = - mkPointSchedule $ peersOnlyHonest $ + PointSchedule { + psSchedule = + peersOnlyHonest $ (Time 0, scheduleTipPoint tipBlock) : ( flip concatMap (zip [1 ..] (AF.toOldestFirst fragment)) $ \(i, block) -> [ (Time (secondsRationalToDiffTime (i * timeBetweenBlocks)), scheduleHeaderPoint block), (Time (secondsRationalToDiffTime (i * timeBetweenBlocks)), scheduleBlockPoint block) ] - ) + ), + psStartOrder = [], + psMinEndTime = Time 0 + } -- NOTE: Same as 'LoE.prop_adversaryHitsTimeouts' with LoP instead of timeouts. prop_delayAttack :: Bool -> Property @@ -249,4 +256,4 @@ prop_delayAttack lopEnabled = ] -- Wait for LoP bucket to empty psMinEndTime = Time 11 - in PointSchedule {psSchedule, psMinEndTime} + in PointSchedule {psSchedule, psStartOrder = [], psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index 7ebdd0a84e..a3400a0184 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -288,6 +288,7 @@ prop_leashingAttackTimeLimited = advs = fmap (takePointsUntil timeLimit) advs0 pure $ PointSchedule { psSchedule = Peers honests advs + , psStartOrder = [] , psMinEndTime = timeLimit } diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index 63c8897e33..8f35e322b3 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -16,6 +16,7 @@ import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Tracer (Tracer (..), nullTracer, traceWith) import Data.Coerce (coerce) import Data.Foldable (for_) +import Data.List (sort) import qualified Data.List.NonEmpty as NonEmpty import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map @@ -340,8 +341,15 @@ startNode schedulerConfig genesisTest interval = do getCandidates = viewChainSyncState (cschcMap handles) CSClient.csCandidate fetchClientRegistry <- newFetchClientRegistry let chainDbView = CSClient.defaultChainDbView lnChainDb - activePeers = Map.restrictKeys (psrPeers lrPeerSim) (lirActive liveResult) - for_ activePeers $ \PeerResources {prShared, prChainSync, prBlockFetch} -> do + activePeers = Map.toList $ Map.restrictKeys (psrPeers lrPeerSim) (lirActive liveResult) + peersStartOrder = psStartOrder ++ sort [pid | (pid, _) <- activePeers, pid `notElem` psStartOrder] + activePeersOrdered = [ + peerResources + | pid <- peersStartOrder + , (pid', peerResources) <- activePeers + , pid == pid' + ] + for_ activePeersOrdered $ \PeerResources {prShared, prChainSync, prBlockFetch} -> do let pid = srPeerId prShared forkLinkedThread lrRegistry ("Peer overview " ++ show pid) $ -- The peerRegistry helps ensuring that if any thread fails, then @@ -405,6 +413,7 @@ startNode schedulerConfig genesisTest interval = do , 
gtBlockFetchTimeouts , gtLoPBucketParams = LoPBucketParams { lbpCapacity, lbpRate } , gtCSJParams = CSJParams { csjpJumpSize } + , gtSchedule = PointSchedule {psStartOrder} } = genesisTest StateViewTracers{svtTraceTracer} = lnStateViewTracers diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs index a94c69a968..e023e24335 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/LinkedThreads.hs @@ -72,4 +72,4 @@ prop_chainSyncKillsBlockFetch = do (Time 0, scheduleHeaderPoint firstBlock) ] psMinEndTime = Time $ timeout + 1 - in PointSchedule {psSchedule, psMinEndTime} + in PointSchedule {psSchedule, psStartOrder = [], psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs index 17509dc458..e2f31d8919 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Rollback.hs @@ -95,7 +95,11 @@ rollbackSchedule n blockTree = , banalSchedulePoints trunkSuffix , banalSchedulePoints (btbSuffix branch) ] - in mkPointSchedule $ peersOnlyHonest $ zip (map (Time . (/30)) [0..]) schedulePoints + in PointSchedule { + psSchedule = peersOnlyHonest $ zip (map (Time . (/30)) [0..]) schedulePoints, + psStartOrder = [], + psMinEndTime = Time 0 + } where banalSchedulePoints :: AnchoredFragment blk -> [SchedulePoint blk] banalSchedulePoints = concatMap banalSchedulePoints' . toOldestFirst diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs index 0c594c67e3..8e218df6fa 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Tests/Timeouts.hs @@ -70,4 +70,4 @@ prop_timeouts mustTimeout = do ] -- This keeps the test running long enough to pass the timeout by 'offset'. 
psMinEndTime = Time $ timeout + offset - in PointSchedule {psSchedule, psMinEndTime} + in PointSchedule {psSchedule, psStartOrder = [], psMinEndTime} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs index aa4ec8f315..7fa75f5f0e 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule.hs @@ -37,7 +37,6 @@ module Test.Consensus.PointSchedule ( , ensureScheduleDuration , genesisNodeState , longRangeAttack - , mkPointSchedule , peerSchedulesBlocks , peerStates , peersStates @@ -55,7 +54,6 @@ import Control.Monad.Class.MonadTime.SI (Time (Time), addTime, import Control.Monad.ST (ST) import Data.Functor (($>)) import Data.List (mapAccumL, partition, scanl') -import qualified Data.Map.Strict as Map import Data.Maybe (catMaybes, fromMaybe, mapMaybe) import Data.Time (DiffTime) import Data.Word (Word64) @@ -77,8 +75,8 @@ import Test.Consensus.BlockTree (BlockTree (..), BlockTreeBranch (..), import Test.Consensus.PeerSimulator.StateView (StateView) import Test.Consensus.PointSchedule.NodeState (NodeState (..), genesisNodeState) -import Test.Consensus.PointSchedule.Peers (Peer (..), Peers (..), - peers', peersList) +import Test.Consensus.PointSchedule.Peers (Peer (..), PeerId, + Peers (..), getPeerIds, peers', peersList) import Test.Consensus.PointSchedule.SinglePeer (IsTrunk (IsBranch, IsTrunk), PeerScheduleParams (..), SchedulePoint (..), defaultPeerScheduleParams, mergeOn, @@ -97,21 +95,24 @@ prettyPointSchedule :: (CondenseList (NodeState blk)) => PointSchedule blk -> [String] -prettyPointSchedule peers = - [ "honest peers: " ++ show (Map.size (honestPeers $ psSchedule peers)) - , "adversaries: " ++ show (Map.size (adversarialPeers $ psSchedule peers)) - , "minimal duration: " ++ show (psMinEndTime peers) - ] ++ - zipWith3 - (\number time peerState -> - number ++ ": " ++ peerState ++ " @ " ++ time - ) - (condenseListWithPadding PadLeft $ fst <$> numberedPeersStates) - (showDT . fst . snd <$> numberedPeersStates) - (condenseList $ (snd . snd) <$> numberedPeersStates) +prettyPointSchedule ps@PointSchedule {psStartOrder, psMinEndTime} = + [] + ++ [ "psSchedule =" + ] + ++ ( zipWith3 + ( \number time peerState -> + " " ++ number ++ ": " ++ peerState ++ " @ " ++ time + ) + (condenseListWithPadding PadLeft $ fst <$> numberedPeersStates) + (showDT . fst . snd <$> numberedPeersStates) + (condenseList $ (snd . snd) <$> numberedPeersStates) + ) + ++ [ "psStartOrder = " ++ show psStartOrder, + "psMinEndTime = " ++ show psMinEndTime + ] where numberedPeersStates :: [(Int, (Time, Peer (NodeState blk)))] - numberedPeersStates = zip [0..] (peersStates peers) + numberedPeersStates = zip [0 ..] (peersStates ps) showDT :: Time -> String showDT (Time dt) = printf "%.6f" (realToFrac dt :: Double) @@ -177,15 +178,17 @@ peerScheduleBlocks = mapMaybe (withOriginToMaybe . schedulePointToBlock . snd) data PointSchedule blk = PointSchedule { -- | The actual point schedule psSchedule :: Peers (PeerSchedule blk), + -- | The order in which the peers start and connect to the node under test. + -- The peers that are absent from 'psSchedule' are ignored; the peers from + -- 'psSchedule' that are absent of 'psStartOrder' are started in the end in + -- the order of 'PeerId'. + psStartOrder :: [PeerId], -- | Minimum duration for the simulation of this point schedule. 
-- If no point in the schedule is larger than 'psMinEndTime', -- the simulation will still run until this time is reached. psMinEndTime :: Time } -mkPointSchedule :: Peers (PeerSchedule blk) -> PointSchedule blk -mkPointSchedule sch = PointSchedule sch $ Time 0 - -- | List of all blocks appearing in the schedules. peerSchedulesBlocks :: Peers (PeerSchedule blk) -> [blk] peerSchedulesBlocks = concatMap (peerScheduleBlocks . value) . peersList @@ -208,7 +211,11 @@ longRangeAttack :: longRangeAttack BlockTree {btTrunk, btBranches = [branch]} g = do honest <- peerScheduleFromTipPoints g honParams [(IsTrunk, [AF.length btTrunk - 1])] btTrunk [] adv <- peerScheduleFromTipPoints g advParams [(IsBranch, [AF.length (btbFull branch) - 1])] btTrunk [btbFull branch] - pure $ mkPointSchedule $ peers' [honest] [adv] + pure $ PointSchedule { + psSchedule = peers' [honest] [adv], + psStartOrder = [], + psMinEndTime = Time 0 + } where honParams = defaultPeerScheduleParams {pspHeaderDelayInterval = (0.3, 0.4)} advParams = defaultPeerScheduleParams {pspTipDelayInterval = (0, 0.1)} @@ -240,6 +247,7 @@ uniformPoints PointsGeneratorParams {pgpExtraHonestPeers, pgpDowntime} = case pg -- Include rollbacks in a percentage of adversaries, in which case that peer uses two branchs. -- uniformPointsWithExtraHonestPeers :: + forall g m blk. (StatefulGen g m, AF.HasHeader blk) => Int -> BlockTree blk -> @@ -254,7 +262,9 @@ uniformPointsWithExtraHonestPeers honests <- replicateM (extraHonestPeers + 1) $ mkSchedule [(IsTrunk, [honestTip0 .. AF.length btTrunk - 1])] [] advs <- takeBranches btBranches - pure $ mkPointSchedule $ peers' honests advs + let psSchedule = peers' honests advs + psStartOrder <- shuffle (getPeerIds psSchedule) + pure $ PointSchedule {psSchedule, psStartOrder, psMinEndTime = Time 0} where takeBranches = \case [] -> pure [] @@ -305,6 +315,15 @@ uniformPointsWithExtraHonestPeers rollbackProb = 0.2 + -- Inefficient implementation, but sufficient for small lists. + shuffle :: [a] -> m [a] + shuffle [] = pure [] + shuffle xs = do + i <- Random.uniformRM (0, length xs - 1) g + let x = xs !! i + xs' = take i xs ++ drop (i+1) xs + (x :) <$> shuffle xs' + minusClamp :: (Ord a, Num a) => a -> a -> a minusClamp a b | a <= b = 0 | otherwise = a - b @@ -361,6 +380,7 @@ syncTips honests advs = -- -- Includes rollbacks in some schedules. uniformPointsWithExtraHonestPeersAndDowntime :: + forall g m blk. (StatefulGen g m, AF.HasHeader blk) => Int -> SecurityParam -> @@ -383,7 +403,9 @@ uniformPointsWithExtraHonestPeersAndDowntime mkSchedule [(IsTrunk, [honestTip0, minusClamp (AF.length btTrunk) 1])] [] advs <- takeBranches pauseSlot btBranches let (honests', advs') = syncTips honests advs - pure $ mkPointSchedule $ peers' honests' advs' + psSchedule = peers' honests' advs' + psStartOrder <- shuffle $ getPeerIds psSchedule + pure $ PointSchedule {psSchedule, psStartOrder, psMinEndTime = Time 0} where takeBranches pause = \case [] -> pure [] @@ -438,6 +460,15 @@ uniformPointsWithExtraHonestPeersAndDowntime rollbackProb = 0.2 + -- Inefficient implementation, but sufficient for small lists. + shuffle :: [a] -> m [a] + shuffle [] = pure [] + shuffle xs = do + i <- Random.uniformRM (0, length xs - 1) g + let x = xs !! 
i + xs' = take i xs ++ drop (i+1) xs + (x :) <$> shuffle xs' + newtype ForecastRange = ForecastRange { unForecastRange :: Word64 } deriving (Show) @@ -545,9 +576,10 @@ stToGen gen = do pure (runSTGen_ seed gen) ensureScheduleDuration :: GenesisTest blk a -> PointSchedule blk -> PointSchedule blk -ensureScheduleDuration gt PointSchedule{psSchedule, psMinEndTime} = +ensureScheduleDuration gt PointSchedule{psSchedule, psStartOrder, psMinEndTime} = PointSchedule { psSchedule + , psStartOrder , psMinEndTime = max psMinEndTime (Time endingDelay) } where diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs index 820f844b22..eb24ccf6e0 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking.hs @@ -41,7 +41,7 @@ shrinkPeerSchedules :: StateView TestBlock -> [GenesisTestFull TestBlock] shrinkPeerSchedules genesisTest@GenesisTest{gtBlockTree, gtSchedule} _stateView = - let PointSchedule {psSchedule} = gtSchedule + let PointSchedule {psSchedule, psStartOrder} = gtSchedule simulationDuration = duration gtSchedule trimmedBlockTree sch = trimBlockTree' sch gtBlockTree shrunkAdversarialPeers = @@ -50,6 +50,7 @@ shrinkPeerSchedules genesisTest@GenesisTest{gtBlockTree, gtSchedule} _stateView genesisTest { gtSchedule = PointSchedule { psSchedule = shrunkSchedule + , psStartOrder , psMinEndTime = simulationDuration } , gtBlockTree = trimmedBlockTree shrunkSchedule @@ -61,6 +62,7 @@ shrinkPeerSchedules genesisTest@GenesisTest{gtBlockTree, gtSchedule} _stateView <&> \shrunkSchedule -> genesisTest { gtSchedule = PointSchedule { psSchedule = shrunkSchedule + , psStartOrder , psMinEndTime = simulationDuration } } @@ -81,6 +83,7 @@ shrinkByRemovingAdversaries genesisTest@GenesisTest{gtSchedule, gtBlockTree} _st in genesisTest { gtSchedule = PointSchedule { psSchedule = shrunkSchedule + , psStartOrder = psStartOrder gtSchedule , psMinEndTime = simulationDuration } , gtBlockTree = trimmedBlockTree diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs index d715375a3f..b375a8ee94 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PointSchedule/Shrinking/Tests.hs @@ -82,14 +82,14 @@ checkShrinkProperty :: (Peers (PeerSchedule TestBlock) -> Peers (PeerSchedule Te checkShrinkProperty prop = forAllBlind (genChains (choose (1, 4)) >>= genUniformSchedulePoints) - (\sch@PointSchedule{psSchedule, psMinEndTime} -> + (\sch@PointSchedule{psSchedule, psStartOrder, psMinEndTime} -> conjoin $ map (\shrunk -> counterexample ( "Original schedule:\n" ++ unlines (map (" " ++) $ prettyPointSchedule sch) ++ "\nShrunk schedule:\n" - ++ unlines (map (" " ++) $ prettyPointSchedule $ PointSchedule shrunk psMinEndTime) + ++ unlines (map (" " ++) $ prettyPointSchedule $ PointSchedule {psSchedule = shrunk, psStartOrder, psMinEndTime}) ) (prop psSchedule shrunk) ) From b1e649ae1ab3c31b8f453092bd2198d9482f0cf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nicolas=20=E2=80=9CNiols=E2=80=9D=20Jeannerod?= Date: Tue, 18 Jun 2024 19:07:28 +0200 Subject: [PATCH 26/26] Add a BlockFetch 
leashing attack test --- .../Test/Consensus/Genesis/Tests/Uniform.hs | 49 +++++++++++++++++-- 1 file changed, 46 insertions(+), 3 deletions(-) diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs index a3400a0184..3a4b3b7814 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/Genesis/Tests/Uniform.hs @@ -19,7 +19,7 @@ module Test.Consensus.Genesis.Tests.Uniform ( import Cardano.Slotting.Slot (SlotNo (SlotNo), WithOrigin (..)) import Control.Monad (replicateM) import Control.Monad.Class.MonadTime.SI (Time, addTime) -import Data.List (intercalate, sort) +import Data.List (intercalate, sort, uncons) import qualified Data.List.NonEmpty as NE import qualified Data.Map.Strict as Map import Data.Maybe (fromMaybe, mapMaybe) @@ -40,7 +40,8 @@ import Test.Consensus.PeerSimulator.Run (SchedulerConfig (..), defaultSchedulerConfig) import Test.Consensus.PeerSimulator.StateView import Test.Consensus.PointSchedule -import Test.Consensus.PointSchedule.Peers (Peers (..), isHonestPeerId) +import Test.Consensus.PointSchedule.Peers (Peers (..), getPeerIds, + isHonestPeerId, peers') import Test.Consensus.PointSchedule.Shrinking (shrinkByRemovingAdversaries, shrinkPeerSchedules) import Test.Consensus.PointSchedule.SinglePeer @@ -72,7 +73,8 @@ tests = -- because this test writes the immutable chain to disk and `instance Binary TestBlock` -- chokes on long chains. adjustQuickCheckMaxSize (const 10) $ - testProperty "the node is shut down and restarted after some time" prop_downtime + testProperty "the node is shut down and restarted after some time" prop_downtime, + testProperty "block fetch leashing attack" prop_blockFetchLeashingAttack ] theProperty :: @@ -416,3 +418,44 @@ prop_downtime = forAllGenesisTest { pgpExtraHonestPeers = fromIntegral (gtExtraHonestPeers gt) , pgpDowntime = DowntimeWithSecurityParam (gtSecurityParam gt) } + +prop_blockFetchLeashingAttack :: Property +prop_blockFetchLeashingAttack = + forAllGenesisTest + (disableBoringTimeouts <$> genChains (pure 0) `enrichedWith` genBlockFetchLeashingSchedule) + defaultSchedulerConfig + { scEnableLoE = True, + scEnableLoP = True, + scEnableCSJ = True + } + shrinkPeerSchedules + theProperty + where + genBlockFetchLeashingSchedule :: GenesisTest TestBlock () -> QC.Gen (PointSchedule TestBlock) + genBlockFetchLeashingSchedule genesisTest = do + PointSchedule {psSchedule, psMinEndTime} <- + stToGen $ + uniformPoints + (PointsGeneratorParams {pgpExtraHonestPeers = 1, pgpDowntime = NoDowntime}) + (gtBlockTree genesisTest) + peers <- QC.shuffle $ Map.elems $ honestPeers psSchedule + let (honest, adversaries) = fromMaybe (error "blockFetchLeashingAttack") $ uncons peers + adversaries' = map (filter (not . isBlockPoint . snd)) adversaries + psSchedule' = peers' [honest] adversaries' + -- Important to shuffle the order in which the peers start, otherwise the + -- honest peer starts first and systematically becomes dynamo. 
+ psStartOrder <- shuffle $ getPeerIds psSchedule' + pure $ PointSchedule {psSchedule = psSchedule', psStartOrder, psMinEndTime} + + isBlockPoint :: SchedulePoint blk -> Bool + isBlockPoint (ScheduleBlockPoint _) = True + isBlockPoint _ = False + + disableBoringTimeouts gt = + gt + { gtChainSyncTimeouts = + (gtChainSyncTimeouts gt) + { mustReplyTimeout = Nothing, + idleTimeout = Nothing + } + }
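The 'psStartOrder' field documented in the PointSchedule record above only fixes the order in which scheduled peers connect; any scheduled peer it does not mention still starts, just after the listed ones and in 'PeerId' order. The standalone Haskell sketch below is not part of the patch: 'PeerId'' and 'effectiveStartOrder' are hypothetical stand-ins, used only to make that documented rule concrete.

import Data.List (sort, (\\))

-- Hypothetical stand-in for the test suite's 'PeerId'; only the derived 'Ord'
-- instance matters for the "remaining peers in 'PeerId' order" rule.
data PeerId' = HonestPeer' Int | AdversarialPeer' Int
  deriving (Eq, Ord, Show)

-- Order in which scheduled peers would start, per the Haddock on
-- 'psStartOrder': peers named in the start order (and present in the
-- schedule) go first; every other scheduled peer follows, sorted by peer id.
effectiveStartOrder :: [PeerId'] -> [PeerId'] -> [PeerId']
effectiveStartOrder startOrder scheduled =
  let explicit = filter (`elem` scheduled) startOrder
      leftover = sort (scheduled \\ explicit)
  in  explicit ++ leftover

-- For example:
--   effectiveStartOrder [AdversarialPeer' 2]
--                       [HonestPeer' 1, AdversarialPeer' 1, AdversarialPeer' 2]
--   == [AdversarialPeer' 2, HonestPeer' 1, AdversarialPeer' 1]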
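The local 'shuffle' helper added to both uniform schedule generators above is what randomises 'psStartOrder' there. As a sanity check on that helper, the following standalone sketch reproduces it with the generator passed explicitly and pairs it with a QuickCheck property stating that shuffling only permutes its input; the property and 'main' wrapper are illustrative additions, not part of the patch.

import Data.List (sort)
import System.Random (mkStdGen)
import System.Random.Stateful (StatefulGen, runStateGen_, uniformRM)
import Test.QuickCheck (quickCheck)

-- Same selection-based shuffle as in the patch: quadratic, which is fine for
-- the handful of peers involved.
shuffle :: StatefulGen g m => g -> [a] -> m [a]
shuffle _ [] = pure []
shuffle g xs = do
  i <- uniformRM (0, length xs - 1) g
  let x   = xs !! i
      xs' = take i xs ++ drop (i + 1) xs
  (x :) <$> shuffle g xs'

-- Shuffling must neither add, drop nor duplicate elements.
prop_shuffleIsPermutation :: Int -> [Int] -> Bool
prop_shuffleIsPermutation seed xs =
  sort (runStateGen_ (mkStdGen seed) (`shuffle` xs)) == sort xs

main :: IO ()
main = quickCheck prop_shuffleIsPermutation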