diff --git a/.gitignore b/.gitignore index 191d6aee62..e4543674e2 100644 --- a/.gitignore +++ b/.gitignore @@ -84,6 +84,9 @@ output.txt .nx/cache .nx/workspace-data output1.txt +output2.txt +output3.txt +output4.txt .zed **/.claude/**/* diff --git a/Cargo.lock b/Cargo.lock index fd435ac0ce..396d5592ef 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -922,6 +922,7 @@ name = "batched-merkle-tree-test" version = "0.1.0" dependencies = [ "light-account-checks", + "light-array-map", "light-batched-merkle-tree", "light-bloom-filter", "light-compressed-account", @@ -3382,11 +3383,14 @@ dependencies = [ "light-bloom-filter", "light-compressed-account", "light-hasher", + "light-indexed-array", "light-macros", "light-merkle-tree-metadata", "light-merkle-tree-reference", "light-verifier", "light-zero-copy", + "num-bigint 0.4.6", + "num-traits", "pinocchio", "rand 0.8.5", "solana-account-info", diff --git a/program-libs/account-checks/src/checks.rs b/program-libs/account-checks/src/checks.rs index a957d1f21a..fb9bd8ad43 100644 --- a/program-libs/account-checks/src/checks.rs +++ b/program-libs/account-checks/src/checks.rs @@ -66,6 +66,7 @@ pub fn check_account_info( /// 1. discriminator is uninitialized /// 2. 
sets discriminator pub fn set_discriminator<T: Discriminator>(bytes: &mut [u8]) -> Result<(), AccountError> { + #[cfg(not(kani))] check_data_is_zeroed::<DISCRIMINATOR_LEN>(bytes) .map_err(|_| AccountError::AlreadyInitialized)?; bytes[0..DISCRIMINATOR_LEN].copy_from_slice(&T::LIGHT_DISCRIMINATOR); diff --git a/program-libs/batched-merkle-tree/Cargo.toml b/program-libs/batched-merkle-tree/Cargo.toml index 7081953197..18c991528c 100644 --- a/program-libs/batched-merkle-tree/Cargo.toml +++ b/program-libs/batched-merkle-tree/Cargo.toml @@ -9,6 +9,7 @@ edition = "2021" [features] default = ["solana"] test-only = [] +kani = ["light-zero-copy/kani"] solana = [ "solana-program-error", "solana-account-info", @@ -59,7 +60,10 @@ light-merkle-tree-reference = { workspace = true } light-account-checks = { workspace = true, features = ["test-only"] } light-compressed-account = { workspace = true, features = ["new-unique"] } light-hasher = { workspace = true, features = ["keccak"] } +light-indexed-array = { workspace = true } +num-bigint = { workspace = true } +num-traits = { workspace = true } [lints.rust.unexpected_cfgs] level = "allow" -check-cfg = ['cfg(target_os, values("solana"))'] +check-cfg = ['cfg(target_os, values("solana"))', 'cfg(kani)'] diff --git a/program-libs/batched-merkle-tree/docs/CLAUDE.md b/program-libs/batched-merkle-tree/docs/CLAUDE.md new file mode 100644 index 0000000000..45205920cc --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/CLAUDE.md @@ -0,0 +1,148 @@ +# Batched Merkle Tree Library + +The `light-batched-merkle-tree` crate provides batched Merkle tree implementations for the Light Protocol account compression program. Instead of updating trees one leaf at a time, this library batches multiple insertions and updates them with zero-knowledge proofs (ZKPs), enabling efficient on-chain verification. Trees maintain a cyclic root history for validity proofs, and use bloom filters for non-inclusion proofs while batches are being filled.
+ There are two tree types: **state trees** (two accounts: a tree account (input queue, tree metadata, roots) and an output queue account) for compressed accounts, and **address trees** (one account that contains the address queue, tree metadata, roots) for address registration. + +## Accounts + +### Account Types + +- **[TREE_ACCOUNT.md](TREE_ACCOUNT.md)** - BatchedMerkleTreeAccount (state and address trees) +- **[QUEUE_ACCOUNT.md](QUEUE_ACCOUNT.md)** - BatchedQueueAccount (output queue for state trees) + +### Overview + +The batched merkle tree library uses two main Solana account types: + +**BatchedMerkleTreeAccount:** +The main tree account storing tree roots, root history, and integrated input queue (bloom filters + hash chains for nullifiers or addresses). Used for both state trees and address trees. + +**Details:** [TREE_ACCOUNT.md](TREE_ACCOUNT.md) + +**BatchedQueueAccount:** +Output queue account for state trees that temporarily stores compressed account hashes before tree insertion. Enables immediate spending via proof-by-index.
+ +**Details:** [QUEUE_ACCOUNT.md](QUEUE_ACCOUNT.md) + +### State Trees vs Address Trees + +**State Trees (2 accounts):** +- `BatchedMerkleTreeAccount` with integrated input queue (for nullifiers) +- Separate `BatchedQueueAccount` for output operations (appending new compressed accounts) + +**Address Trees (1 account):** +- `BatchedMerkleTreeAccount` with integrated input queue (for addresses) +- No separate output queue + +## Operations + +### Initialization +- **[INITIALIZE_STATE_TREE.md](INITIALIZE_STATE_TREE.md)** - Create state tree + output queue pair (2 solana accounts) + - Source: [`src/initialize_state_tree.rs`](../src/initialize_state_tree.rs) + +- **[INITIALIZE_ADDRESS_TREE.md](INITIALIZE_ADDRESS_TREE.md)** - Create address tree with integrated queue (1 solana account) + - Source: [`src/initialize_address_tree.rs`](../src/initialize_address_tree.rs) + +### Queue Insertion Operations +- **[INSERT_OUTPUT_QUEUE.md](INSERT_OUTPUT_QUEUE.md)** - Insert compressed account hash into output queue (state tree) + - Source: [`src/queue.rs`](../src/queue.rs) - `BatchedQueueAccount::insert_into_current_batch` + +- **[INSERT_INPUT_QUEUE.md](INSERT_INPUT_QUEUE.md)** - Insert nullifiers into input queue (state tree) + - Source: [`src/merkle_tree.rs`](../src/merkle_tree.rs) - `BatchedMerkleTreeAccount::insert_nullifier_into_queue` + +- **[INSERT_ADDRESS_QUEUE.md](INSERT_ADDRESS_QUEUE.md)** - Insert addresses into address queue + - Source: [`src/merkle_tree.rs`](../src/merkle_tree.rs) - `BatchedMerkleTreeAccount::insert_address_into_queue` + +### Tree Update Operations +- **[UPDATE_FROM_OUTPUT_QUEUE.md](UPDATE_FROM_OUTPUT_QUEUE.md)** - Batch append with ZKP verification + - Source: [`src/merkle_tree.rs`](../src/merkle_tree.rs) - `BatchedMerkleTreeAccount::update_tree_from_output_queue_account` + +- **[UPDATE_FROM_INPUT_QUEUE.md](UPDATE_FROM_INPUT_QUEUE.md)** - Batch nullify/address updates with ZKP + - Source: [`src/merkle_tree.rs`](../src/merkle_tree.rs) - 
`update_tree_from_input_queue`, `update_tree_from_address_queue` + +## Key Concepts + +**Batching System:** Trees use 2 alternating batches. While one batch is being filled, the previous batch can be updated into the tree with a ZKP. + +**ZKP Batches:** Each batch is divided into smaller ZKP batches (`batch_size / zkp_batch_size`). Trees are updated incrementally by ZKP batch. + +**Bloom Filters:** Input queues (nullifier queue for state trees, address queue for address trees) use bloom filters for non-inclusion proofs. While a batch is filling, values are inserted into the bloom filter. After the batch is fully inserted into the tree and the next batch is 50% full, the bloom filter is zeroed to prevent false positives. Output queues do not use bloom filters. + +**Value Vecs:** Output queues store the actual compressed account hashes in value vectors (one per batch). Values can be accessed by leaf index even before they're inserted into the tree, enabling immediate spending of newly created compressed accounts. + +**Hash Chains:** Each ZKP batch has a hash chain storing the Poseidon hash of all values in that ZKP batch. These hash chains are used as public inputs for ZKP verification. + +**ZKP Verification:** Tree updates require zero-knowledge proofs proving the correctness of batch operations (old root + queue values → new root). Public inputs: old root, new root, hash chain (commitment to queue elements), and for appends: start_index (output queue) or next_index (address queue). + +**Root History:** Trees maintain a cyclic buffer of recent roots (default: 200). This enables validity proofs for recently spent compressed accounts even as the tree continues to update. + +**Rollover:** When a tree reaches capacity (2^height leaves), it must be replaced with a new tree. The rollover process creates a new tree and marks the old tree as rolled over, preserving the old tree's roots for ongoing validity proofs. 
A rollover can be performed once the rollover threshold is met (default: 95% full). + +**State vs Address Trees:** +- **State trees** have a separate `BatchedQueueAccount` for output operations (appending new leaves). Input operations (nullifying) use the integrated input queue on the tree account. +- **Address trees** have only an integrated input queue on the tree account - no separate output queue. + +## ZKP Verification + +Batch update operations require zero-knowledge proofs generated by the Light Protocol prover: + +- **Prover Server:** `prover/server/` - Generates ZK proofs for batch operations +- **Prover Client:** `prover/client/` - Client libraries for requesting proofs +- **Batch Update Circuits:** `prover/server/prover/v2/` - Circuit definitions for batch append, batch update (nullify), and batch address append operations + +## Dependencies + +This crate relies on several Light Protocol libraries: + +- **`light-bloom-filter`** - Bloom filter implementation for non-inclusion proofs +- **`light-hasher`** - Poseidon hash implementation for hash chains and tree operations +- **`light-verifier`** - ZKP verification for batch updates +- **`light-zero-copy`** - Zero-copy serialization for efficient account data access +- **`light-merkle-tree-metadata`** - Shared metadata structures for merkle trees +- **`light-compressed-account`** - Compressed account types and utilities +- **`light-account-checks`** - Account validation and discriminator checks + +## Testing and Reference Implementations + +**IndexedMerkleTree Reference Implementation:** +- **`light-merkle-tree-reference`** - Reference implementation of indexed Merkle trees (dev dependency) +- Source: `program-tests/merkle-tree/src/indexed.rs` - Canonical IndexedMerkleTree implementation used for generating constants and testing +- Used to generate constants like `ADDRESS_TREE_INIT_ROOT_40` in [`src/constants.rs`](../src/constants.rs) +- Initializes address trees with a single leaf: `H(0, 
HIGHEST_ADDRESS_PLUS_ONE)` + +## Source Code Structure + +**Core Account Types:** +- [`src/merkle_tree.rs`](../src/merkle_tree.rs) - `BatchedMerkleTreeAccount` (prove inclusion, nullify existing state, create new addresses) +- [`src/queue.rs`](../src/queue.rs) - `BatchedQueueAccount` (add new state (transaction outputs)) +- [`src/batch.rs`](../src/batch.rs) - `Batch` state machine (Fill → Full → Inserted) +- [`src/queue_batch_metadata.rs`](../src/queue_batch_metadata.rs) - `QueueBatches` metadata + +**Metadata and Configuration:** +- [`src/merkle_tree_metadata.rs`](../src/merkle_tree_metadata.rs) - `BatchedMerkleTreeMetadata` and account size calculations +- [`src/constants.rs`](../src/constants.rs) - Default configuration values + +**ZKP Infrastructure:** +- `prover/server/` - Prover server that generates ZK proofs for batch operations +- `prover/client/` - Client libraries for requesting proofs +- `prover/server/prover/v2/` - Batch update circuit definitions (append, nullify, address append) + +**Initialization:** +- [`src/initialize_state_tree.rs`](../src/initialize_state_tree.rs) - State tree initialization +- [`src/initialize_address_tree.rs`](../src/initialize_address_tree.rs) - Address tree initialization +- [`src/rollover_state_tree.rs`](../src/rollover_state_tree.rs) - State tree rollover +- [`src/rollover_address_tree.rs`](../src/rollover_address_tree.rs) - Address tree rollover + +**Errors:** +- [`src/errors.rs`](../src/errors.rs) - `BatchedMerkleTreeError` enum with all error types + +## Error Codes + +All errors are defined in [`src/errors.rs`](../src/errors.rs) and map to u32 error codes (14301-14312 range): +- `BatchNotReady` (14301) - Batch is not ready to be inserted +- `BatchAlreadyInserted` (14302) - Batch is already inserted +- `TreeIsFull` (14310) - Batched Merkle tree reached capacity +- `NonInclusionCheckFailed` (14311) - Value exists in bloom filter +- `BloomFilterNotZeroed` (14312) - Bloom filter must be zeroed before reuse +- Additional 
errors from underlying libraries (hasher, zero-copy, verifier, etc.) diff --git a/program-libs/batched-merkle-tree/docs/INITIALIZE_ADDRESS_TREE.md b/program-libs/batched-merkle-tree/docs/INITIALIZE_ADDRESS_TREE.md new file mode 100644 index 0000000000..22906c9c91 --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/INITIALIZE_ADDRESS_TREE.md @@ -0,0 +1,101 @@ +# Initialize Address Tree + +**path:** src/initialize_address_tree.rs + +**description:** +Initializes an address tree with integrated address queue. This operation creates **one Solana account**: + +**Address Merkle tree account** (`BatchedMerkleTreeAccount`) - Stores tree roots, root history, and integrated address queue (bloom filters + hash chains for addresses) +- Account layout `BatchedMerkleTreeAccount` defined in: src/merkle_tree.rs +- Metadata `BatchedMerkleTreeMetadata` defined in: src/merkle_tree_metadata.rs +- Tree type: `TreeType::AddressV2` (5) +- Initial root: `ADDRESS_TREE_INIT_ROOT_40` (pre-initialized with one indexed array element) +- Starts at next_index: 1 (index 0 contains sentinel element) +- Discriminator: b`BatchMta` `[66, 97, 116, 99, 104, 77, 116, 97]` (8 bytes) + +Address trees are used for address registration in the Light Protocol. New addresses are inserted into the address queue, then batch-updated into the tree with ZKPs. Unlike state trees, address trees have no separate output queue - the address queue is integrated into the tree account. 
+ **Instruction data:** +Instruction data is defined in: src/initialize_address_tree.rs + +`InitAddressTreeAccountsInstructionData` struct: + +**Tree configuration:** +- `height`: u32 - Tree height (default: 40, capacity = 2^40 leaves) +- `index`: u64 - Unchecked identifier of the address tree +- `root_history_capacity`: u32 - Size of root history cyclic buffer (default: 200) + +**Batch sizes:** +- `input_queue_batch_size`: u64 - Elements per address queue batch (default: 15,000) +- `input_queue_zkp_batch_size`: u64 - Elements per ZKP batch for address insertions (default: 250) + +**Validation:** Batch size must be divisible by ZKP batch size. Error: `BatchSizeNotDivisibleByZkpBatchSize` if validation fails. + +**Bloom filter configuration:** +- `bloom_filter_capacity`: u64 - Capacity in bits (default: batch_size * 8) +- `bloom_filter_num_iters`: u64 - Number of hash functions (default: 3 for test, 10 for production) + +**Validation:** +- Capacity must be divisible by 8 +- Capacity must be >= batch_size * 8 + +**Access control:** +- `program_owner`: Option<Pubkey> - Optional program owning the tree +- `forester`: Option<Pubkey> - Optional forester pubkey for non-Light foresters +- `owner`: Pubkey - Account owner (passed separately as function parameter, not in struct) + +**Rollover and fees:** +- `rollover_threshold`: Option<u64> - Percentage threshold for rollover (default: 95%) +- `network_fee`: Option<u64> - Network fee amount (default: 10,000 lamports) +- `close_threshold`: Option<u64> - Placeholder, unimplemented + +**Accounts:** +1. merkle_tree_account + - mutable + - Address Merkle tree account being initialized + - Must be rent-exempt for calculated size + +Note: No signer accounts required - account is expected to be pre-created with correct size + +**Instruction Logic and Checks:** + +1.
**Calculate account size:** + - Merkle tree account size: Based on input_queue_batch_size, bloom_filter_capacity, input_queue_zkp_batch_size, root_history_capacity, and height + - Account size formula defined in: src/merkle_tree.rs (`get_merkle_tree_account_size`) + +2. **Verify rent exemption:** + - Check: merkle_tree_account balance >= minimum rent exemption for mt_account_size + - Uses: `check_account_balance_is_rent_exempt` from `light-account-checks` + - Store rent amount for rollover fee calculation + +3. **Initialize address Merkle tree account:** + - Set discriminator: `BatchMta` (8 bytes) + - Create tree metadata: + - tree_type: `TreeType::AddressV2` (5) + - associated_queue: Pubkey::default() (address trees have no separate queue) + - Calculate rollover_fee: Based on rollover_threshold, height, and merkle_tree_rent + - access_metadata: Set owner, program_owner, forester + - rollover_metadata: Set index, rollover_fee, rollover_threshold, network_fee, close_threshold, additional_bytes=None + - Initialize root history: Cyclic buffer with capacity=root_history_capacity, first entry = `ADDRESS_TREE_INIT_ROOT_40` + - Initialize integrated address queue: + - 2 bloom filter stores (one per batch), size = bloom_filter_capacity / 8 bytes each + - 2 hash chain stores (one per batch), capacity = (input_queue_batch_size / input_queue_zkp_batch_size) each + - Batch metadata with input_queue_batch_size and input_queue_zkp_batch_size + - Compute hashed_pubkey: Hash and truncate to 31 bytes for bn254 field compatibility + - next_index: 1 (starts at 1 because index 0 contains pre-initialized sentinel element) + - sequence_number: 0 (increments with each tree update) + - Rollover fee: Charged on address tree operations + +4. 
**Validate configurations:** + - root_history_capacity >= (input_queue_batch_size / input_queue_zkp_batch_size) + - Rationale: Ensures sufficient space for roots generated by address queue operations + - ZKP batch sizes must be 10 or 250 (only supported circuit sizes for address trees) + - height must be 40 (fixed for address trees) + +**Errors:** +- `AccountError::AccountNotRentExempt` (error code: 12011) - Account balance insufficient for rent exemption at calculated size +- `AccountError::InvalidAccountSize` (error code: 12006) - Account data length doesn't match calculated size requirements +- `BatchedMerkleTreeError::BatchSizeNotDivisibleByZkpBatchSize` (error code: 14305) - Batch size is not evenly divisible by ZKP batch size +- `MerkleTreeMetadataError::InvalidRolloverThreshold` - Rollover threshold value is invalid (must be percentage) +- `ZeroCopyError::Size` - Account size mismatch during zero-copy deserialization +- `BorshError` - Failed to serialize or deserialize metadata structures diff --git a/program-libs/batched-merkle-tree/docs/INITIALIZE_STATE_TREE.md b/program-libs/batched-merkle-tree/docs/INITIALIZE_STATE_TREE.md new file mode 100644 index 0000000000..fd68b63f3c --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/INITIALIZE_STATE_TREE.md @@ -0,0 +1,133 @@ +# Initialize State Tree + +**path:** src/initialize_state_tree.rs + +**description:** +Initializes a state tree with integrated input queue and separate output queue. This operation creates **two Solana accounts**: + +1. 
**State Merkle tree account** (`BatchedMerkleTreeAccount`) - Stores tree roots, root history, and integrated input queue (bloom filters + hash chains for nullifiers) + - Account layout `BatchedMerkleTreeAccount` defined in: src/merkle_tree.rs + - Metadata `BatchedMerkleTreeMetadata` defined in: src/merkle_tree_metadata.rs + - Tree type: `TreeType::StateV2` (4) + - Initial root: zero bytes for specified height + - Discriminator: b`BatchMta` `[66, 97, 116, 99, 104, 77, 116, 97]` (8 bytes) + +2. **Output queue account** (`BatchedQueueAccount`) - Temporarily stores compressed account hashes before tree insertion + - Account layout `BatchedQueueAccount` defined in: src/queue.rs + - Metadata `BatchedQueueMetadata` defined in: src/queue.rs + - Queue type: `QueueType::OutputStateV2` + - Enables immediate spending via proof-by-index + - Discriminator: b`queueacc` `[113, 117, 101, 117, 101, 97, 99, 99]` (8 bytes) + +State trees are used for compressed account lifecycle management. The output queue stores newly created compressed accounts, while the input queue (integrated into the tree account) tracks nullifiers when compressed accounts are spent. + +**Instruction data:** +Instruction data is defined in: src/initialize_state_tree.rs + +`InitStateTreeAccountsInstructionData` struct: + +**Tree configuration:** +- `height`: u32 - Tree height (default: 32, capacity = 2^32 leaves) +- `index`: u64 - Unchecked identifier of the state tree +- `root_history_capacity`: u32 - Size of root history cyclic buffer (default: 200) + +**Batch sizes:** +- `input_queue_batch_size`: u64 - Elements per input queue batch (default: 15,000) +- `output_queue_batch_size`: u64 - Elements per output queue batch (default: 15,000) +- `input_queue_zkp_batch_size`: u64 - Elements per ZKP batch for nullifications (default: 500) +- `output_queue_zkp_batch_size`: u64 - Elements per ZKP batch for appends (default: 500) + +**Validation:** Batch sizes must be divisible by their respective ZKP batch sizes. 
Error: `BatchSizeNotDivisibleByZkpBatchSize` if validation fails. + +**Bloom filter configuration (input queue only):** +- `bloom_filter_capacity`: u64 - Capacity in bits (default: batch_size * 8) +- `bloom_filter_num_iters`: u64 - Number of hash functions (default: 3 for test, 10 for production) + +**Validation:** +- Capacity must be divisible by 8 +- Capacity must be >= batch_size * 8 + +**Access control:** +- `program_owner`: Option<Pubkey> - Optional program owning the tree +- `forester`: Option<Pubkey> - Optional forester pubkey for non-Light foresters +- `owner`: Pubkey - Account owner (passed separately as function parameter, not in struct) + +**Rollover and fees:** +- `rollover_threshold`: Option<u64> - Percentage threshold for rollover (default: 95%) +- `network_fee`: Option<u64> - Network fee amount (default: 5,000 lamports) +- `additional_bytes`: u64 - CPI context account size for rollover (default: 20KB + 8 bytes) +- `close_threshold`: Option<u64> - Placeholder, unimplemented + +**Accounts:** +1. merkle_tree_account + - mutable + - State Merkle tree account being initialized + - Must be rent-exempt for calculated size + +2. queue_account + - mutable + - Output queue account being initialized + - Must be rent-exempt for calculated size + +Note: No signer accounts required - accounts are expected to be pre-created with correct sizes + +**Instruction Logic and Checks:** + +1. **Calculate account sizes:** + - Queue account size: Based on output_queue_batch_size and output_queue_zkp_batch_size + - Merkle tree account size: Based on input_queue_batch_size, bloom_filter_capacity, input_queue_zkp_batch_size, root_history_capacity, and height + - Account size formulas defined in: src/queue.rs (`get_output_queue_account_size`) and src/merkle_tree.rs (`get_merkle_tree_account_size`) + +2.
**Verify rent exemption:** + - Check: queue_account balance >= minimum rent exemption for queue_account_size + - Check: merkle_tree_account balance >= minimum rent exemption for mt_account_size + - Uses: `check_account_balance_is_rent_exempt` from `light-account-checks` + - Store rent amounts for rollover fee calculation + +3. **Initialize output queue account:** + - Set discriminator: `queueacc` (8 bytes) + - Create queue metadata: + - queue_type: `QueueType::OutputStateV2` + - associated_merkle_tree: merkle_tree_account pubkey + - Calculate rollover_fee: Based on rollover_threshold, height, and total rent (merkle_tree_rent + additional_bytes_rent + queue_rent) + - access_metadata: Set owner, program_owner, forester + - rollover_metadata: Set index, rollover_fee, rollover_threshold, network_fee, close_threshold, additional_bytes + - Initialize batch metadata: + - 2 batches (alternating) + - batch_size: output_queue_batch_size + - zkp_batch_size: output_queue_zkp_batch_size + - bloom_filter_capacity: 0 (output queues don't use bloom filters) + - Initialize value vecs: 2 vectors (one per batch), capacity = batch_size each + - Initialize hash chain stores: 2 vectors (one per batch), capacity = (batch_size / zkp_batch_size) each + - Compute hashed pubkeys: Hash and truncate to 31 bytes for bn254 field compatibility + - tree_capacity: 2^height + - Rollover fee: Charged when creating output compressed accounts (insertion into output queue) + +4. 
**Initialize state Merkle tree account:** + - Set discriminator: `BatchMta` (8 bytes) + - Create tree metadata: + - tree_type: `TreeType::StateV2` (4) + - associated_queue: queue_account pubkey + - access_metadata: Set owner, program_owner, forester + - rollover_metadata: Set index, rollover_fee=0 (charged on queue insertion, not tree ops), rollover_threshold, network_fee, close_threshold, additional_bytes=None + - Initialize root history: Cyclic buffer with capacity=root_history_capacity, first entry = zero bytes for tree height + - Initialize integrated input queue: + - 2 bloom filter stores (one per batch), size = bloom_filter_capacity / 8 bytes each + - 2 hash chain stores (one per batch), capacity = (input_queue_batch_size / input_queue_zkp_batch_size) each + - Batch metadata with input_queue_batch_size and input_queue_zkp_batch_size + - Compute hashed_pubkey: Hash and truncate to 31 bytes for bn254 field compatibility + - next_index: 0 (starts empty) + - sequence_number: 0 (increments with each tree update) + +5. 
**Validate configurations:** + - root_history_capacity >= (output_queue_batch_size / output_queue_zkp_batch_size) + (input_queue_batch_size / input_queue_zkp_batch_size) + - Rationale: Ensures sufficient space for roots generated by both input and output operations + - ZKP batch sizes must be 10 or 500 (only supported circuit sizes) + +**Errors:** +- `AccountError::AccountNotRentExempt` (error code: 12011) - Account balance insufficient for rent exemption at calculated size +- `AccountError::InvalidAccountSize` (error code: 12006) - Account data length doesn't match calculated size requirements +- `BatchedMerkleTreeError::BatchSizeNotDivisibleByZkpBatchSize` (error code: 14305) - Batch size is not evenly divisible by ZKP batch size +- `MerkleTreeMetadataError::InvalidRolloverThreshold` - Rollover threshold value is invalid (must be percentage) +- `ZeroCopyError::Size` - Account size mismatch during zero-copy deserialization +- `BorshError` - Failed to serialize or deserialize metadata structures diff --git a/program-libs/batched-merkle-tree/docs/INSERT_ADDRESS_QUEUE.md b/program-libs/batched-merkle-tree/docs/INSERT_ADDRESS_QUEUE.md new file mode 100644 index 0000000000..fb340da476 --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/INSERT_ADDRESS_QUEUE.md @@ -0,0 +1,96 @@ +# Insert Into Address Queue + +**path:** src/merkle_tree.rs + +**description:** +Inserts an address into the address tree's integrated address queue when creating a new address for compressed accounts. The bloom filter prevents address reuse by checking that the address doesn't already exist in any batch's bloom filter. The address is stored in the hash chain and will be inserted into the tree by a batch update. The address queue stores addresses in both hash chains and bloom filters until the bloom filter is zeroed (which occurs after the batch is fully inserted into the tree AND the next batch reaches 50% capacity). + +Key characteristics: +1. 
Inserts address into both bloom filter and hash chain (same value in both) +2. Checks non-inclusion: address must not exist in any bloom filter (prevents address reuse) +3. Checks tree capacity before insertion (address trees have fixed capacity) +4. Increments queue_next_index (address queue index; used by indexers as sequence number) + +The address queue uses a two-batch alternating system to enable zeroing out one bloom filter while the other is still being used for non-inclusion checks. + +**Operation:** +Method: `BatchedMerkleTreeAccount::insert_address_into_queue` + +**Parameters:** +- `address`: &[u8; 32] - Address to insert (32-byte hash) +- `current_slot`: &u64 - Current Solana slot number (sets batch start_slot on first insertion; used by indexers to track when batch started filling, not used for batch logic) + +**Accounts:** +This operation modifies a `BatchedMerkleTreeAccount`: +- Must be type `TreeType::AddressV2` +- Account layout defined in: src/merkle_tree.rs +- Account documentation: TREE_ACCOUNT.md +- Is initialized via `initialize_address_tree` +- Has integrated address queue (bloom filters + hash chains) + +**Operation Logic and Checks:** + +1. **Verify tree type:** + - Check: `tree_type == TreeType::AddressV2` + - Error if state tree (state trees don't have address queues) + +2. **Check tree capacity:** + - Call `check_queue_next_index_reached_tree_capacity()` + - Error if `queue_next_index >= tree_capacity` + - Ensures all queued addresses can be inserted into the tree + +3. **Insert into current batch:** + Calls `insert_into_current_queue_batch` helper which: + + a. **Check batch state (readiness):** + - If batch state is `Fill`: Ready for insertion, continue + - If batch state is `Inserted`: Batch was fully processed, needs clearing: + - Check bloom filter is zeroed; error if not + - Clear hash chain stores (reset all hash chains) + - Advance batch state to `Fill` + - Reset batch metadata (start_index, sequence_number, etc.) 
+ - If batch state is `Full`: Error - batch not ready for insertion + + b. **Insert address into batch:** + - Call `current_batch.insert`: + - Insert address into bloom filter + - Check non-inclusion: address must not exist in any other bloom filter + - Update hash chain with address: `Poseidon(prev_hash_chain, address)` + - Store updated hash chain in hash chain store + - Increment batch's internal element counter + + c. **Check if batch is full:** + - If `num_inserted_elements == batch_size`: + - Transition batch state from `Fill` to `Full` + - Increment `currently_processing_batch_index` (switches to other batch) + - Update `pending_batch_index` (marks this batch ready for tree update) + +4. **Increment queue_next_index:** + - `queue_next_index += 1` + - Used as sequence number by indexers to track address order + +**Validations:** +- Tree must be address tree (enforced by tree type check) +- Tree must not be full: `queue_next_index < tree_capacity` (checked before insertion) +- Batch must be in `Fill` or `Inserted` state (enforced by `insert_into_current_queue_batch`) +- Bloom filter must be zeroed before reuse (enforced when clearing batch in `Inserted` state) +- Non-inclusion check: address must not exist in any bloom filter (prevents address reuse) + +**State Changes:** +- Bloom filter: Stores address for non-inclusion checks +- Hash chain store: Updates running Poseidon hash with address for ZKP batch +- Batch metadata: + - `num_inserted_elements`: Incremented + - `state`: May transition `Fill` → `Full` when batch fills + - `currently_processing_batch_index`: May switch to other batch + - `pending_batch_index`: Updated when batch becomes full +- Tree metadata: + - `queue_next_index`: Always incremented (sequence number for indexers) + +**Errors:** +- `MerkleTreeMetadataError::InvalidTreeType` - Tree is not an address tree (state trees don't support address insertion) +- `BatchedMerkleTreeError::TreeIsFull` (error code: 14310) - Address tree has reached 
capacity (queue_next_index >= tree_capacity) +- `BatchedMerkleTreeError::BatchNotReady` (error code: 14301) - Batch is in `Full` state and cannot accept insertions +- `BatchedMerkleTreeError::BloomFilterNotZeroed` (error code: 14312) - Attempting to reuse batch before bloom filter has been zeroed by forester +- `BatchedMerkleTreeError::NonInclusionCheckFailed` (error code: 14311) - Address already exists in bloom filter (address reuse attempt) +- `ZeroCopyError` - Failed to access bloom filter stores or hash chain stores diff --git a/program-libs/batched-merkle-tree/docs/INSERT_INPUT_QUEUE.md b/program-libs/batched-merkle-tree/docs/INSERT_INPUT_QUEUE.md new file mode 100644 index 0000000000..edf6a320d4 --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/INSERT_INPUT_QUEUE.md @@ -0,0 +1,97 @@ +# Insert Into Input Queue (Nullifier) + +**path:** src/merkle_tree.rs + +**description:** +Inserts a nullifier into the state tree's integrated input queue when spending a compressed account. The bloom filter prevents double-spending by checking that the compressed account hash doesn't already exist in any batch's bloom filter. The nullifier (which will replace the compressed account hash in the tree once inserted by a batch update) is stored in the hash chain. The input queue stores nullifiers in hash chains and compressed account hashes in bloom filters until the bloom filter is zeroed (which occurs after the batch is fully inserted into the tree AND the next batch reaches 50% capacity). + +Key characteristics: +1. Creates nullifier: `Hash(compressed_account_hash, leaf_index, tx_hash)` +2. Inserts nullifier into hash chain (value that will replace the leaf in the tree) +3. Inserts compressed_account_hash into bloom filter (for non-inclusion checks in subsequent transactions) +4. Checks non-inclusion: compressed_account_hash must not exist in any bloom filter (prevents double-spending) +5. 
Increments nullifier_next_index (nullifier queue index; used by indexers as sequence number) + +The input queue uses a two-batch alternating system to enable zeroing out one bloom filter while the other is still being used for non-inclusion checks. + +**Operation:** +Method: `BatchedMerkleTreeAccount::insert_nullifier_into_queue` + +**Parameters:** +- `compressed_account_hash`: &[u8; 32] - Hash of compressed account being nullified +- `leaf_index`: u64 - Index in the tree where the compressed account exists (note: although leaf_index is already inside the compressed_account_hash, it's added to the nullifier hash to expose it efficiently in the batch update ZKP) +- `tx_hash`: &[u8; 32] - Transaction hash; enables ZK proofs showing how a compressed account was spent and what other accounts exist in that transaction +- `current_slot`: &u64 - Current Solana slot number (sets batch start_slot on first insertion; used by indexers to track when batch started filling, not used for batch logic) + +**Accounts:** +This operation modifies a `BatchedMerkleTreeAccount`: +- Must be type `TreeType::StateV2` (we nullify state not addresses) +- Account layout defined in: src/merkle_tree.rs +- Account documentation: TREE_ACCOUNT.md +- Is initialized via `initialize_state_tree` +- Has integrated input queue (bloom filters + hash chains) + +**Operation Logic and Checks:** + +1. **Verify tree type:** + - Check: `tree_type == TreeType::StateV2` + - Error if address tree + +2. **Create nullifier:** + - Compute: `nullifier = Hash(compressed_account_hash, leaf_index, tx_hash)` + - Nullifier is transaction-specific (depends on tx_hash) + - Note, a nullifier could be any value other than the original compressed_account_hash. The only requirement is that post nullifier insertion we cannot prove inclusion of the original compressed_account_hash in the tree. + +3. **Insert into current batch:** + Calls `insert_into_current_queue_batch` helper which: + + a. 
**Check batch state (readiness):** + - If batch state is `Fill`: Ready for insertion, continue + - If batch state is `Inserted`: Batch was fully processed, needs clearing: + - Check bloom filter is zeroed; error if not + - Clear hash chain stores (reset all hash chains) + - Advance batch state to `Fill` + - Reset batch metadata (start_index, sequence_number, etc.) + - If batch state is `Full`: Error - batch not ready for insertion + + b. **Insert values into batch:** + - Call `current_batch.insert`: + - Insert compressed_account_hash into bloom filter (NOT the nullifier, since nullifier is tx-specific) + - Check non-inclusion: compressed_account_hash must not exist in any other bloom filter + - Update hash chain with nullifier: `Poseidon(prev_hash_chain, nullifier)` + - Store updated hash chain in hash chain store + - Increment batch's internal element counter + + c. **Check if batch is full:** + - If `num_inserted_elements == batch_size`: + - Transition batch state from `Fill` to `Full` + - Increment `currently_processing_batch_index` (switches to other batch) + - Update `pending_batch_index` (marks this batch ready for tree update) + +4. 
**Increment nullifier_next_index:** + - `nullifier_next_index += 1` + - Used as sequence number by indexers to track nullifier order + +**Validations:** +- Tree must be state tree (enforced by tree type check) +- Batch must be in `Fill` or `Inserted` state (enforced by `insert_into_current_queue_batch`) +- Bloom filter must be zeroed before reuse (enforced when clearing batch in `Inserted` state) +- Non-inclusion check: compressed_account_hash must not exist in any bloom filter (prevents double-spending) + +**State Changes:** +- Bloom filter: Stores compressed_account_hash for non-inclusion checks +- Hash chain store: Updates running Poseidon hash with nullifier for ZKP batch +- Batch metadata: + - `num_inserted_elements`: Incremented + - `state`: May transition `Fill` → `Full` when batch fills + - `currently_processing_batch_index`: May switch to other batch + - `pending_batch_index`: Updated when batch becomes full +- Tree metadata: + - `nullifier_next_index`: Always incremented (sequence number for indexers) + +**Errors:** +- `MerkleTreeMetadataError::InvalidTreeType` - Tree is not a state tree (address trees don't support nullifiers) +- `BatchedMerkleTreeError::BatchNotReady` (error code: 14301) - Batch is in `Full` state and cannot accept insertions +- `BatchedMerkleTreeError::BloomFilterNotZeroed` (error code: 14312) - Attempting to reuse batch before bloom filter has been zeroed by forester +- `BatchedMerkleTreeError::NonInclusionCheckFailed` (error code: 14311) - compressed_account_hash already exists in bloom filter (double-spend attempt) +- `ZeroCopyError` - Failed to access bloom filter stores or hash chain stores diff --git a/program-libs/batched-merkle-tree/docs/INSERT_OUTPUT_QUEUE.md b/program-libs/batched-merkle-tree/docs/INSERT_OUTPUT_QUEUE.md new file mode 100644 index 0000000000..4eab752d2e --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/INSERT_OUTPUT_QUEUE.md @@ -0,0 +1,90 @@ +# Insert Into Output Queue + +**path:** src/queue.rs + 
+**description:** +Inserts a compressed account hash into the output queue's currently processing batch. Output queues store compressed account hashes until the batch is zeroed (which occurs after the batch is fully inserted into the tree AND the next batch reaches 50% capacity). + +Key characteristics: +1. Inserts values into value vec (for immediate spending via proof-by-index) +2. Updates hash chain (for ZKP verification) +3. Automatically transitions batches when full (Fill → Full state when num_inserted_elements reaches batch_size) +4. Assigns leaf index at insertion (increments next_index; tree insertion order is determined at queue insertion) +5. No bloom filters (only input queues use bloom filters) + +The output queue uses a two-batch alternating system. The alternating batch system is not strictly necessary for output queues (no bloom filters to zero out), but is used to unify input and output queue code. + +Output queues enable **immediate spending**: Values can be spent via proof-by-index before tree insertion. Unlike input queues that only store bloom filters, output queues store actual values in value vecs for proof-by-index. Hash chains are used as public inputs when verifying the ZKP that appends this batch to the tree. + +**Operation:** +Method: `BatchedQueueAccount::insert_into_current_batch` + +**Parameters:** +- `hash_chain_value`: &[u8; 32] - Compressed account hash to insert +- `current_slot`: &u64 - Current Solana slot number (sets batch start_slot on first insertion; used by indexers to track when batch started filling, not used for batch logic) + +**Accounts:** +This operation modifies a `BatchedQueueAccount`: +- Must be type `QueueType::OutputStateV2` +- Account layout defined in: src/queue.rs +- Must have been initialized via `initialize_state_tree` +- Associated with a state Merkle tree + +**Operation Logic and Checks:** + +1. 
**Get current insertion index:** + - Read `batch_metadata.next_index` to determine leaf index for this value + - This index is used for proof-by-index when spending compressed accounts + - This is the leaf index of the value upon Merkle tree insertion. + +2. **Insert into current batch:** + Calls `insert_into_current_queue_batch` helper which: + + a. **Check batch state (readiness):** + - If batch state is `Fill`: Ready for insertion, continue + - If batch state is `Inserted`: Batch was fully processed, needs clearing: + - Clear value vec (reset all values to zero) + - Clear hash chain stores (reset all hash chains) + - Advance batch state to `Fill` + - Reset batch metadata (start_index, sequence_number, etc.) + - If batch state is `Full`: Error - batch not ready for insertion + + b. **Insert value into batch:** + - Call `current_batch.store_and_hash_value`: + - Store hash_chain_value in value vec at next position + - Update hash chain: + - Get current ZKP batch index + - Hash: `Poseidon(prev_hash_chain, hash_chain_value)` + - Store updated hash chain in hash chain store + - Increment batch's internal element counter + + c. **Check if batch is full:** + - If `num_inserted_elements == batch_size`: + - Transition batch state from `Fill` to `Full` + - Increment `currently_processing_batch_index` (switches to other batch) + - Update `pending_batch_index` (marks this batch ready for tree update) + +3. 
**Increment queue next_index:** + - `batch_metadata.next_index += 1` + - The assigned leaf index in the tree (tree insertion order is determined at queue insertion) + +**Validations:** +- Batch must be in `Fill` or `Inserted` state (enforced by `insert_into_current_queue_batch`) +- Tree must not be full: `next_index < tree_capacity` (checked by caller before insertion) + +**State Changes:** +- Value vec: Stores compressed account hash at index position +- Hash chain store: Updates running Poseidon hash for ZKP batch +- Batch metadata: + - `num_inserted_elements`: Incremented + - `state`: May transition `Fill` → `Full` when batch fills + - `currently_processing_batch_index`: May switch to other batch + - `pending_batch_index`: Updated when batch becomes full +- Queue metadata: + - `next_index`: Always incremented (leaf index for this value) + +**Errors:** +- `BatchedMerkleTreeError::TreeIsFull` (error code: 14310) - Output queue has reached tree capacity (next_index >= tree_capacity) +- `BatchedMerkleTreeError::BatchNotReady` (error code: 14301) - Batch is in `Full` state and cannot accept insertions +- `BatchedMerkleTreeError::BloomFilterNotZeroed` (error code: 14312) - N/A for output queues (no bloom filters) +- `ZeroCopyError` - Failed to access value vec or hash chain stores diff --git a/program-libs/batched-merkle-tree/docs/QUEUE_ACCOUNT.md b/program-libs/batched-merkle-tree/docs/QUEUE_ACCOUNT.md new file mode 100644 index 0000000000..dfd9285fdf --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/QUEUE_ACCOUNT.md @@ -0,0 +1,100 @@ +# BatchedQueueAccount + +**Description:** +Output queue account for state trees that temporarily stores compressed account hashes. Enables immediate spending of newly created compressed accounts via proof-by-index. + +**Note:** In the current implementation, `BatchedQueueAccount` is always an output queue (type `OutputStateV2`). Input queues are integrated into the `BatchedMerkleTreeAccount`. 
+ +**Discriminator:** b`queueacc` `[113, 117, 101, 117, 101, 97, 99, 99]` (8 bytes) + +**Path:** +- Struct: `src/queue.rs` - `BatchedQueueAccount` +- Metadata: `src/queue.rs` - `BatchedQueueMetadata` + +## Components + +### 1. Metadata (`BatchedQueueMetadata`) +- Queue metadata (queue type, associated merkle tree) +- Batch metadata (`QueueBatches`): + - Batch sizes (`batch_size`, `zkp_batch_size`) + - `currently_processing_batch_index`: Index of batch accepting new insertions (Fill state) + - `pending_batch_index`: Index of batch ready for ZKP processing and tree insertion (Full or being incrementally inserted) + - Two `Batch` structures tracking state and progress + - **Note:** These indices can differ, enabling parallel insertion while tree updates from the previous batch are being verified +- Tree capacity +- Hashed merkle tree pubkey +- Hashed queue pubkey + +### 2. Value Vecs (`[ZeroCopyVecU64<[u8; 32]>; 2]`) +- Two value vectors, one per batch +- Stores the actual compressed account hashes +- Values accessible by leaf index even before tree insertion +- Enables proof-by-index for immediate spending + +### 3. Hash Chain Stores (`[ZeroCopyVecU64<[u8; 32]>; 2]`) +- Two hash chain vectors, one per batch +- Each batch has `batch_size / zkp_batch_size` hash chains +- Each hash chain stores Poseidon hash of all values in that ZKP batch +- Used as public inputs for batch append ZKP verification + +**Note:** Output queues do NOT have bloom filters (only input queues use bloom filters). + +## Serialization + +All deserialization is zero-copy. 
+ +**In Solana programs:** +```rust +use light_batched_merkle_tree::queue::BatchedQueueAccount; +use light_account_checks::AccountInfoTrait; + +// Deserialize output queue +let queue = BatchedQueueAccount::output_from_account_info(account_info)?; +``` + +**In client code:** +```rust +use light_batched_merkle_tree::queue::BatchedQueueAccount; + +// Deserialize output queue +let queue = BatchedQueueAccount::output_from_bytes(&mut account_data)?; +``` + +## Account Validation + +**`output_from_account_info` checks:** +1. Account owned by Light account compression program (`check_owner` using `light-account-checks`) +2. Account discriminator is `queueacc` (`check_discriminator` using `light-account-checks`) +3. Queue type is `OUTPUT_STATE_QUEUE_TYPE_V2` + +**`output_from_bytes` checks (client only):** +1. Account discriminator is `queueacc` +2. Queue type is `OUTPUT_STATE_QUEUE_TYPE_V2` + +**Error codes:** +- `AccountError::AccountOwnedByWrongProgram` (12012) - Account not owned by compression program +- `AccountError::InvalidAccountSize` (12006) - Account size less than 8 bytes +- `AccountError::InvalidDiscriminator` (12007) - Discriminator mismatch +- `MerkleTreeMetadataError::InvalidQueueType` - Queue type mismatch + +## Associated Operations + +- [INITIALIZE_STATE_TREE.md](INITIALIZE_STATE_TREE.md) - Create output queue with state tree +- [INSERT_OUTPUT_QUEUE.md](INSERT_OUTPUT_QUEUE.md) - Insert compressed account hashes +- [UPDATE_FROM_OUTPUT_QUEUE.md](UPDATE_FROM_OUTPUT_QUEUE.md) - Update tree from output queue with ZKP + +## Supporting Structures + +### BatchedQueueMetadata + +**Description:** +Metadata for a batched queue account (output queues only). + +**Path:** `src/queue.rs` + +**Key Fields:** +- `metadata`: Base `QueueMetadata` (queue type, associated merkle tree) +- `batch_metadata`: `QueueBatches` structure +- `tree_capacity`: Associated tree's capacity (2^height). 
Checked on insertion to prevent overflow +- `hashed_merkle_tree_pubkey`: Pre-hashed tree pubkey (31 bytes + 1 padding). Pubkeys are hashed and truncated to 31 bytes (248 bits) to fit within bn254 field size requirements for Poseidon hashing in ZK circuits +- `hashed_queue_pubkey`: Pre-hashed queue pubkey (31 bytes + 1 padding). Same truncation for bn254 field compatibility diff --git a/program-libs/batched-merkle-tree/docs/TREE_ACCOUNT.md b/program-libs/batched-merkle-tree/docs/TREE_ACCOUNT.md new file mode 100644 index 0000000000..9dc3858303 --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/TREE_ACCOUNT.md @@ -0,0 +1,195 @@ +# BatchedMerkleTreeAccount + +**Description:** +The main Merkle tree account that stores tree roots, root history, and integrated input queue (bloom filters + hash chains for nullifiers or addresses). Used for both state trees and address trees. + +**Discriminator:** b`BatchMta` `[66, 97, 116, 99, 104, 77, 116, 97]` (8 bytes) + +**Path:** +- Struct: `src/merkle_tree.rs` - `BatchedMerkleTreeAccount` +- Metadata: `src/merkle_tree_metadata.rs` - `BatchedMerkleTreeMetadata` + +## Components + +### 1. Metadata (`BatchedMerkleTreeMetadata`) +- Tree type: `TreeType::StateV2` or `TreeType::AddressV2` +- Tree height and capacity (2^height leaves) +- Sequence number (increments with each batched tree update (not input or output queue insertions)) +- Next index (next available leaf index) +- Nullifier next index (for state trees, address/nullifier queue index) +- Root history capacity +- Queue batch metadata +- Hashed pubkey (31 bytes for bn254 field compatibility) + +### 2. 
Root History (`ZeroCopyCyclicVecU64<[u8; 32]>`) +- Type defined in: program-libs/zero-copy/src/cyclic_vec.rs +- Cyclic buffer storing recent tree roots (when full, oldest root is overwritten with newest root) +- Default capacity: 200 roots +- `first()` returns oldest root, `last()` returns newest root +- Latest root accessed via `root_history.last()` +- Validity proofs pick root by index from root history + since proofs need a static root value to verify against. + +### 3. Bloom Filter Stores (`[&mut [u8]; 2]`) +- Two bloom filters, one per batch +- Used only for input queues (nullifiers for state trees, addresses for address trees) +- Ensures no duplicate insertions in the queue. +- Zeroed after batch is fully inserted and next batch is 50% full and at least one batch update occurred since batch completion. + +### 4. Hash Chain Stores (`[ZeroCopyVecU64<[u8; 32]>; 2]`) +- Two hash chain vectors, one per batch (length = `batch_size / zkp_batch_size`) +- Each hash chain stores Poseidon hash of all values in that ZKP batch +- Used as public inputs for ZKP verification + +## Tree Type Variants + +### State Tree +- Tree type: `STATE_MERKLE_TREE_TYPE_V2` +- Has separate `BatchedQueueAccount` for output operations (appending compressed accounts) +- Uses integrated input queue for nullifier operations +- Initial root: zero bytes root for specified height + +### Address Tree +- Tree type: `ADDRESS_MERKLE_TREE_TYPE_V2` +- No separate output queue (only integrated input queue for address insertions) +- Initial root: `ADDRESS_TREE_INIT_ROOT_40` (hardcoded for height 40) +- Starts with next_index = 1 (pre-initialized with one element at index 0) + +## Serialization + +All deserialization is zero-copy. 
+ +**In Solana programs:** +```rust +use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; +use light_account_checks::AccountInfoTrait; + +// Deserialize state tree +let tree = BatchedMerkleTreeAccount::state_from_account_info(account_info)?; + +// Deserialize address tree +let tree = BatchedMerkleTreeAccount::address_from_account_info(account_info)?; + +// Access root by index +let root = tree.get_root_by_index(index)?; +``` + +**In client code:** +```rust +use light_batched_merkle_tree::merkle_tree::BatchedMerkleTreeAccount; + +// Deserialize state tree +let tree = BatchedMerkleTreeAccount::state_from_bytes(&mut account_data, &pubkey)?; + +// Deserialize address tree +let tree = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey)?; +``` + +## Account Validation + +**`state_from_account_info` checks:** +1. Account owned by Light account compression program (`check_owner` using `light-account-checks`) +2. Account discriminator is `BatchMta` (`check_discriminator` using `light-account-checks`) +3. Tree type is `STATE_MERKLE_TREE_TYPE_V2` (4) + +**`address_from_account_info` checks:** +1. Account owned by Light account compression program (`check_owner` using `light-account-checks`) +2. Account discriminator is `BatchMta` (`check_discriminator` using `light-account-checks`) +3. Tree type is `ADDRESS_MERKLE_TREE_TYPE_V2` (5) + +**`state_from_bytes` checks (client only):** +1. Account discriminator is `BatchMta` +2. Tree type is `STATE_MERKLE_TREE_TYPE_V2` (4) + +**`address_from_bytes` checks (client only):** +1. Account discriminator is `BatchMta` +2. 
Tree type is `ADDRESS_MERKLE_TREE_TYPE_V2` (5) + +**Error codes:** +- `AccountError::AccountOwnedByWrongProgram` (12012) - Account not owned by compression program +- `AccountError::InvalidAccountSize` (12006) - Account size less than 8 bytes +- `AccountError::InvalidDiscriminator` (12007) - Discriminator mismatch +- `MerkleTreeMetadataError::InvalidTreeType` - Tree type mismatch (state vs address) + +## Associated Operations + +- [INITIALIZE_STATE_TREE.md](INITIALIZE_STATE_TREE.md) - Create state tree +- [INITIALIZE_ADDRESS_TREE.md](INITIALIZE_ADDRESS_TREE.md) - Create address tree +- [INSERT_INPUT_QUEUE.md](INSERT_INPUT_QUEUE.md) - Insert nullifiers (state trees) +- [INSERT_ADDRESS_QUEUE.md](INSERT_ADDRESS_QUEUE.md) - Insert addresses (address trees) +- [UPDATE_FROM_INPUT_QUEUE.md](UPDATE_FROM_INPUT_QUEUE.md) - Update tree from input queue with ZKP + +## Supporting Structures + +### Batch + +**Description:** +State machine tracking the lifecycle of a single batch from filling to insertion. + +**Path:** `src/batch.rs` + +**States:** +- **Fill** (0) - Batch is accepting new insertions. ZKP processing can begin as soon as individual ZKP batches are complete (when `num_full_zkp_batches > 0`) +- **Full** (2) - All ZKP batches are complete (`num_full_zkp_batches == batch_size / zkp_batch_size`). No more insertions accepted +- **Inserted** (1) - All ZKP batches have been inserted into the tree + +**State Transitions:** +- Fill → Full: When all ZKP batches are complete (`num_full_zkp_batches == batch_size / zkp_batch_size`) +- Full → Inserted: When all ZKP batches are inserted into tree (`num_inserted_zkp_batches == num_full_zkp_batches`) +- Inserted → Fill: When batch is reset for reuse (after bloom filter zeroing) + +**Key Insight:** ZKP processing happens incrementally. A batch doesn't need to be in Full state for ZKP processing to begin - individual ZKP batches can be processed as soon as they're complete, even while the overall batch is still in Fill state. 
+ +**Key Fields:** +- `num_inserted`: Number of elements inserted in the current batch +- `num_full_zkp_batches`: Number of ZKP batches ready for insertion +- `num_inserted_zkp_batches`: Number of ZKP batches already inserted into tree +- `sequence_number`: Threshold value set at batch insertion (`tree_seq + root_history_capacity`). Used to detect if sufficient tree updates have occurred since batch insertion to overwrite the last root that was inserted with this batch. When clearing bloom filter, overlapping roots in history must also be zeroed to prevent inclusion proofs of nullified values +- `root_index`: Root index at batch insertion. Identifies which roots in history could prove inclusion of values from this batch's bloom filter. These roots are zeroed when clearing the bloom filter +- `start_index`: Starting leaf index for this batch +- `start_slot`: Slot of first insertion (for indexer reindexing) +- `bloom_filter_is_zeroed`: Whether bloom filter has been zeroed + +### QueueBatches + +**Description:** +Metadata structure managing the 2-batch system for queues. 
+ +**Path:** `src/queue_batch_metadata.rs` + +**Key Fields:** +- `num_batches`: Always 2 (alternating batches) +- `batch_size`: Number of elements in a full batch +- `zkp_batch_size`: Number of elements per ZKP batch (batch_size must be divisible by zkp_batch_size) +- `bloom_filter_capacity`: Bloom filter size in bits (0 for output queues) +- `currently_processing_batch_index`: Index of batch accepting new insertions (Fill state) +- `pending_batch_index`: Index of batch ready for ZKP processing and tree insertion (Full or being incrementally inserted) +- `next_index`: Next available leaf index in queue +- `batches`: Array of 2 `Batch` structures + +**Variants:** +- **Output Queue** (`new_output_queue`): No bloom filters, has value vecs +- **Input Queue** (`new_input_queue`): Has bloom filters, no value vecs + +**Key Validation:** +- `batch_size` must be divisible by `zkp_batch_size` +- Error: `BatchSizeNotDivisibleByZkpBatchSize` if not + +### BatchedMerkleTreeMetadata + +**Description:** +Complete metadata for a batched Merkle tree account. + +**Path:** `src/merkle_tree_metadata.rs` + +**Key Fields:** +- `tree_type`: `TreeType::StateV2` (4) or `TreeType::AddressV2` (5) +- `metadata`: Base `MerkleTreeMetadata` (access control, rollover, etc.) +- `sequence_number`: Increments with each tree update +- `next_index`: Next available leaf index in tree +- `nullifier_next_index`: Nullifier sequence tracker (state trees only) +- `height`: Tree height (default: 32 for state, 40 for address) +- `capacity`: Maximum leaves (2^height) +- `root_history_capacity`: Size of root history buffer (default: 200) +- `queue_batches`: Queue batch metadata +- `hashed_pubkey`: Pre-hashed tree pubkey (31 bytes + 1 padding). 
Pubkeys are hashed and truncated to 31 bytes (248 bits) to fit within bn254 field size requirements for Poseidon hashing in ZK circuits diff --git a/program-libs/batched-merkle-tree/docs/UPDATE_FROM_INPUT_QUEUE.md b/program-libs/batched-merkle-tree/docs/UPDATE_FROM_INPUT_QUEUE.md new file mode 100644 index 0000000000..623bdab295 --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/UPDATE_FROM_INPUT_QUEUE.md @@ -0,0 +1,264 @@ +# Update Tree From Input Queue + +**path:** src/merkle_tree.rs + +**description:** +Batch updates Merkle tree from input queue with zero-knowledge proof verification. This operation covers two distinct update types: + +1. **Batch Nullify** (State Trees): Nullifies existing leaves by overwriting compressed account hashes with nullifiers +2. **Batch Address Append** (Address Trees): Appends new addresses to the tree using indexed Merkle tree insertion + +Both operations process one ZKP batch at a time, verifying correctness of: old root + queue values → new root. + +**Circuit implementations (repo-relative paths):** +- Batch nullify: prover/server/prover/v2/batch_update_circuit.go +- Batch address append: prover/server/prover/v2/batch_address_append_circuit.go + +Key characteristics: +1. Verifies ZKP proving correctness of: old root + queue values → new root +2. Updates tree root +3. Increments tree sequence_number (tracks number of tree updates) +4. For address trees: increments tree next_index by zkp_batch_size +5. For state trees: increments nullifier_next_index (offchain indexer tracking only) +6. Marks ZKP batch as inserted in the queue +7. Transitions batch state to Inserted when all ZKP batches complete +8. 
Zeros out bloom filter when current batch is 50% inserted + +**Operations:** + +## Batch Nullify (State Trees) + +Method: `BatchedMerkleTreeAccount::update_tree_from_input_queue` + +**Parameters:** +- `instruction_data`: InstructionDataBatchNullifyInputs - Contains new_root and compressed ZK proof + +**Accounts:** +- `BatchedMerkleTreeAccount` (state tree): + - Must be type `TreeType::StateV2` + - Contains integrated input queue with nullifiers + - Account layout defined in: src/merkle_tree.rs + - Account documentation: TREE_ACCOUNT.md + +**Public inputs for ZKP verification:** +- old_root: Current tree root before update +- new_root: New tree root after batch nullify +- leaves_hash_chain: Hash chain from input queue (nullifiers) +- Public input hash: Hash([old_root, new_root, leaves_hash_chain]) + +**What the ZKP (circuit) proves:** + +The batch update circuit proves that nullifiers have been correctly inserted into the Merkle tree: + +1. **Verify public input hash:** + - Computes Hash([old_root, new_root, leaves_hash_chain]) + - Asserts equals circuit.PublicInputHash + +2. **Create and verify nullifiers:** + - For each position i in batch (zkp_batch_size): + - Computes nullifier[i] = Hash(Leaves[i], PathIndices[i], TxHashes[i]) + - Where Leaves[i] is the compressed_account_hash being nullified + - PathIndices[i] is the leaf index in the tree + - TxHashes[i] is the transaction hash + - Computes hash chain of all nullifiers + - Asserts equals circuit.LeavesHashchainHash + +3. 
**Perform Merkle updates:** + - Initialize running root = circuit.OldRoot + - For each position i (zkp_batch_size positions): + - Convert PathIndices[i] to binary (tree height bits) + - Call MerkleRootUpdateGadget: + - OldRoot: running root + - OldLeaf: circuit.OldLeaves[i] (can be 0 if not yet appended, or compressed_account_hash) + - NewLeaf: nullifier[i] + - PathIndex: PathIndices[i] as bits + - MerkleProof: circuit.MerkleProofs[i] + - Height: tree height + - Update running root with result + - Assert final running root equals circuit.NewRoot + +4. **Public inputs:** Hash([old_root, new_root, leaves_hash_chain]) + +**Key circuit characteristics:** +- Path index is included in nullifier hash to ensure correct leaf is nullified even when old_leaf is 0 +- Since input and output queues are independent, nullifiers can be inserted before values are appended to the tree +- Merkle proof verifies old_leaf value against onchain root, ensuring correct position +- If old_leaf is 0: value not yet appended, but path index in nullifier ensures correct future position +- If old_leaf is non-zero: should equal compressed_account_hash (verified by Merkle proof) + +## Batch Address Append (Address Trees) + +Method: `BatchedMerkleTreeAccount::update_tree_from_address_queue` + +**Parameters:** +- `instruction_data`: InstructionDataAddressAppendInputs - Contains new_root and compressed ZK proof + +**Accounts:** +- `BatchedMerkleTreeAccount` (address tree): + - Must be type `TreeType::AddressV2` + - Contains integrated input queue with addresses + - Account layout defined in: src/merkle_tree.rs + - Account documentation: TREE_ACCOUNT.md + +**Public inputs for ZKP verification:** +- old_root: Current tree root before update +- new_root: New tree root after batch address append +- leaves_hash_chain: Hash chain from address queue (addresses) +- start_index: Tree next_index (where batch append begins) +- Public input hash: Hash([old_root, new_root, leaves_hash_chain, start_index]) + 
+**What the ZKP (circuit) proves:** + +The batch address append circuit proves that addresses have been correctly appended using indexed Merkle tree insertion: + +1. **Initialize running root:** + - Set current root = circuit.OldRoot + +2. **For each address i in batch (zkp_batch_size positions):** + + a. **Update low leaf (insert into sorted linked list):** + - Compute old low leaf hash: + - Uses LeafHashGadget to verify old low leaf structure + - Inputs: LowElementValues[i], LowElementNextValues[i], NewElementValues[i] + - Verifies low_value < new_address < low_next_value (sorted order) + - Compute new low leaf hash: + - Hash(LowElementValues[i], NewElementValues[i]) + - Updates low leaf to point to new address instead of old next value + - Convert LowElementIndices[i] to binary (tree height bits) + - Call MerkleRootUpdateGadget: + - OldRoot: current root + - OldLeaf: old low leaf hash + - NewLeaf: new low leaf hash (Hash(low_value, new_address)) + - PathIndex: LowElementIndices[i] as bits + - MerkleProof: circuit.LowElementProofs[i] + - Height: tree height + - Update current root with result + + b. **Insert new leaf:** + - Compute new leaf hash: + - Hash(NewElementValues[i], LowElementNextValues[i]) + - New address points to what low leaf previously pointed to + - Compute insertion index: start_index + i + - Convert insertion index to binary (tree height bits) + - Call MerkleRootUpdateGadget: + - OldRoot: current root (after low leaf update) + - OldLeaf: 0 (position must be empty) + - NewLeaf: new leaf hash (Hash(new_address, low_next_value)) + - PathIndex: (start_index + i) as bits + - MerkleProof: circuit.NewElementProofs[i] + - Height: tree height + - Update current root with result + +3. **Verify final root:** + - Assert current root equals circuit.NewRoot + +4. **Verify leaves hash chain:** + - Compute hash chain of all NewElementValues + - Assert equals circuit.HashchainHash + +5. 
**Verify public input hash:** + - Compute Hash([old_root, new_root, hash_chain, start_index]) + - Assert equals circuit.PublicInputHash + +6. **Public inputs:** Hash([old_root, new_root, leaves_hash_chain, start_index]) + +**Key circuit characteristics:** +- Performs TWO Merkle updates per address (low leaf update + new leaf insertion) +- Maintains sorted order via indexed Merkle tree linked list structure +- Verifies new address fits between low_value and low_next_value (sorted insertion) +- New leaf position must be empty (old_leaf = 0) +- Enables efficient non-inclusion proofs (prove address not in sorted tree) + +## Operation Logic and Checks (Both Operations) + +1. **Check tree type:** + - Nullify: Verify tree type is `TreeType::StateV2` + - Address: Verify tree type is `TreeType::AddressV2` + +2. **Check tree capacity (address trees only):** + - Verify: `tree.next_index + zkp_batch_size <= tree_capacity` + - Error if tree would exceed capacity after this batch + +3. **Get batch information:** + - Get `pending_batch_index` from queue (batch ready for tree insertion) + - Get `first_ready_zkp_batch_index` from batch (next ZKP batch to insert) + - Verify batch has ready ZKP batches: `num_full_zkp_batches > num_inserted_zkp_batches` + +4. **Create public inputs hash:** + - Get `leaves_hash_chain` from hash chain store for this ZKP batch + - Get `old_root` from tree root history (most recent root) + - Nullify: Compute `public_input_hash = Hash([old_root, new_root, leaves_hash_chain])` + - Address: Get `start_index` from tree, compute `public_input_hash = Hash([old_root, new_root, leaves_hash_chain, start_index])` + +5. 
**Verify ZKP and update tree:** + Calls `verify_update` which: + - Nullify: Verifies proof with `verify_batch_update(zkp_batch_size, public_input_hash, proof)` + - Address: Verifies proof with `verify_batch_address_update(zkp_batch_size, public_input_hash, proof)` + - Increments sequence_number (tree update counter) + - Appends new_root to root_history (cyclic buffer) + - Nullify: Increments nullifier_next_index by zkp_batch_size (offchain indexer tracking) + - Address: Increments tree next_index by zkp_batch_size (new leaves appended) + - Returns (old_next_index, new_next_index) for event + +6. **Mark ZKP batch as inserted:** + - Call `mark_as_inserted_in_merkle_tree` on batch: + - Increment `num_inserted_zkp_batches` + - If all ZKP batches inserted: + - Set batch `sequence_number = tree_sequence_number + root_history_capacity` (threshold at which root at root_index has been overwritten in cyclic root history) + - Set batch `root_index` (identifies root that must not exist when bloom filter is zeroed) + - Transition batch state to `Inserted` + - Return batch state for next step + +7. **Increment pending_batch_index if batch complete:** + - If batch state is now `Inserted`: + - Increment `pending_batch_index` (switches to other batch) + +8. **Zero out bloom filter if ready:** + - Same mechanism as described in UPDATE_FROM_OUTPUT_QUEUE.md + - See that document for detailed explanation of bloom filter and root zeroing + +9. 
**Return batch event:** + - Contains merkle_tree_pubkey, batch indices, root info, next_index range + - Nullify: No output_queue_pubkey + - Address: No output_queue_pubkey + +**Validations:** +- Tree type must match operation (StateV2 for nullify, AddressV2 for address) +- Address trees: Tree must not be full after this batch insertion +- Batch must have ready ZKP batches: `num_full_zkp_batches > num_inserted_zkp_batches` +- Batch must not be in `Inserted` state +- ZKP must verify correctly against public inputs + +**State Changes:** + +**Tree account (Nullify - State Trees):** +- `nullifier_next_index`: Incremented by zkp_batch_size (offchain indexer tracking) +- `sequence_number`: Incremented by 1 (tracks tree updates) +- `root_history`: New root appended (cyclic buffer, may overwrite oldest) +- Input queue bloom filter: May be zeroed if current batch is 50% inserted AND previous batch is fully inserted AND bloom filter not yet zeroed + +**Tree account (Address Append - Address Trees):** +- `next_index`: Incremented by zkp_batch_size (new leaves appended) +- `sequence_number`: Incremented by 1 (tracks tree updates) +- `root_history`: New root appended (cyclic buffer, may overwrite oldest) +- Input queue bloom filter: May be zeroed if current batch is 50% inserted AND previous batch is fully inserted AND bloom filter not yet zeroed + +**Input queue (Both):** +- Batch `num_inserted_zkp_batches`: Incremented +- Batch `state`: May transition to `Inserted` when all ZKP batches complete +- Batch `sequence_number`: Set to `tree_sequence_number + root_history_capacity` when batch fully inserted (threshold at which root at root_index has been overwritten in cyclic root history) +- Batch `root_index`: Set when batch fully inserted (identifies root that must not exist when bloom filter is zeroed) +- Queue `pending_batch_index`: May increment when batch complete + +**Errors:** +- `MerkleTreeMetadataError::InvalidTreeType` (error code: 14007) - Tree type doesn't match 
operation +- `MerkleTreeMetadataError::InvalidQueueType` (error code: 14004) - Queue type invalid +- `BatchedMerkleTreeError::TreeIsFull` (error code: 14310) - Address tree would exceed capacity after this batch +- `BatchedMerkleTreeError::BatchNotReady` (error code: 14301) - Batch is not in correct state for insertion +- `BatchedMerkleTreeError::InvalidIndex` (error code: 14309) - Root history is empty or index out of bounds +- `BatchedMerkleTreeError::InvalidBatchIndex` (error code: 14308) - Batch index out of range +- `BatchedMerkleTreeError::CannotZeroCompleteRootHistory` (error code: 14313) - Cannot zero out complete or more than complete root history +- `VerifierError::ProofVerificationFailed` (error code: 13006) - ZKP verification failed (proof is invalid) +- `VerifierError::InvalidPublicInputsLength` (error code: 13004) - Public inputs length doesn't match expected +- `ZeroCopyError` (error codes: 15001-15017) - Failed to access root history or hash chain stores +- `HasherError` (error codes: 7001-7012) - Hashing operation failed diff --git a/program-libs/batched-merkle-tree/docs/UPDATE_FROM_OUTPUT_QUEUE.md b/program-libs/batched-merkle-tree/docs/UPDATE_FROM_OUTPUT_QUEUE.md new file mode 100644 index 0000000000..61dfffee5b --- /dev/null +++ b/program-libs/batched-merkle-tree/docs/UPDATE_FROM_OUTPUT_QUEUE.md @@ -0,0 +1,191 @@ +# Update Tree From Output Queue + +**path:** src/merkle_tree.rs + +**description:** +Batch appends values from the output queue to the state Merkle tree with zero-knowledge proof verification. This operation processes one ZKP batch at a time, verifying that the tree update from old root + queue values → new root is correct. The ZKP proves that the batch of values from the output queue has been correctly appended to the tree. + +**Circuit implementation:** /Users/ananas/dev/light-protocol/prover/server/prover/v2/batch_append_circuit.go + +Key characteristics: +1. 
Verifies ZKP proving correctness of: old root + queue values → new root +2. Updates tree root and increments tree next_index by zkp_batch_size +3. Increments tree sequence_number (tracks number of tree updates) +4. Marks ZKP batch as inserted in the queue +5. Transitions batch state to Inserted when all ZKP batches of a batch are complete +6. Zeros out input queue bloom filter when current batch is 50% inserted + +Public inputs for ZKP verification: +- old_root: Current tree root before update +- new_root: New tree root after batch append +- leaves_hash_chain: Hash chain from output queue (commitment to queue values) +- start_index: Tree index where batch append begins + +**Operation:** +Method: `BatchedMerkleTreeAccount::update_tree_from_output_queue_account` + +**Parameters:** +- `queue_account`: &mut BatchedQueueAccount - Output queue account containing values to append +- `instruction_data`: InstructionDataBatchAppendInputs - Contains new_root and compressed ZK proof + +**Accounts:** +This operation modifies: +1. `BatchedMerkleTreeAccount` (state tree): + - Must be type `TreeType::StateV2` + - Account layout defined in: src/merkle_tree.rs + - Account documentation: TREE_ACCOUNT.md + +2. `BatchedQueueAccount` (output queue): + - Must be associated with the state tree (pubkeys match) + - Account layout defined in: src/queue.rs + - Account documentation: QUEUE_ACCOUNT.md + +**Operation Logic and Checks:** + +1. **Check tree is not full:** + - Verify: `tree.next_index + zkp_batch_size <= tree_capacity` + - Error if tree would exceed capacity after this batch + +2. **Get batch information:** + - Get `pending_batch_index` from queue (batch ready for tree insertion) + - Get `first_ready_zkp_batch_index` from batch (next ZKP batch to insert) + - Verify batch has ready ZKP batches: `num_full_zkp_batches > num_inserted_zkp_batches` + - Batch can be in `Fill` (still being filled) or `Full` (completely filled) state + +3. 
**Create public inputs hash:** + - Get `leaves_hash_chain` from output queue for this ZKP batch + - Get `old_root` from tree root history (most recent root) + - Get `start_index` from tree (where this batch will be appended) + - Compute: `public_input_hash = Hash([old_root, new_root, leaves_hash_chain, start_index])` + +4. **Verify ZKP and update tree:** + Calls `verify_update` which: + - Verifies proof: `verify_batch_append_with_proofs(zkp_batch_size, public_input_hash, proof)` + - Increments tree next_index by zkp_batch_size + - Increments sequence_number (tree update counter) + - Appends new_root to root_history (cyclic buffer) + - Returns (old_next_index, new_next_index) for event + + **What the ZKP (circuit) proves:** + The batch append circuit proves that a batch of values has been correctly appended to the Merkle tree: + 1. Verifies the public input hash matches Hash([old_root, new_root, leaves_hash_chain, start_index]) + 2. Verifies the leaves_hash_chain matches the hash chain of all new leaves + 3. For each position in the batch (zkp_batch_size positions): + - Checks if old_leaf is zero (empty slot) or non-zero (contains nullifier): + - If zero: insert the new leaf + - If non-zero: keep the old leaf (don't overwrite nullified values) + - Provides Merkle proof for the old leaf value + - Computes Merkle root update using MerkleRootUpdateGadget + - Updates running root for next iteration + 4. Verifies the final computed root equals the claimed new_root + 5. Public inputs: Hash([old_root, new_root, leaves_hash_chain, start_index]) + Note: Since input and output queues are independent, a nullifier can be inserted into the tree before the value is appended to the tree. The circuit handles this by checking if the position already contains a nullifier (old_leaf is non-zero) and keeping it instead of overwriting. + +5. 
**Mark ZKP batch as inserted:** + - Call `mark_as_inserted_in_merkle_tree` on queue batch: + - Increment `num_inserted_zkp_batches` + - If all ZKP batches inserted: + - Set batch `sequence_number = tree_sequence_number + root_history_capacity` (threshold at which root at root_index has been overwritten in cyclic root history) + - Set batch `root_index` (identifies root that must not exist when bloom filter is zeroed) + - Transition batch state to `Inserted` + - Return batch state for next step + +6. **Increment pending_batch_index if batch complete:** + - If batch state is now `Inserted`: + - Increment `pending_batch_index` (switches to other batch) + +7. **Zero out input queue bloom filter if ready:** + + Clears input queue bloom filter after batch insertion to enable batch reuse. This operation runs during both output queue updates AND input queue updates (nullify and address operations). + + **Why zeroing is necessary:** + - Input queue bloom filters store compressed account hashes to prevent double-spending + - After batch insertion, old bloom filter values prevent batch reuse (non-inclusion checks fail for legitimate new insertions) + - Roots from batch insertion period can prove inclusion of bloom filter values + - Bloom filter must be zeroed to reuse batch; unsafe roots must be zeroed if they still exist in root history + + **When zeroing occurs (all conditions must be true):** + 1. Current batch is at least 50% full: `num_inserted_elements >= batch_size / 2` + 2. Current batch is NOT in `Inserted` state (still being filled) + 3. Previous batch is in `Inserted` state (fully processed) + 4. Previous batch bloom filter NOT already zeroed: `!bloom_filter_is_zeroed()` + 5. 
At least one tree update occurred since batch completion: `batch.sequence_number != current_tree.sequence_number` + + **Why wait until 50% full:** + - Zeroing is computationally expensive (foresters perform this, not users) + - Don't zero when inserting last zkp of batch (would cause failing user transactions) + - Grace period for clients to switch from proof-by-index to proof-by-zkp for previous batch values + + **Zeroing procedure:** + + a. **Mark bloom filter as zeroed** - Sets flag to prevent re-zeroing + + b. **Zero out bloom filter bytes** - All bytes set to 0 + + c. **Zero out overlapping roots** (if any exist): + + **Check for overlapping roots:** + - Overlapping roots exist if: `batch.sequence_number > current_tree.sequence_number` + - Cyclic root history has NOT yet overwritten all roots from batch insertion period + - `batch.sequence_number` was set to `tree_sequence_number + root_history_capacity` at batch completion + - Represents threshold at which root at `batch.root_index` would be naturally overwritten + + **Calculate unsafe roots:** + - `num_remaining_roots = batch.sequence_number - current_tree.sequence_number` + - Roots NOT overwritten since batch insertion + - These roots can still prove inclusion of bloom filter values + - `first_safe_root_index = batch.root_index + 1` + + **Safety check:** + - Verify: `num_remaining_roots < root_history.len()` (never zero complete or more than complete root history) + + **Zero unsafe roots:** + - Start at `oldest_root_index = root_history.first_index()` + - Zero `num_remaining_roots` consecutive roots in cyclic buffer + - Loop wraps: `oldest_root_index = (oldest_root_index + 1) % root_history.len()` + - Sets each root to `[0u8; 32]` + + **Defensive assertion:** + - Verify ended at `first_safe_root_index` (ensures correct range zeroed) + + **Why safe:** + - `sequence_number` mechanism determines when roots are safe to keep + - Roots at or after `first_safe_root_index` are from updates after batch insertion + - 
These roots cannot prove inclusion of zeroed bloom filter values + - Manual zeroing of overlapping roots prevents cyclic buffer race conditions + +8. **Return batch append event:** + - Contains merkle_tree_pubkey, output_queue_pubkey, batch indices, root info, next_index range + +**Validations:** +- Tree must be state tree (enforced by tree type check) +- Tree must not be full after this batch insertion +- Queue and tree must be associated (pubkeys match) +- Batch must have ready ZKP batches: `num_full_zkp_batches > num_inserted_zkp_batches` +- Batch must not be in `Inserted` state +- ZKP must verify correctly against public inputs + +**State Changes:** + +**Tree account:** +- `next_index`: Incremented by zkp_batch_size (is the leaf index for next insertion) +- `sequence_number`: Incremented by 1 (tracks the number of tree updates) +- `root_history`: New root appended (cyclic buffer, overwrites oldest) +- Input queue bloom filter: May be zeroed if current batch is 50% inserted AND previous batch is fully inserted AND bloom filter not yet zeroed + +**Queue account:** +- Batch `num_inserted_zkp_batches`: Incremented +- Batch `state`: May transition to `Inserted` when all ZKP batches complete +- Batch `sequence_number`: Set to `tree_sequence_number + root_history_capacity` when batch fully inserted (threshold at which root at root_index has been overwritten in cyclic root history) +- Batch `root_index`: Set when batch fully inserted (identifies root that must not exist when bloom filter is zeroed) +- Queue `pending_batch_index`: Increments when batch complete + +**Errors:** +- `MerkleTreeMetadataError::InvalidTreeType` (error code: 14007) - Tree is not a state tree +- `MerkleTreeMetadataError::MerkleTreeAndQueueNotAssociated` (error code: 14001) - Queue and tree pubkeys don't match +- `BatchedMerkleTreeError::TreeIsFull` (error code: 14310) - Tree would exceed capacity after this batch +- `BatchedMerkleTreeError::BatchNotReady` (error code: 14301) - Batch is not in 
correct state for insertion +- `BatchedMerkleTreeError::InvalidIndex` (error code: 14309) - Root history is empty or index out of bounds +- `BatchedMerkleTreeError::CannotZeroCompleteRootHistory` (error code: 14313) - Cannot zero out complete or more than complete root history +- `VerifierError::ProofVerificationFailed` (error code: 13006) - ZKP verification failed (proof is invalid) +- `ZeroCopyError` (error codes: 15001-15017) - Failed to access root history or hash chain stores diff --git a/program-libs/batched-merkle-tree/src/batch.rs b/program-libs/batched-merkle-tree/src/batch.rs index 093e0b7815..c07a0fdd71 100644 --- a/program-libs/batched-merkle-tree/src/batch.rs +++ b/program-libs/batched-merkle-tree/src/batch.rs @@ -181,6 +181,15 @@ impl Batch { /// fill -> full -> inserted -> fill /// (from tree insertion perspective is pending if fill or full) + #[cfg_attr(kani, kani::ensures(|result: &Result<(), BatchedMerkleTreeError>| { + result.is_ok().then(|| self.get_state() == BatchState::Inserted).unwrap_or(true) + }))] + #[cfg_attr(kani, kani::ensures(|result: &Result<(), BatchedMerkleTreeError>| { + result.is_ok().then(|| self.num_full_zkp_batches == self.batch_size / self.zkp_batch_size).unwrap_or(true) + }))] + #[cfg_attr(kani, kani::ensures(|result: &Result<(), BatchedMerkleTreeError>| { + result.is_ok().then(|| self.num_inserted_zkp_batches == self.batch_size / self.zkp_batch_size).unwrap_or(true) + }))] pub fn advance_state_to_inserted(&mut self) -> Result<(), BatchedMerkleTreeError> { if self.get_state() == BatchState::Full { self.state = BatchState::Inserted.into(); @@ -225,6 +234,62 @@ impl Batch { self.num_full_zkp_batches > self.num_inserted_zkp_batches } + /// Kani-only: Mock address insertion - populates hash chain and updates batch state + #[cfg_attr(kani, kani::requires(bloom_filter_store.len() > 0))] + pub fn kani_mock_address_insert( + &mut self, + value: &[u8; 32], + hash_chain_store: &mut ZeroCopyVecU64<[u8; 32]>, + bloom_filter_store: &mut 
[u8], + ) -> Result<(), BatchedMerkleTreeError> { + // Auto-reset batch if it's in Inserted state (mirrors insert_into_current_queue_batch) + if self.get_state() == BatchState::Inserted { + hash_chain_store.clear(); + self.advance_state_to_fill(None)?; + } else if self.get_state() != BatchState::Fill { + return Err(BatchedMerkleTreeError::BatchNotReady); + } + + if self.num_inserted == 0 { + hash_chain_store.push(*value)?; + } else if let Some(h) = hash_chain_store.last_mut() { + *h = *value; + } + bloom_filter_store[0] = 1; // dummy operation to simulate bloom filter insertion + self.num_inserted += 1; + if self.num_inserted == self.zkp_batch_size { + self.num_full_zkp_batches += 1; + self.num_inserted = 0; + if self.num_full_zkp_batches == self.get_num_zkp_batches() { + self.advance_state_to_full()?; + } + } + Ok(()) + } + + /// Mock insert for output queues - only advances batch state + #[cfg(kani)] + pub fn kani_mock_output_insert(&mut self) -> Result<(), BatchedMerkleTreeError> { + // Auto-reset batch if it's in Inserted state + if self.get_state() == BatchState::Inserted { + self.advance_state_to_fill(None)?; + } else if self.get_state() != BatchState::Fill { + return Err(BatchedMerkleTreeError::BatchNotReady); + } + + // Only update batch metadata - no need to store actual values + self.num_inserted += 1; + if self.num_inserted == self.zkp_batch_size { + self.num_full_zkp_batches += 1; + self.num_inserted = 0; + if self.num_full_zkp_batches == self.get_num_zkp_batches() { + self.advance_state_to_full()?; + } + } + + Ok(()) + } + /// Returns the number of zkp batch updates /// that are ready to be inserted into the tree. pub fn get_num_ready_zkp_updates(&self) -> u64 { @@ -259,6 +324,16 @@ impl Batch { self.num_full_zkp_batches * self.zkp_batch_size + self.num_inserted } + /// Returns the batch size. + pub fn get_batch_size(&self) -> u64 { + self.batch_size + } + + /// Returns the zkp batch size. 
+ pub fn get_zkp_batch_size(&self) -> u64 { + self.zkp_batch_size + } + /// Returns the number of zkp batches in the batch. pub fn get_num_zkp_batches(&self) -> u64 { self.batch_size / self.zkp_batch_size diff --git a/program-libs/batched-merkle-tree/src/errors.rs b/program-libs/batched-merkle-tree/src/errors.rs index a322777757..e09b5bc21b 100644 --- a/program-libs/batched-merkle-tree/src/errors.rs +++ b/program-libs/batched-merkle-tree/src/errors.rs @@ -51,6 +51,8 @@ pub enum BatchedMerkleTreeError { NonInclusionCheckFailed, #[error("Bloom filter must be zeroed prior to reusing a batch.")] BloomFilterNotZeroed, + #[error("Cannot zero out complete or more than complete root history.")] + CannotZeroCompleteRootHistory, #[error("Account error {0}")] AccountError(#[from] AccountError), } @@ -70,6 +72,7 @@ impl From for u32 { BatchedMerkleTreeError::TreeIsFull => 14310, BatchedMerkleTreeError::NonInclusionCheckFailed => 14311, BatchedMerkleTreeError::BloomFilterNotZeroed => 14312, + BatchedMerkleTreeError::CannotZeroCompleteRootHistory => 14313, BatchedMerkleTreeError::Hasher(e) => e.into(), BatchedMerkleTreeError::ZeroCopy(e) => e.into(), BatchedMerkleTreeError::MerkleTreeMetadata(e) => e.into(), diff --git a/program-libs/batched-merkle-tree/src/merkle_tree.rs b/program-libs/batched-merkle-tree/src/merkle_tree.rs index ca91e6ec9b..3354447521 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree.rs @@ -23,7 +23,10 @@ use light_verifier::{ use light_zero_copy::{ cyclic_vec::ZeroCopyCyclicVecU64, errors::ZeroCopyError, vec::ZeroCopyVecU64, }; +#[cfg(not(kani))] use zerocopy::Ref; +#[cfg(kani)] +use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; use super::batch::Batch; use crate::{ @@ -75,6 +78,7 @@ pub type InstructionDataBatchAppendInputs = InstructionDataBatchNullifyInputs; /// - get_state_root_by_index /// - get_address_root_by_index #[derive(Debug, PartialEq)] +#[cfg(not(kani))] pub struct 
BatchedMerkleTreeAccount<'a> { pubkey: Pubkey, metadata: Ref<&'a mut [u8], BatchedMerkleTreeMetadata>, @@ -83,6 +87,62 @@ pub struct BatchedMerkleTreeAccount<'a> { pub hash_chain_stores: [ZeroCopyVecU64<'a, [u8; 32]>; 2], } +#[derive(Debug, PartialEq)] +#[cfg(kani)] +pub struct BatchedMerkleTreeAccount<'a> { + pubkey: Pubkey, + metadata: &'a mut BatchedMerkleTreeMetadata, + pub root_history: ZeroCopyCyclicVecU64<'a, [u8; 32]>, + pub bloom_filter_stores: [&'a mut [u8]; 2], + pub hash_chain_stores: [ZeroCopyVecU64<'a, [u8; 32]>; 2], + /// Ghost state (verification-only): tracks which batch created each root + /// Parallel to root_history - same capacity, same cyclic indexing + /// Entry value is batch index (0 or 1) that created the root at that position + #[cfg(kani)] + pub ghost_root_batch: GhostRoots, +} + +#[cfg(kani)] +#[derive(Debug, PartialEq)] +pub struct GhostRoots { + pub batch_0: Vec, + pub batch_1: Vec, +} + +#[cfg(kani)] +impl GhostRoots { + pub fn track_root(&mut self, batch_index: usize, sequence_number: u64, root: [u8; 32]) { + let meta = GhostRootMeta { + sequence_number, + root, + }; + match batch_index { + 0 => self.batch_0.push(meta), + 1 => self.batch_1.push(meta), + _ => {} + } + } +} + +#[cfg(kani)] +#[repr(C)] +#[derive( + BorshSerialize, + BorshDeserialize, + Debug, + PartialEq, + Clone, + Copy, + FromBytes, + IntoBytes, + KnownLayout, + Immutable, +)] +pub struct GhostRootMeta { + pub sequence_number: u64, + pub root: [u8; 32], +} + impl Discriminator for BatchedMerkleTreeAccount<'_> { const LIGHT_DISCRIMINATOR: [u8; 8] = *b"BatchMta"; const LIGHT_DISCRIMINATOR_SLICE: &'static [u8] = b"BatchMta"; @@ -183,9 +243,28 @@ impl<'a> BatchedMerkleTreeAccount<'a> { ) -> Result, BatchedMerkleTreeError> { // Discriminator is already checked in check_account_info. 
let (_discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); + + #[cfg(not(kani))] let (metadata, account_data) = Ref::<&'a mut [u8], BatchedMerkleTreeMetadata>::from_prefix(account_data) .map_err(ZeroCopyError::from)?; + + #[cfg(kani)] + let (metadata, account_data) = { + let size = std::mem::size_of::(); + if account_data.len() < size { + return Err(ZeroCopyError::Size.into()); + } + let (meta_bytes, remaining) = account_data.split_at_mut(size); + let metadata = unsafe { + let ptr = meta_bytes.as_mut_ptr() as *mut BatchedMerkleTreeMetadata; + // Read potentially unaligned, write back, then get mutable ref + core::ptr::write_unaligned(ptr, core::ptr::read_unaligned(ptr as *const _)); + &mut *ptr + }; + (metadata, remaining) + }; + if metadata.tree_type != TREE_TYPE { return Err(MerkleTreeMetadataError::InvalidTreeType.into()); } @@ -202,14 +281,35 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // Hash chain stores for input or address queue. let (hash_chain_store_0, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; - let hash_chain_store_1 = ZeroCopyVecU64::from_bytes(account_data)?; - Ok(BatchedMerkleTreeAccount { - pubkey: *pubkey, - metadata, - root_history, - bloom_filter_stores, - hash_chain_stores: [hash_chain_store_0, hash_chain_store_1], - }) + + #[cfg(not(kani))] + { + let hash_chain_store_1 = ZeroCopyVecU64::from_bytes(account_data)?; + Ok(BatchedMerkleTreeAccount { + pubkey: *pubkey, + metadata, + root_history, + bloom_filter_stores, + hash_chain_stores: [hash_chain_store_0, hash_chain_store_1], + }) + } + + #[cfg(kani)] + { + let (hash_chain_store_1, _account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; + let ghost_root_batch = GhostRoots { + batch_0: Vec::new(), + batch_1: Vec::new(), + }; + Ok(BatchedMerkleTreeAccount { + pubkey: *pubkey, + metadata, + root_history, + bloom_filter_stores, + hash_chain_stores: [hash_chain_store_0, hash_chain_store_1], + ghost_root_batch, + }) + } } 
#[allow(clippy::too_many_arguments)] @@ -229,10 +329,27 @@ impl<'a> BatchedMerkleTreeAccount<'a> { let (discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); set_discriminator::(discriminator)?; + #[cfg(not(kani))] let (mut account_metadata, account_data) = Ref::<&'a mut [u8], BatchedMerkleTreeMetadata>::from_prefix(account_data) .map_err(ZeroCopyError::from)?; + #[cfg(kani)] + let (account_metadata, account_data) = { + let size = std::mem::size_of::(); + #[cfg(kani)] + kani::cover!(account_data.len() >= size, "Size check passed in init"); + if account_data.len() < size { + return Err(ZeroCopyError::Size.into()); + } + let (meta_bytes, remaining) = account_data.split_at_mut(size); + #[cfg(kani)] + kani::cover!(true, "Past split_at_mut in init"); + let metadata = + unsafe { &mut *(meta_bytes.as_mut_ptr() as *mut BatchedMerkleTreeMetadata) }; + (metadata, remaining) + }; + // Precompute Merkle tree pubkey hash for use in system program. // The compressed account hash depends on the Merkle tree pubkey and leaf index. // Poseidon hashes required input size < bn254 field size. 
@@ -322,17 +439,46 @@ impl<'a> BatchedMerkleTreeAccount<'a> { account_metadata.queue_batches.get_num_zkp_batches(), account_data, )?; - let hash_chain_store_1 = ZeroCopyVecU64::new( - account_metadata.queue_batches.get_num_zkp_batches(), - account_data, - )?; - Ok(BatchedMerkleTreeAccount { - pubkey: *pubkey, - metadata: account_metadata, - root_history, - bloom_filter_stores, - hash_chain_stores: [hash_chain_store_0, hash_chain_store_1], - }) + + #[cfg(not(kani))] + { + let hash_chain_store_1 = ZeroCopyVecU64::new( + account_metadata.queue_batches.get_num_zkp_batches(), + account_data, + )?; + Ok(BatchedMerkleTreeAccount { + pubkey: *pubkey, + metadata: account_metadata, + root_history, + bloom_filter_stores, + hash_chain_stores: [hash_chain_store_0, hash_chain_store_1], + }) + } + + #[cfg(kani)] + { + kani::cover!(true, "Entering ghost state initialization"); + let hash_chain_result = ZeroCopyVecU64::new_at( + account_metadata.queue_batches.get_num_zkp_batches(), + account_data, + ); + kani::cover!(hash_chain_result.is_ok(), "hash_chain_store_1 created"); + let (hash_chain_store_1, _account_data) = hash_chain_result?; + + let ghost_root_batch = GhostRoots { + batch_0: Vec::new(), + batch_1: Vec::new(), + }; + kani::cover!(true, "Ghost state initialization complete"); + Ok(BatchedMerkleTreeAccount { + pubkey: *pubkey, + metadata: account_metadata, + root_history, + bloom_filter_stores, + hash_chain_stores: [hash_chain_store_0, hash_chain_store_1], + ghost_root_batch, + }) + } } /// Update the tree from the output queue account. @@ -371,6 +517,44 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// Note: when proving inclusion by index in /// value array we need to insert the value into a bloom_filter once it is /// inserted into the tree. 
Check this with get_num_inserted_zkps + #[cfg_attr(kani, kani::ensures(|result: &Result| { + result.as_ref().ok().and_then(|event| { + if let MerkleTreeEvent::BatchAppend(batch_event) = event { + let old_batch = &old(queue_account.batch_metadata.batches[queue_account.batch_metadata.pending_batch_index as usize]); + let old_seq = old(self.sequence_number); + let old_next_idx = old(self.next_index); + let old_queue_pending = old(queue_account.batch_metadata.pending_batch_index); + let max_zkp = queue_account.batch_metadata.batch_size / queue_account.batch_metadata.zkp_batch_size; + + // 1. Batch was ready to insert + let ready_check = old_batch.batch_is_ready_to_insert(); + + // 2. Sequence number increments by 1 + let seq_check = self.sequence_number == old_seq + 1; + + // 3. New root added to history + let root_check = self.root_history.last().map(|&root| root == instruction_data.new_root).unwrap_or(false); + + // 4. ZKP batch index in event matches old inserted count + let zkp_check = batch_event.zkp_batch_index == old_batch.get_num_inserted_zkps(); + + // 5. If last ZKP batch, queue batch state is Inserted + let was_last = batch_event.zkp_batch_index == max_zkp - 1; + let state_check = !was_last || queue_account.batch_metadata.batches[batch_event.batch_index as usize].get_state() == BatchState::Inserted; + + // 6. Tree next_index increments by zkp_batch_size (always for output queue) + let next_idx_check = self.next_index == old_next_idx + queue_account.batch_metadata.zkp_batch_size; + + // 7. 
If batch completed, queue pending_batch_index must switch + let batch_became_inserted = batch_event.zkp_batch_index == max_zkp - 1; + let batch_switch_check = !batch_became_inserted || queue_account.batch_metadata.pending_batch_index != old_queue_pending; + + Some(ready_check && seq_check && root_check && zkp_check && state_check && next_idx_check && batch_switch_check) + } else { + None + } + }).unwrap_or(true) + }))] pub fn update_tree_from_output_queue_account( &mut self, queue_account: &mut BatchedQueueAccount, @@ -385,20 +569,29 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // 1. Create public inputs hash. let public_input_hash = { - let leaves_hash_chain = queue_account.hash_chain_stores[pending_batch_index] - [first_ready_zkp_batch_index as usize]; - let old_root = self - .root_history - .last() - .ok_or(BatchedMerkleTreeError::InvalidIndex)?; - let mut start_index_bytes = [0u8; 32]; - start_index_bytes[24..].copy_from_slice(&self.next_index.to_be_bytes()); - create_hash_chain_from_array([ - *old_root, - new_root, - leaves_hash_chain, - start_index_bytes, - ])? + // For Kani verification, skip hash chain computation to avoid indexing into empty Vecs. + // We stub the proof verification instead, so the actual hash value doesn't matter. + #[cfg(kani)] + { + [0u8; 32] + } + #[cfg(not(kani))] + { + let leaves_hash_chain = queue_account.hash_chain_stores[pending_batch_index] + [first_ready_zkp_batch_index as usize]; + let old_root = self + .root_history + .last() + .ok_or(BatchedMerkleTreeError::InvalidIndex)?; + let mut start_index_bytes = [0u8; 32]; + start_index_bytes[24..].copy_from_slice(&self.next_index.to_be_bytes()); + create_hash_chain_from_array([ + *old_root, + new_root, + leaves_hash_chain, + start_index_bytes, + ])? + } }; // 2. Verify update proof and update tree account. @@ -486,6 +679,44 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// 5. Increment next full batch index if inserted. /// 6. Return the batch nullify event. 
#[inline(always)] + #[cfg_attr(kani, kani::ensures(|result: &Result| { + result.as_ref().ok().map(|event| { + let old_batch = &old(self.queue_batches.batches[self.queue_batches.pending_batch_index as usize]); + let old_seq = old(self.sequence_number); + let old_next_idx = old(self.next_index); + let max_zkp = self.queue_batches.batch_size / self.queue_batches.zkp_batch_size; + + // 0. Batch was ready to insert + let ready_check = old_batch.batch_is_ready_to_insert(); + + // 1. Sequence number increments by 1 + let seq_check = self.sequence_number == old_seq + 1; + + // 2. New root added to history + let root_check = self.root_history.last().map(|&root| root == instruction_data.new_root).unwrap_or(false); + + // 3. ZKP batch index in event matches old inserted count + let zkp_check = event.zkp_batch_index == old_batch.get_num_inserted_zkps(); + + // 5. If last ZKP batch, state is Inserted + let was_last = event.zkp_batch_index == max_zkp - 1; + let state_check = !was_last || self.queue_batches.batches[event.batch_index as usize].get_state() == BatchState::Inserted; + + // 6. next_index behavior based on tree type + let next_idx_check = if QUEUE_TYPE == ADDRESS_QUEUE_TYPE_V2 { + self.next_index == old_next_idx + self.queue_batches.zkp_batch_size + } else { + self.next_index == old_next_idx + }; + + // 7. 
If batch completed, pending_batch_index must switch + let old_pending = old(self.queue_batches.pending_batch_index); + let batch_became_inserted = event.zkp_batch_index == max_zkp - 1; + let batch_switch_check = !batch_became_inserted || self.queue_batches.pending_batch_index != old_pending; + + ready_check && seq_check && root_check && zkp_check && state_check && next_idx_check && batch_switch_check + }).unwrap_or(true) + }))] fn update_input_queue( &mut self, instruction_data: InstructionDataBatchNullifyInputs, @@ -505,19 +736,29 @@ impl<'a> BatchedMerkleTreeAccount<'a> { .last() .ok_or(BatchedMerkleTreeError::InvalidIndex)?; - if QueueType::from(QUEUE_TYPE) == QueueType::InputStateV2 { - create_hash_chain_from_array([*old_root, new_root, leaves_hash_chain])? - } else if QueueType::from(QUEUE_TYPE) == QueueType::AddressV2 { - let mut next_index_bytes = [0u8; 32]; - next_index_bytes[24..].copy_from_slice(self.next_index.to_be_bytes().as_slice()); - create_hash_chain_from_array([ - *old_root, - new_root, - leaves_hash_chain, - next_index_bytes, - ])? - } else { - return Err(MerkleTreeMetadataError::InvalidQueueType.into()); + // For Kani verification, skip hash chain computation to avoid const generic stubbing issues. + // We stub the proof verification instead, so the actual hash value doesn't matter. + #[cfg(kani)] + { + [0u8; 32] + } + #[cfg(not(kani))] + { + if QueueType::from(QUEUE_TYPE) == QueueType::InputStateV2 { + create_hash_chain_from_array([*old_root, new_root, leaves_hash_chain])? + } else if QueueType::from(QUEUE_TYPE) == QueueType::AddressV2 { + let mut next_index_bytes = [0u8; 32]; + next_index_bytes[24..] + .copy_from_slice(self.next_index.to_be_bytes().as_slice()); + create_hash_chain_from_array([ + *old_root, + new_root, + leaves_hash_chain, + next_index_bytes, + ])? 
+ } else { + return Err(MerkleTreeMetadataError::InvalidQueueType.into()); + } } }; @@ -581,6 +822,8 @@ impl<'a> BatchedMerkleTreeAccount<'a> { ) -> Result<(u64, u64), BatchedMerkleTreeError> { // 1. Verify update proof. let (old_next_index, new_next_index) = if QUEUE_TYPE == QueueType::OutputStateV2 as u64 { + // For Kani verification, skip proof verification to simplify symbolic execution. + #[cfg(not(kani))] verify_batch_append_with_proofs(batch_size, public_input_hash, &proof)?; let old_next_index = self.next_index; // 2. Increment next index. @@ -588,6 +831,8 @@ impl<'a> BatchedMerkleTreeAccount<'a> { (old_next_index, self.next_index) } else if QUEUE_TYPE == QueueType::InputStateV2 as u64 { let old_next_index = self.nullifier_next_index; + // For Kani verification, skip proof verification to simplify symbolic execution. + #[cfg(not(kani))] verify_batch_update(batch_size, public_input_hash, &proof)?; // 2. incrementing nullifier next index. // This index is used by the indexer to remove elements from the database nullifier queue. @@ -596,6 +841,8 @@ impl<'a> BatchedMerkleTreeAccount<'a> { (old_next_index, self.nullifier_next_index) } else if QUEUE_TYPE == QueueType::AddressV2 as u64 { let old_next_index = self.next_index; + // For Kani verification, skip proof verification to simplify symbolic execution. + #[cfg(not(kani))] verify_batch_address_update(batch_size, public_input_hash, &proof)?; // 2. Increment next index. self.increment_merkle_tree_next_index(batch_size); @@ -610,6 +857,17 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // it will overwrite the oldest root // once it is full. self.root_history.push(new_root); + + // 5. 
Update ghost state: track which batch created this root + #[cfg(kani)] + { + self.ghost_root_batch.track_root( + self.queue_batches.pending_batch_index as usize, + self.sequence_number, + new_root, + ); + } + Ok((old_next_index, new_next_index)) } @@ -747,7 +1005,11 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// - now all roots containing values nullified in the final B0 root update are zeroed /// - B0 is safe to clear /// - fn zero_out_roots(&mut self, sequence_number: u64, first_safe_root_index: u32) { + fn zero_out_roots( + &mut self, + sequence_number: u64, + first_safe_root_index: u32, + ) -> Result<(), BatchedMerkleTreeError> { // 1. Check whether overlapping roots exist. let overlapping_roots_exits = sequence_number > self.sequence_number; if overlapping_roots_exits { @@ -757,10 +1019,13 @@ impl<'a> BatchedMerkleTreeAccount<'a> { // the update of the previous batch therfore allow anyone to prove // inclusion of values nullified in the previous batch. let num_remaining_roots = sequence_number - self.sequence_number; + if num_remaining_roots >= self.root_history.len() as u64 { + return Err(BatchedMerkleTreeError::CannotZeroCompleteRootHistory); + } // 2.2. Zero out roots oldest to first safe root index. // Skip one iteration we don't need to zero out // the first safe root. - for _ in 1..num_remaining_roots { + for _ in 0..num_remaining_roots { self.root_history[oldest_root_index] = [0u8; 32]; oldest_root_index += 1; oldest_root_index %= self.root_history.len(); @@ -771,6 +1036,177 @@ impl<'a> BatchedMerkleTreeAccount<'a> { "Zeroing out roots failed." ); } + Ok(()) + } + + /// Ghost state invariant: + /// If a batch is zeroed, + /// root_history must contain only roots from the non-zeroed batch or zeros, + /// and at least one non-zero root. 
+ #[cfg(kani)] + fn all_roots_are_safe(&self) -> bool { + let batch_0 = &self.queue_batches.batches[0]; + let batch_1 = &self.queue_batches.batches[1]; + + // If batch 0 is zeroed, root_history must contain ONLY batch_1 roots or zeros + if batch_0.bloom_filter_is_zeroed() && self.should_zero_bloom_filter() { + let mut has_non_zero = false; + for i in 0..self.root_history.len() { + let root = self.root_history[i]; + if root == [0u8; 32] { + continue; + } + has_non_zero = true; + let in_batch_1 = (0..self.ghost_root_batch.batch_1.len()) + .any(|j| self.ghost_root_batch.batch_1[j].root == root); + kani::cover!(in_batch_1, "Root not found in batch 1 roots"); + if !in_batch_1 { + return false; + } + } + kani::cover!(has_non_zero, "batch 0 roots has only zeros"); + if !has_non_zero { + return false; + } + } + + // If batch 1 is zeroed, root_history must contain ONLY batch_0 roots or zeros + if batch_1.bloom_filter_is_zeroed() && self.should_zero_bloom_filter() { + let mut has_non_zero = false; + for i in 0..self.root_history.len() { + let root = self.root_history[i]; + if root == [0u8; 32] { + continue; + } + has_non_zero = true; + let in_batch_0 = (0..self.ghost_root_batch.batch_0.len()) + .any(|j| self.ghost_root_batch.batch_0[j].root == root); + kani::cover!(in_batch_0, "Root not found in batch 0 roots"); + if !in_batch_0 { + return false; + } + } + kani::cover!(has_non_zero, "batch 1 roots has only zeros"); + if !has_non_zero { + return false; + } + } + + true + } + + /// Bloom filter zeroing invariant: + /// If a batch's bloom_filter_is_zeroed flag is set, all bloom filter bytes must be zero. 
+ #[cfg(kani)] + fn bloom_filters_are_zeroed(&self) -> bool { + if self.should_zero_bloom_filter() { + (0..2).all(|i| { + !self.queue_batches.batches[i].bloom_filter_is_zeroed() + || self.bloom_filter_stores[i].iter().all(|&b| b == 0) + }) + } else { + true + } + } + + /// Helper: Current batch is at least half full + #[cfg(kani)] + fn current_batch_is_half_full(&self) -> bool { + let curr = self.queue_batches.pending_batch_index as usize; + self.queue_batches.batches[curr].get_num_inserted_elements() + >= self.queue_batches.batch_size / 2 + } + + /// Helper: Current batch is not inserted + #[cfg(kani)] + fn current_batch_not_inserted(&self) -> bool { + let curr = self.queue_batches.pending_batch_index as usize; + self.queue_batches.batches[curr].get_state() != BatchState::Inserted + } + + /// Helper: Previous batch is inserted + #[cfg(kani)] + fn previous_batch_is_inserted(&self) -> bool { + let curr = self.queue_batches.pending_batch_index as usize; + let prev = if curr == 0 { 1 } else { 0 }; + self.queue_batches.batches[prev].get_state() == BatchState::Inserted + } + + /// Helper: Previous batch bloom filter is not zeroed + #[cfg(kani)] + fn previous_batch_bloom_not_zeroed(&self) -> bool { + let curr = self.queue_batches.pending_batch_index as usize; + let prev = if curr == 0 { 1 } else { 0 }; + !self.queue_batches.batches[prev].bloom_filter_is_zeroed() + } + + /// Helper: At least one update since previous batch insertion + #[cfg(kani)] + fn has_updates_since_previous_batch(&self) -> bool { + let curr = self.queue_batches.pending_batch_index as usize; + let prev = if curr == 0 { 1 } else { 0 }; + let seq_diff = self.queue_batches.batches[prev] + .sequence_number + .saturating_sub(self.metadata.root_history_capacity as u64); + seq_diff != self.sequence_number + } + + /// Helper predicate to determine if bloom filter should be zeroed. + /// Returns true if ALL of these conditions are met: + /// 1. Current batch is at least 50% full + /// 2. 
Current batch is not yet inserted into the tree + /// 3. Previous batch has been fully inserted + /// 4. Previous batch's bloom filter hasn't been zeroed yet + /// 5. At least one tree update occurred since previous batch insertion + // Condition 1: Current batch is at least half full + #[cfg_attr(kani, kani::ensures(|&result: &bool| !result || self.current_batch_is_half_full()))] + // Condition 2: Current batch is not inserted + #[cfg_attr(kani, kani::ensures(|&result: &bool| !result || self.current_batch_not_inserted()))] + // Condition 3: Previous batch is inserted + #[cfg_attr(kani, kani::ensures(|&result: &bool| !result || self.previous_batch_is_inserted()))] + // Condition 4: Previous batch's bloom filter is not zeroed + #[cfg_attr(kani, kani::ensures(|&result: &bool| !result || self.previous_batch_bloom_not_zeroed()))] + // Condition 5: At least one update since previous batch insertion + #[cfg_attr(kani, kani::ensures(|&result: &bool| !result || self.has_updates_since_previous_batch()))] + // Condition 6 (reverse): If all conditions hold, result must be true + #[cfg_attr(kani, kani::ensures(|&result: &bool| { + let all_conditions = self.current_batch_is_half_full() + && self.current_batch_not_inserted() + && self.previous_batch_is_inserted() + && self.previous_batch_bloom_not_zeroed() + && self.has_updates_since_previous_batch(); + !all_conditions || result + }))] + fn should_zero_bloom_filter(&self) -> bool { + let current_batch = self.queue_batches.pending_batch_index as usize; + let previous_batch_index = if current_batch == 0 { 1 } else { 0 }; + let batch_size = self.queue_batches.batch_size; + + // Condition 1 & 2: Current batch is at least half full and not inserted + let current_batch_is_half_full = { + let current_batch_is_not_inserted = + self.queue_batches.batches[current_batch].get_state() != BatchState::Inserted; + let num_inserted_elements = + self.queue_batches.batches[current_batch].get_num_inserted_elements(); + num_inserted_elements >= 
batch_size / 2 && current_batch_is_not_inserted + }; + + // Condition 3 & 4: Previous batch is inserted but not zeroed + let previous_batch_is_ready = { + let previous_batch = &self.queue_batches.batches[previous_batch_index]; + previous_batch.get_state() == BatchState::Inserted + && !previous_batch.bloom_filter_is_zeroed() + }; + + // Condition 5: At least one update since previous batch insertion + let has_updates = { + let seq_diff = self.queue_batches.batches[previous_batch_index] + .sequence_number + .saturating_sub(self.metadata.root_history_capacity as u64); + seq_diff != self.sequence_number + }; + + current_batch_is_half_full && previous_batch_is_ready && has_updates } /// Zero out bloom filter of previous batch if 50% of the @@ -796,35 +1232,33 @@ impl<'a> BatchedMerkleTreeAccount<'a> { /// Initial state: 0 pending -> 1 previous pending even though it was never used /// 0 inserted -> 1 pending 0 -> 1 pending 50% - zero out 0 -> 1 inserted /// 0 pending -> 1 inserted + #[cfg_attr(kani, kani::ensures(|result: &Result<(), BatchedMerkleTreeError>| { + result.is_ok().then(|| self.all_roots_are_safe()).unwrap_or(true) + }))] + #[cfg_attr(kani, kani::ensures(|result: &Result<(), BatchedMerkleTreeError>| { + result.is_ok().then(|| self.bloom_filters_are_zeroed()).unwrap_or(true) + }))] fn zero_out_previous_batch_bloom_filter(&mut self) -> Result<(), BatchedMerkleTreeError> { - let current_batch = self.queue_batches.pending_batch_index as usize; - let batch_size = self.queue_batches.batch_size; - let previous_pending_batch_index = if 0 == current_batch { 1 } else { 0 }; - let current_batch_is_half_full = { - let current_batch_is_not_inserted = - self.queue_batches.batches[current_batch].get_state() != BatchState::Inserted; - let num_inserted_elements = - self.queue_batches.batches[current_batch].get_num_inserted_elements(); - let current_batch_is_half_full = num_inserted_elements >= batch_size / 2; - current_batch_is_half_full && current_batch_is_not_inserted - }; 
+ // Check if we should zero the bloom filter using the helper predicate + if self.should_zero_bloom_filter() { + let current_batch = self.queue_batches.pending_batch_index as usize; + let previous_pending_batch_index = if 0 == current_batch { 1 } else { 0 }; - let previous_pending_batch = self - .queue_batches - .batches - .get_mut(previous_pending_batch_index) - .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; - - let previous_batch_is_inserted = previous_pending_batch.get_state() == BatchState::Inserted; - let previous_batch_is_ready = - previous_batch_is_inserted && !previous_pending_batch.bloom_filter_is_zeroed(); - - // Current batch is at least half full, previous batch is inserted, and not zeroed. - if current_batch_is_half_full && previous_batch_is_ready { + let previous_pending_batch = self + .queue_batches + .batches + .get_mut(previous_pending_batch_index) + .ok_or(BatchedMerkleTreeError::InvalidBatchIndex)?; // 3.1. Mark bloom filter zeroed. previous_pending_batch.set_bloom_filter_to_zeroed(); let seq = previous_pending_batch.sequence_number; - let root_index = previous_pending_batch.root_index; + // previous_pending_batch.root_index is the index at which the root + // of the last update of that batch was inserted. + // This is the last unsafe root index. + // The next index is safe. + let first_safe_root_index = + (previous_pending_batch.root_index + 1) % (self.root_history.len() as u32); + // 3.2. Zero out bloom filter. { + let bloom_filter = self
{ - self.zero_out_roots(seq, root_index); + self.zero_out_roots(seq, first_safe_root_index)?; } } @@ -928,6 +1362,36 @@ impl<'a> BatchedMerkleTreeAccount<'a> { fn increment_queue_next_index(&mut self) { self.queue_batches.next_index += 1; } + #[cfg(kani)] + /// Kani-only: Mock address insertion - handles disjoint borrows internally + pub fn kani_mock_insert( + &mut self, + batch_idx: usize, + value: &[u8; 32], + ) -> Result<(), BatchedMerkleTreeError> { + kani_mock_insert_helper( + value, + &mut self.metadata.queue_batches.batches, + &mut self.hash_chain_stores, + &mut self.bloom_filter_stores, + batch_idx, + ) + } +} + +#[cfg(kani)] +fn kani_mock_insert_helper( + value: &[u8; 32], + batches: &mut [Batch; 2], + hash_chain_stores: &mut [light_zero_copy::vec::ZeroCopyVecU64<[u8; 32]>; 2], + bloom_filter_stores: &mut [&mut [u8]; 2], + batch_idx: usize, +) -> Result<(), BatchedMerkleTreeError> { + batches[batch_idx].kani_mock_address_insert( + value, + &mut hash_chain_stores[batch_idx], + &mut bloom_filter_stores[batch_idx], + ) } #[cfg(feature = "test-only")] @@ -944,13 +1408,27 @@ impl Deref for BatchedMerkleTreeAccount<'_> { type Target = BatchedMerkleTreeMetadata; fn deref(&self) -> &Self::Target { - &self.metadata + #[cfg(not(kani))] + { + &self.metadata + } + #[cfg(kani)] + { + self.metadata + } } } impl DerefMut for BatchedMerkleTreeAccount<'_> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.metadata + #[cfg(not(kani))] + { + &mut self.metadata + } + #[cfg(kani)] + { + self.metadata + } } } @@ -981,6 +1459,7 @@ pub fn get_merkle_tree_account_size( mt_account.get_account_size().unwrap() } +#[cfg(feature = "test-only")] pub fn assert_nullify_event( event: MerkleTreeEvent, new_root: [u8; 32], @@ -1009,6 +1488,7 @@ pub fn assert_nullify_event( assert_eq!(event, ref_event); } +#[cfg(feature = "test-only")] pub fn assert_batch_append_event_event( event: MerkleTreeEvent, new_root: [u8; 32], @@ -1038,6 +1518,7 @@ pub fn assert_batch_append_event_event( 
assert_eq!(event, ref_event); } +#[cfg(feature = "test-only")] pub fn assert_batch_adress_event( event: MerkleTreeEvent, new_root: [u8; 32], @@ -1145,7 +1626,11 @@ mod test { let rng = &mut rand::rngs::StdRng::from_seed([0u8; 32]); let mut latest_root_0 = [0u8; 32]; let mut latest_root_1 = [0u8; 32]; - + #[allow(clippy::needless_late_init)] + let last_batch0_root; + let last_batch1_root; + let last_batch0_root_update2; + let first_batch1_root; // 1. No batch is ready // -> nothing should happen. { @@ -1153,6 +1638,7 @@ mod test { insert_rnd_addresses(&mut account_data, batch_size, rng, current_slot, &pubkey) .unwrap(); + println!("initial account.root_history {:?}", account.root_history); assert_eq!( account.queue_batches.batches[0].get_state(), BatchState::Full @@ -1181,7 +1667,9 @@ mod test { assert_eq!(account.queue_batches.pending_batch_index, 1); let index = account.queue_batches.batches[0].root_index; assert_eq!(account.root_history[index as usize], latest_root_0); + println!("account.root_history {:?}", account.root_history); } + last_batch0_root = latest_root_0; // 2. Batch 0 is inserted but Batch 1 is not half full // -> nothing should happen. { @@ -1214,6 +1702,39 @@ mod test { { insert_rnd_addresses(&mut account_data, 1, rng, current_slot, &pubkey).unwrap(); } + // 4.1 Batch 0 is inserted but Batch 1 is half full but no further roots exist + // if we zero out all roots we delete the tree state. + // -> nothing should happen. + { + let mut account_data = account_data.clone(); + let account_data_ref = account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey) + .unwrap(); + account.zero_out_previous_batch_bloom_filter().unwrap(); + assert_eq!(account_data, account_data_ref); + } + { + let mut account = + BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey) + .unwrap(); + // Insert first root for batch 1. 
+ let rnd_root = rng.gen(); + first_batch1_root = rnd_root; + account.root_history.push(rnd_root); + // latest_root_0 = rnd_root; // Not used after this point + account.metadata.sequence_number += 1; + let root_index = account.get_root_index(); + println!("root_index: {}", root_index); + let sequence_number = account.sequence_number; + + let state = account.queue_batches.batches[1] + .mark_as_inserted_in_merkle_tree(sequence_number, root_index, root_history_len) + .unwrap(); + account + .queue_batches + .increment_pending_batch_index_if_inserted(state); + } let mut account_data = account_data.clone(); let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey).unwrap(); @@ -1227,10 +1748,7 @@ mod test { println!("previous_roots: {:?}", previous_roots); assert_ne!(previous_roots, current_roots); let root_index = account.queue_batches.batches[0].root_index; - assert_eq!( - account.root_history[root_index as usize], - previous_roots[root_index as usize] - ); + assert_eq!( account.queue_batches.batches[0].get_state(), BatchState::Inserted @@ -1244,12 +1762,18 @@ mod test { ); for i in 0..root_history_len as usize { - if i == root_index as usize { - assert_eq!(account.root_history[i], latest_root_0); + if i == root_index as usize + 1 { + assert_eq!(account.root_history[i], first_batch1_root); } else { assert_eq!(account.root_history[i], [0u8; 32]); } } + println!( + "account + .root_history {:?}", + account.root_history + ); + assert!(!account.root_history.iter().any(|x| *x == last_batch0_root)); } // Make Batch 1 full and insert { @@ -1261,7 +1785,7 @@ mod test { BatchState::Full ); // simulate batch insertion - for _ in 0..num_zkp_updates { + for _ in 1..num_zkp_updates { let rnd_root = rng.gen(); account.root_history.push(rnd_root); latest_root_1 = rnd_root; @@ -1284,6 +1808,7 @@ mod test { assert_eq!(account.queue_batches.pending_batch_index, 0); let index = account.queue_batches.batches[1].root_index; 
assert_eq!(account.root_history[index as usize], latest_root_1); + last_batch1_root = latest_root_1; } println!("pre 4"); // 5. Batch 1 is inserted and Batch 0 is empty @@ -1328,6 +1853,7 @@ mod test { insert_rnd_addresses(&mut account_data, 1, rng, current_slot, &pubkey).unwrap(); } // simulate 10 other batch insertions from an output queue + // that overwrite the complete root history { let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey) @@ -1338,9 +1864,11 @@ mod test { account.metadata.sequence_number += 1; } } + let mut account_data_ref = account_data.clone(); let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey).unwrap(); + // Batch 0 is half full and other roots exist. -> should zero out bloom filter but not zero out any roots. account.zero_out_previous_batch_bloom_filter().unwrap(); let mut account_ref = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data_ref, &pubkey) @@ -1350,6 +1878,7 @@ mod test { .for_each(|x| *x = 0); account_ref.queue_batches.batches[1].set_bloom_filter_to_zeroed(); assert_eq!(account.get_metadata(), account_ref.get_metadata()); + assert!(!account.root_history.iter().any(|x| *x == last_batch1_root)); assert_eq!(account, account_ref); } // 8. Batch 1 is already zeroed -> nothing should happen @@ -1400,6 +1929,29 @@ mod test { .queue_batches .increment_pending_batch_index_if_inserted(state); } + last_batch0_root_update2 = *account.root_history.last().unwrap(); + println!("last_batch0_root_update2 {:?}", last_batch0_root_update2); + + // Perform batch 1 insertions to create a new root that is not part of batch 1 update. 
+ { + let rnd_root = rng.gen(); + account.root_history.push(rnd_root); + println!("first batch 1 root {:?}", rnd_root); + account.metadata.sequence_number += 1; + let root_index = account.get_root_index(); + let sequence_number = account.sequence_number; + + let state = account.queue_batches.batches[1] + .mark_as_inserted_in_merkle_tree( + sequence_number, + root_index, + root_history_len, + ) + .unwrap(); + account + .queue_batches + .increment_pending_batch_index_if_inserted(state); + } } println!("pre 9"); let mut account_data_ref = account_data.clone(); @@ -1413,7 +1965,10 @@ mod test { account.queue_batches.batches[1].get_state(), BatchState::Full ); + let pre_roots = account.root_history.to_vec(); + println!("pre roots {:?}", pre_roots); account.zero_out_previous_batch_bloom_filter().unwrap(); + println!("post roots {:?}", account.root_history); let mut account_ref = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data_ref, &pubkey) .unwrap(); @@ -1424,12 +1979,16 @@ mod test { account_ref.queue_batches.batches[0].set_bloom_filter_to_zeroed(); assert_eq!(account.get_metadata(), account_ref.get_metadata()); for i in 0..root_history_len as usize { - if i == root_index as usize { + if i == root_index as usize + 1 { continue; } else { account_ref.root_history[i] = [0u8; 32]; } } + assert!(!account + .root_history + .iter() + .any(|x| *x == last_batch0_root_update2)); assert_eq!(account, account_ref); } @@ -1437,7 +1996,7 @@ mod test { { let mut account = BatchedMerkleTreeAccount::address_from_bytes(&mut account_data, &pubkey).unwrap(); - for _ in 0..num_zkp_updates { + for _ in 1..num_zkp_updates { let rnd_root = rng.gen(); account.root_history.push(rnd_root); account.metadata.sequence_number += 1; @@ -1470,7 +2029,6 @@ mod test { } println!("pre 9.1"); - // Zero out batch 1 with user tx { // fill batch 0 { diff --git a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs index 
860fe633d3..d7d9ed54cb 100644 --- a/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs +++ b/program-libs/batched-merkle-tree/src/merkle_tree_metadata.rs @@ -85,6 +85,7 @@ impl BatchedMerkleTreeMetadata { + self .queue_batches .queue_account_size(QueueType::InputStateV2 as u64)?; + Ok(size) } diff --git a/program-libs/batched-merkle-tree/src/queue.rs b/program-libs/batched-merkle-tree/src/queue.rs index 5ec1f80f3b..27a39475bb 100644 --- a/program-libs/batched-merkle-tree/src/queue.rs +++ b/program-libs/batched-merkle-tree/src/queue.rs @@ -11,7 +11,9 @@ use light_compressed_account::{ }; use light_merkle_tree_metadata::{errors::MerkleTreeMetadataError, queue::QueueMetadata}; use light_zero_copy::{errors::ZeroCopyError, vec::ZeroCopyVecU64}; -use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout, Ref}; +#[cfg(not(kani))] +use zerocopy::Ref; +use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout}; // Import the feature-gated types from lib.rs use super::batch::BatchState; @@ -130,6 +132,7 @@ impl BatchedQueueMetadata { /// To read, light the system program uses: /// - `prove_inclusion_by_index` #[derive(Debug, PartialEq)] +#[cfg(not(kani))] pub struct BatchedQueueAccount<'a> { pubkey: Pubkey, metadata: Ref<&'a mut [u8], BatchedQueueMetadata>, @@ -137,6 +140,15 @@ pub struct BatchedQueueAccount<'a> { pub hash_chain_stores: [ZeroCopyVecU64<'a, [u8; 32]>; 2], } +#[derive(Debug, PartialEq)] +#[cfg(kani)] +pub struct BatchedQueueAccount<'a> { + pubkey: Pubkey, + metadata: &'a mut BatchedQueueMetadata, + pub value_vecs: [Vec<[u8; 32]>; 2], + pub hash_chain_stores: [Vec<[u8; 32]>; 2], +} + impl Discriminator for BatchedQueueAccount<'_> { const LIGHT_DISCRIMINATOR: [u8; 8] = *b"queueacc"; const LIGHT_DISCRIMINATOR_SLICE: &'static [u8] = b"queueacc"; @@ -190,26 +202,57 @@ impl<'a> BatchedQueueAccount<'a> { pubkey: Pubkey, ) -> Result, BatchedMerkleTreeError> { let (_discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); + + 
#[cfg(not(kani))] let (metadata, account_data) = Ref::<&'a mut [u8], BatchedQueueMetadata>::from_prefix(account_data) .map_err(ZeroCopyError::from)?; + #[cfg(kani)] + let (metadata, account_data) = { + let size = std::mem::size_of::(); + if account_data.len() < size { + return Err(ZeroCopyError::Size.into()); + } + let (meta_bytes, remaining) = account_data.split_at_mut(size); + let metadata = unsafe { + let ptr = meta_bytes.as_mut_ptr() as *mut BatchedQueueMetadata; + core::ptr::write_unaligned(ptr, core::ptr::read_unaligned(ptr as *const _)); + &mut *ptr + }; + (metadata, remaining) + }; + if metadata.metadata.queue_type != QUEUE_TYPE { return Err(MerkleTreeMetadataError::InvalidQueueType.into()); } - let (value_vec0, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; - let (value_vec1, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; - - let (hash_chain_store0, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; - let hash_chain_store1 = ZeroCopyVecU64::from_bytes(account_data)?; + #[cfg(not(kani))] + { + let (value_vec0, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; + let (value_vec1, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; + + let (hash_chain_store0, account_data) = ZeroCopyVecU64::from_bytes_at(account_data)?; + let hash_chain_store1 = ZeroCopyVecU64::from_bytes(account_data)?; + + Ok(BatchedQueueAccount { + pubkey, + metadata, + value_vecs: [value_vec0, value_vec1], + hash_chain_stores: [hash_chain_store0, hash_chain_store1], + }) + } - Ok(BatchedQueueAccount { - pubkey, - metadata, - value_vecs: [value_vec0, value_vec1], - hash_chain_stores: [hash_chain_store0, hash_chain_store1], - }) + #[cfg(kani)] + { + // For Kani: from_bytes is not used in verification tests, only init is used + Ok(BatchedQueueAccount { + pubkey, + metadata, + value_vecs: [Vec::new(), Vec::new()], + hash_chain_stores: [Vec::new(), Vec::new()], + }) + } } #[allow(clippy::too_many_arguments)] @@ -227,10 +270,22 @@ 
impl<'a> BatchedQueueAccount<'a> { let (discriminator, account_data) = account_data.split_at_mut(DISCRIMINATOR_LEN); set_discriminator::(discriminator)?; + #[cfg(not(kani))] let (mut account_metadata, account_data) = Ref::<&mut [u8], BatchedQueueMetadata>::from_prefix(account_data) .map_err(ZeroCopyError::from)?; + #[cfg(kani)] + let (account_metadata, account_data) = { + let size = std::mem::size_of::(); + if account_data.len() < size { + return Err(ZeroCopyError::Size.into()); + } + let (meta_bytes, remaining) = account_data.split_at_mut(size); + let metadata = unsafe { &mut *(meta_bytes.as_mut_ptr() as *mut BatchedQueueMetadata) }; + (metadata, remaining) + }; + account_metadata.init( metadata, output_queue_batch_size, @@ -260,25 +315,41 @@ impl<'a> BatchedQueueAccount<'a> { let value_vec_capacity = account_metadata.batch_metadata.batch_size; let hash_chain_capacity = account_metadata.batch_metadata.get_num_zkp_batches(); - let (value_vecs_0, account_data) = - ZeroCopyVecU64::new_at(value_vec_capacity, account_data)?; - let (value_vecs_1, account_data) = - ZeroCopyVecU64::new_at(value_vec_capacity, account_data)?; - let (hash_chain_0, account_data) = - ZeroCopyVecU64::new_at(hash_chain_capacity, account_data)?; - let hash_chain_1 = ZeroCopyVecU64::new(hash_chain_capacity, account_data)?; - Ok(BatchedQueueAccount { - pubkey, - metadata: account_metadata, - value_vecs: [value_vecs_0, value_vecs_1], - hash_chain_stores: [hash_chain_0, hash_chain_1], - }) + + #[cfg(not(kani))] + { + let (value_vecs_0, account_data) = + ZeroCopyVecU64::new_at(value_vec_capacity, account_data)?; + let (value_vecs_1, account_data) = + ZeroCopyVecU64::new_at(value_vec_capacity, account_data)?; + let (hash_chain_0, account_data) = + ZeroCopyVecU64::new_at(hash_chain_capacity, account_data)?; + let hash_chain_1 = ZeroCopyVecU64::new(hash_chain_capacity, account_data)?; + Ok(BatchedQueueAccount { + pubkey, + metadata: account_metadata, + value_vecs: [value_vecs_0, value_vecs_1], + 
hash_chain_stores: [hash_chain_0, hash_chain_1], + }) + } + + #[cfg(kani)] + { + // For Kani: use regular Vec instead of ZeroCopyVecU64 to avoid complex initialization + Ok(BatchedQueueAccount { + pubkey, + metadata: account_metadata, + value_vecs: [Vec::new(), Vec::new()], + hash_chain_stores: [Vec::new(), Vec::new()], + }) + } } /// Insert a value into the current batch /// of this output queue account. /// 1. insert value into a value vec and hash chain store. /// 2. Increment next_index. + #[cfg(not(kani))] pub fn insert_into_current_batch( &mut self, hash_chain_value: &[u8; 32], @@ -302,6 +373,16 @@ impl<'a> BatchedQueueAccount<'a> { Ok(()) } + /// Kani stub - not used in verification tests + #[cfg(kani)] + pub fn insert_into_current_batch( + &mut self, + _hash_chain_value: &[u8; 32], + _current_slot: &u64, + ) -> Result<(), BatchedMerkleTreeError> { + panic!("insert_into_current_batch should not be called in Kani tests - use kani_mock_insert instead") + } + /// Proves inclusion of leaf index if it exists in one of the batches. /// 1. Iterate over all batches /// 2. Check if leaf index could exist in the batch. 
@@ -435,17 +516,38 @@ impl<'a> BatchedQueueAccount<'a> { } } +#[cfg(kani)] +impl<'a> BatchedQueueAccount<'a> { + pub fn kani_mock_insert(&mut self, batch_idx: usize) -> Result<(), BatchedMerkleTreeError> { + self.batch_metadata.batches[batch_idx].kani_mock_output_insert() + } +} + impl Deref for BatchedQueueAccount<'_> { type Target = BatchedQueueMetadata; fn deref(&self) -> &Self::Target { - &self.metadata + #[cfg(not(kani))] + { + &self.metadata + } + #[cfg(kani)] + { + self.metadata + } } } impl DerefMut for BatchedQueueAccount<'_> { fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.metadata + #[cfg(not(kani))] + { + &mut self.metadata + } + #[cfg(kani)] + { + self.metadata + } } } diff --git a/program-libs/batched-merkle-tree/tests/constants.rs b/program-libs/batched-merkle-tree/tests/constants.rs new file mode 100644 index 0000000000..12b148e2da --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/constants.rs @@ -0,0 +1,46 @@ +use light_batched_merkle_tree::constants::ADDRESS_TREE_INIT_ROOT_40; +use light_hasher::{Hasher, Poseidon}; +use light_merkle_tree_reference::indexed::IndexedMerkleTree; +use num_bigint::BigUint; +use num_traits::Num; + +#[test] +fn test_reproduce_address_tree_init_root_40() { + // Method 1: Using IndexedMerkleTree library + let tree = IndexedMerkleTree::<Poseidon, usize>::new(40, 0).unwrap(); + let root_from_tree = tree.merkle_tree.root(); + + assert_eq!( + root_from_tree, ADDRESS_TREE_INIT_ROOT_40, + "IndexedMerkleTree root does not match ADDRESS_TREE_INIT_ROOT_40 constant" + ); + + // Method 2: Manual hash computation to verify the constant + // IndexedMerkleTree::new() creates tree with ONE leaf at index 0: + // - IndexedArray::new(0, HIGHEST_ADDRESS_PLUS_ONE) creates element 0 + // - element[0].hash(HIGHEST_ADDRESS_PLUS_ONE) = H(0, HIGHEST_ADDRESS_PLUS_ONE) + // - This single leaf is appended to the merkle tree + + const HIGHEST_ADDRESS_PLUS_ONE: &str = + "452312848583266388373324160190187140051835877600158453279131187530910662655";
+ + let max_value = BigUint::from_str_radix(HIGHEST_ADDRESS_PLUS_ONE, 10).unwrap(); + let max_value_bytes = light_hasher::bigint::bigint_to_be_bytes_array::<32>(&max_value).unwrap(); + + // Leaf 0: H(value=0, nextValue=HIGHEST_ADDRESS_PLUS_ONE) + let leaf_0 = Poseidon::hashv(&[&[0u8; 32], &max_value_bytes]).unwrap(); + + // Build merkle tree root from single leaf + // Hash leaf_0 with zero bytes for the empty right sibling + let mut current_root = Poseidon::hashv(&[&leaf_0, &Poseidon::zero_bytes()[0]]).unwrap(); + + // Hash up the tree to height 40 + for i in 1..40 { + current_root = Poseidon::hashv(&[&current_root, &Poseidon::zero_bytes()[i]]).unwrap(); + } + + assert_eq!( + current_root, ADDRESS_TREE_INIT_ROOT_40, + "Manually computed root does not match ADDRESS_TREE_INIT_ROOT_40 constant" + ); +} diff --git a/program-libs/batched-merkle-tree/tests/kani.rs b/program-libs/batched-merkle-tree/tests/kani.rs new file mode 100644 index 0000000000..7390957820 --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani.rs @@ -0,0 +1,18 @@ +#![cfg(kani)] +// Kani formal verification tests +// This file serves as the entry point for the kani test module +// cargo kani --tests --no-default-features -Z stubbing --features kani +#[path = "kani/batch.rs"] +mod batch; + +#[path = "kani/address_tree_update.rs"] +mod address_tree_update; + +#[path = "kani/ghost_state.rs"] +mod ghost_state; + +#[path = "kani/utils.rs"] +pub mod utils; + +#[path = "kani/state_tree_update.rs"] +mod state_tree_update; diff --git a/program-libs/batched-merkle-tree/tests/kani/address_tree_update.rs b/program-libs/batched-merkle-tree/tests/kani/address_tree_update.rs new file mode 100644 index 0000000000..3e715e1769 --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani/address_tree_update.rs @@ -0,0 +1,153 @@ +#![cfg(kani)] +use light_batched_merkle_tree::{ + batch::BatchState, + merkle_tree::{BatchedMerkleTreeAccount, InstructionDataBatchNullifyInputs}, +}; +use light_compressed_account::{ + 
instruction_data::compressed_proof::CompressedProof, pubkey::Pubkey, TreeType, +}; +use light_merkle_tree_metadata::merkle_tree::MerkleTreeMetadata; + +use crate::utils::*; + +// Minimal full test: +// 0. Setup - create a small tree +// 1. fill 2 batches completely +// 2. fully insert both batches +// +// Verified Properties: +// 1. No unsafe roots should be present (internal invariant) +// Post conditions: +// 2. Both batches are in inserted state +// 3. sequence numbers are 3 + 7 and 6 + 7 +// 4. root history contains one root [5u8; 32] +// 5. bloom filter 0 is zeroed +// 6. bloom filter 1 is not zeroed +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] // Need at least 33 for memcmp on 32-byte arrays + extra for loops +fn verify_no_unsafe_roots_minimal() { + let mut tree = create_test_tree_small(); + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + setup_batches(&mut tree, 2); + // Verify setup succeeded + kani::cover!( + tree.queue_batches.batches[0].batch_is_ready_to_insert(), + "Batch 0 is ready after setup" + ); + kani::cover!( + tree.queue_batches.batches[1].batch_is_ready_to_insert(), + "Batch 1 is ready after setup" + ); + for i in 0..1 { + let num_insertions: u8 = 6; + for i in 1..=num_insertions { + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_address_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), // we stub proof verification internally so the proof doesnt matter + }); + kani::cover!(result.is_ok(), "Update succeeded"); + } + } + + // Postcondition 2: Both batches are in inserted state + assert_eq!( + tree.queue_batches.batches[0].get_state(), + BatchState::Inserted + ); + assert_eq!( + tree.queue_batches.batches[1].get_state(), + BatchState::Inserted + ); + // Postcondition 3: Sequence numbers are 3 + 7 and 6 + 7 + 
assert_eq!(tree.queue_batches.batches[0].sequence_number, 10); + assert_eq!(tree.queue_batches.batches[1].sequence_number, 13); + // Postcondition 4: Root history contains [5u8; 32] + let contains_root_5 = (0..tree.root_history.len()).any(|i| tree.root_history[i] == [6u8; 32]); + assert!(contains_root_5); +} + +// 30 iterations +// VERIFICATION:- SUCCESSFUL +// Verification Time: 647.8201s +// cargo kani --tests --no-default-features -Z stubbing --features kani --harness verify_no_unsafe_roots_one_by_one +/// Comprehensive harness: Verify invariant holds under ALL possible tree states and operations +/// This uses symbolic state generation to explore the entire state space +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] // Need at least 33 for memcmp on 32-byte arrays + extra for loops +fn verify_no_unsafe_roots_one_by_one() { + let mut tree = create_test_tree_small(); + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + + for i in 0..30u8 { + kani::cover!(i == 0, "Loop iteration 0"); + kani::cover!(i == 29, "Loop iteration 29"); + setup_zkp_batches(&mut tree, 1); + + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_address_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), + }); + kani::cover!(result.is_ok(), "Update succeeded"); + } +} + +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] // Need at least 33 for memcmp on 32-byte arrays + extra for loops +fn verify_no_unsafe_roots_random() { + let mut tree = create_test_tree_small(); + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + + let max_zkp_batches = tree.queue_batches.get_num_zkp_batches() as usize; + + // Initial setup: fill variable number of zkp batches + let initial_zkp_count: usize = max_zkp_batches * 2; + // 
kani::assume(initial_zkp_count > 0 && initial_zkp_count <= max_zkp_batches * 2); + setup_zkp_batches(&mut tree, initial_zkp_count); + kani::cover!( + tree.queue_batches.batches[0].batch_is_ready_to_insert(), + "Batch 0 is ready after setup" + ); + kani::cover!( + tree.queue_batches.batches[1].batch_is_ready_to_insert(), + "Batch 1 is ready after setup" + ); + for i in 0..2 { + kani::cover!(i == 0, "Loop iteration 0"); + kani::cover!(i == 1, "Loop iteration 1"); + + // Count how many zkp batches are ready to insert across all batches + let total_ready = get_total_ready_zkp_batches(&tree); + + kani::cover!(total_ready > 0, "At least one zkp batch ready"); + + // Insert ALL ready zkp batches to complete all batches + for j in 0..total_ready { + let new_root: [u8; 32] = [((i * 10 + j) as u8); 32]; + let result = tree.update_tree_from_address_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), + }); + kani::cover!(result.is_ok(), "Update succeeded"); + kani::assume(result.is_ok()); // Assume success to continue + } + // Setup variable number of new zkp batches for next iteration + let available_zkp_space = get_available_zkp_space(&tree); + let next_zkp_count: usize = kani::any(); + kani::assume(next_zkp_count > 0 && next_zkp_count <= available_zkp_space); + setup_zkp_batches(&mut tree, next_zkp_count); + } +} diff --git a/program-libs/batched-merkle-tree/tests/kani/batch.rs b/program-libs/batched-merkle-tree/tests/kani/batch.rs new file mode 100644 index 0000000000..30ee19e55b --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani/batch.rs @@ -0,0 +1,348 @@ +#![cfg(kani)] +use light_batched_merkle_tree::{ + batch::{Batch, BatchState}, + errors::BatchedMerkleTreeError, +}; + +// Helper to create batch with arbitrary valid configuration +fn any_batch() -> Batch { + let num_iters: u64 = kani::any(); + let bloom_filter_capacity: u64 = kani::any(); + let batch_size: u64 = kani::any(); + let zkp_batch_size: u64 = 
kani::any(); + let start_index: u64 = kani::any(); + + // Assume valid constraints + kani::assume(num_iters > 0 && num_iters <= 20); + kani::assume(bloom_filter_capacity > 0 && bloom_filter_capacity <= 1000); + kani::assume(batch_size > 0 && batch_size <= 100000); + kani::assume(zkp_batch_size > 0 && zkp_batch_size <= 2000); + kani::assume(batch_size >= zkp_batch_size); // batch_size must be divisible or larger + kani::assume(batch_size % zkp_batch_size == 0); // Must divide evenly + kani::assume(batch_size / zkp_batch_size < 100); // Keep num_zkp_batches reasonable + + Batch::new( + num_iters, + bloom_filter_capacity, + batch_size, + zkp_batch_size, + start_index, + ) +} + +/// Verify that Fill -> Full transition works correctly +#[kani::proof] +fn verify_fill_to_full_transition() { + let mut batch = any_batch(); + // New batch starts in Fill state + assert_eq!(batch.get_state(), BatchState::Fill); + + // Transition should succeed + let result = batch.advance_state_to_full(); + assert!(result.is_ok()); + + // State should be Full after transition + assert_eq!(batch.get_state(), BatchState::Full); +} + +/// Verify that Full -> Inserted transition works correctly +#[kani::proof] +fn verify_full_to_inserted_transition() { + let mut batch = any_batch(); + + // Get to Full state first + batch.advance_state_to_full().unwrap(); + assert_eq!(batch.get_state(), BatchState::Full); + + // Transition should succeed + let result = batch.advance_state_to_inserted(); + assert!(result.is_ok()); + + // State should be Inserted after transition + assert_eq!(batch.get_state(), BatchState::Inserted); +} + +/// Verify that Inserted -> Fill transition works correctly +#[kani::proof] +fn verify_inserted_to_fill_transition() { + let mut batch = any_batch(); + + // Get to Inserted state + batch.advance_state_to_full().unwrap(); + batch.advance_state_to_inserted().unwrap(); + assert_eq!(batch.get_state(), BatchState::Inserted); + + // Transition should succeed + let result = 
batch.advance_state_to_fill(None); + assert!(result.is_ok()); + + // State should be Fill after transition + assert_eq!(batch.get_state(), BatchState::Fill); + + // Bloom filter should be reset to not zeroed + assert!(!batch.bloom_filter_is_zeroed()); +} + +/// Verify that Inserted -> Fill with start_index works correctly +#[kani::proof] +fn verify_inserted_to_fill_with_start_index() { + let mut batch = any_batch(); + let new_start_index: u64 = kani::any(); + + // Get to Inserted state + batch.advance_state_to_full().unwrap(); + batch.advance_state_to_inserted().unwrap(); + + let result = batch.advance_state_to_fill(Some(new_start_index)); + assert!(result.is_ok()); + assert_eq!(batch.get_state(), BatchState::Fill); +} + +/// Verify that all invalid transitions from Fill fail +#[kani::proof] +fn verify_fill_invalid_transitions() { + let mut batch = any_batch(); + assert_eq!(batch.get_state(), BatchState::Fill); + + // Fill -> Inserted should fail + let result = batch.advance_state_to_inserted(); + assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady)); + assert_eq!(batch.get_state(), BatchState::Fill); // State unchanged + + // Fill -> Fill should fail + let result = batch.advance_state_to_fill(None); + assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady)); + assert_eq!(batch.get_state(), BatchState::Fill); // State unchanged +} + +/// Verify that all invalid transitions from Full fail +#[kani::proof] +fn verify_full_invalid_transitions() { + let mut batch = any_batch(); + batch.advance_state_to_full().unwrap(); + assert_eq!(batch.get_state(), BatchState::Full); + + // Full -> Full should fail + let result = batch.advance_state_to_full(); + assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady)); + assert_eq!(batch.get_state(), BatchState::Full); // State unchanged + + // Full -> Fill should fail + let result = batch.advance_state_to_fill(None); + assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady)); + assert_eq!(batch.get_state(), 
BatchState::Full); // State unchanged +} + +/// Verify that all invalid transitions from Inserted fail +#[kani::proof] +fn verify_inserted_invalid_transitions() { + let mut batch = any_batch(); + batch.advance_state_to_full().unwrap(); + batch.advance_state_to_inserted().unwrap(); + assert_eq!(batch.get_state(), BatchState::Inserted); + + // Inserted -> Full should fail + let result = batch.advance_state_to_full(); + assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady)); + assert_eq!(batch.get_state(), BatchState::Inserted); // State unchanged + + // Inserted -> Inserted should fail + let result = batch.advance_state_to_inserted(); + assert_eq!(result, Err(BatchedMerkleTreeError::BatchNotReady)); + assert_eq!(batch.get_state(), BatchState::Inserted); // State unchanged +} + +/// Verify complete state cycle: Fill -> Full -> Inserted -> Fill +#[kani::proof] +fn verify_complete_state_cycle() { + let mut batch = any_batch(); + assert_eq!(batch.get_state(), BatchState::Fill); + + // Fill -> Full + assert!(batch.advance_state_to_full().is_ok()); + assert_eq!(batch.get_state(), BatchState::Full); + + // Full -> Inserted + assert!(batch.advance_state_to_inserted().is_ok()); + assert_eq!(batch.get_state(), BatchState::Inserted); + + // Inserted -> Fill + assert!(batch.advance_state_to_fill(None).is_ok()); + assert_eq!(batch.get_state(), BatchState::Fill); +} + +/// Verify that state transitions are deterministic +#[kani::proof] +fn verify_state_transition_determinism() { + let mut batch1 = any_batch(); + let mut batch2 = any_batch(); + + // Both should transition identically + assert!(batch1.advance_state_to_full().is_ok()); + assert!(batch2.advance_state_to_full().is_ok()); + + assert_eq!(batch1.get_state(), batch2.get_state()); + assert_eq!(batch1.get_state(), BatchState::Full); +} + +/// Verify that only valid state values map to BatchState enum +#[kani::proof] +fn verify_batch_state_from_u64() { + let value: u64 = kani::any(); + kani::assume(value <= 2); // 
Valid values are 0, 1, 2 + + let state = BatchState::from(value); + + // Verify bidirectional conversion + let back_to_u64: u64 = state.into(); + assert_eq!(value, back_to_u64); +} + +/// Verify bloom filter flag operations +#[kani::proof] +fn verify_bloom_filter_zeroed_flags() { + let mut batch = any_batch(); + + // Initially not zeroed + assert!(!batch.bloom_filter_is_zeroed()); + + // Set to zeroed + batch.set_bloom_filter_to_zeroed(); + assert!(batch.bloom_filter_is_zeroed()); + + // Set back to not zeroed + batch.set_bloom_filter_to_not_zeroed(); + assert!(!batch.bloom_filter_is_zeroed()); +} + +/// Verify that Inserted->Fill resets bloom_filter_is_zeroed flag +#[kani::proof] +fn verify_fill_transition_resets_bloom_filter_flag() { + let mut batch = any_batch(); + + // Get to Inserted state + batch.advance_state_to_full().unwrap(); + batch.advance_state_to_inserted().unwrap(); + + batch.set_bloom_filter_to_zeroed(); + assert!(batch.bloom_filter_is_zeroed()); + + batch.advance_state_to_fill(None).unwrap(); + + // Should be reset to not zeroed + assert!(!batch.bloom_filter_is_zeroed()); +} + +/// Verify start_slot_is_set flag behavior +#[kani::proof] +fn verify_start_slot_flag() { + let mut batch = any_batch(); + + // Initially not set + assert!(!batch.start_slot_is_set()); + + let slot: u64 = kani::any(); + batch.set_start_slot(&slot); + + // Now it should be set + assert!(batch.start_slot_is_set()); + + // Setting again should be idempotent (still set) + let new_slot: u64 = kani::any(); + batch.set_start_slot(&new_slot); + assert!(batch.start_slot_is_set()); +} + +/// Verify start_slot getter/setter duality +#[kani::proof] +fn verify_start_slot_duality() { + let mut batch = any_batch(); + let slot: u64 = kani::any(); + + batch.set_start_slot(&slot); + + // Setter should mark as set + assert!(batch.start_slot_is_set()); +} + +/// Verify that state transitions cover expected execution paths +#[kani::proof] +fn verify_state_transition_coverage() { + let mut batch 
= any_batch(); + + batch.advance_state_to_full().unwrap(); + // Cover: Fill -> Full transition occurred + kani::cover!(batch.get_state() == BatchState::Full); + + batch.advance_state_to_inserted().unwrap(); + // Cover: Full -> Inserted transition occurred + kani::cover!(batch.get_state() == BatchState::Inserted); + + batch.advance_state_to_fill(None).unwrap(); + // Cover: Inserted -> Fill transition occurred + kani::cover!(batch.get_state() == BatchState::Fill); +} + +/// Verify that invalid transitions are properly covered +#[kani::proof] +fn verify_invalid_transition_coverage() { + let mut batch = any_batch(); + + let result = batch.advance_state_to_inserted(); + // Cover: Invalid Fill -> Inserted was attempted and failed + kani::cover!(result.is_err() && batch.get_state() == BatchState::Fill); + + let result = batch.advance_state_to_fill(None); + // Cover: Invalid Fill -> Fill was attempted and failed + kani::cover!(result.is_err() && batch.get_state() == BatchState::Fill); +} + +/// Verify getters return correct computed values +#[kani::proof] +fn verify_computed_getters() { + let batch = any_batch(); + + // Test get_num_zkp_batches: should be batch_size / zkp_batch_size + let expected_num_zkp = batch.get_batch_size() / batch.get_zkp_batch_size(); + assert_eq!(batch.get_num_zkp_batches(), expected_num_zkp); + + // Test get_num_hash_chain_store: should equal num_zkp_batches + assert_eq!(batch.get_num_hash_chain_store(), expected_num_zkp as usize); + + // Initially zero inserted + assert_eq!(batch.get_num_inserted_elements(), 0); + assert_eq!(batch.get_num_elements_inserted_into_tree(), 0); +} + +/// Verify that advance_state_to_fill with None preserves start_index +#[kani::proof] +fn verify_fill_without_index_preserves_start_index() { + let mut batch = any_batch(); + + // Get to Inserted state + batch.advance_state_to_full().unwrap(); + batch.advance_state_to_inserted().unwrap(); + + // With None, start_index should be preserved (we can't check the value but 
verify no error) + let result = batch.advance_state_to_fill(None); + assert!(result.is_ok()); + assert_eq!(batch.get_state(), BatchState::Fill); +} + +/// Verify batch_is_ready_to_insert with fresh batch +#[kani::proof] +fn verify_batch_not_ready_initially() { + let batch = any_batch(); + + // Initially not ready (no full zkp batches) + assert!(!batch.batch_is_ready_to_insert()); +} + +/// Verify get_num_ready_zkp_updates returns 0 initially +#[kani::proof] +fn verify_num_ready_zkp_updates_initial() { + let batch = any_batch(); + + // Initially no ready updates + assert_eq!(batch.get_num_ready_zkp_updates(), 0); +} diff --git a/program-libs/batched-merkle-tree/tests/kani/ghost_state.rs b/program-libs/batched-merkle-tree/tests/kani/ghost_state.rs new file mode 100644 index 0000000000..b63eaeb765 --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani/ghost_state.rs @@ -0,0 +1,137 @@ +#![cfg(kani)] +// Unit tests for ghost state tracking in BatchedMerkleTreeAccount. + +use light_batched_merkle_tree::merkle_tree::{ + BatchedMerkleTreeAccount, InstructionDataBatchNullifyInputs, +}; +use light_compressed_account::{ + instruction_data::compressed_proof::CompressedProof, pubkey::Pubkey, TreeType, +}; +use light_merkle_tree_metadata::merkle_tree::MerkleTreeMetadata; + +use crate::utils::*; + +/// Verify ghost state invariant holds after tree initialization +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(11)] +fn verify_ghost_state_initial() { + let tree = create_test_tree_small(); + + // Initially, no batches should be zeroed + assert!(!tree.queue_batches.batches[0].bloom_filter_is_zeroed()); + assert!(!tree.queue_batches.batches[1].bloom_filter_is_zeroed()); + + // Ghost state invariant should hold + // This is automatically checked by the structural invariant +} + +/// Verify ghost state is correctly tracked when roots are inserted +#[kani::proof] +#[kani::stub( + 
::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(11)] +fn verify_ghost_state_tracks_roots() { + let mut tree = create_test_tree_small(); + + // Symbolic root to insert + let new_root: [u8; 32] = kani::any(); + kani::assume(new_root != [0u8; 32]); + + let batch_idx = tree.queue_batches.pending_batch_index as usize; + let seq_num = tree.sequence_number + 1; + + // Track root in ghost state + tree.ghost_root_batch + .track_root(batch_idx, seq_num, new_root); + + // Verify root was tracked in correct batch + if batch_idx == 0 { + let tracked = (0..tree.ghost_root_batch.batch_0.len()) + .any(|i| tree.ghost_root_batch.batch_0[i].root == new_root); + assert!(tracked); + } else { + let tracked = (0..tree.ghost_root_batch.batch_1.len()) + .any(|i| tree.ghost_root_batch.batch_1[i].root == new_root); + assert!(tracked); + } +} + +/// Verify invariant when batch 0 is zeroed +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(11)] +fn verify_ghost_state_batch_0_zeroed() { + let mut tree = create_test_tree_small(); + + // Symbolically set batch 0 as zeroed + tree.queue_batches.batches[0].set_bloom_filter_to_zeroed(); + + // Add some symbolic roots to root_history + let num_roots: usize = kani::any(); + kani::assume(num_roots > 0 && num_roots <= tree.root_history.capacity()); + + for _ in 0..num_roots { + let root: [u8; 32] = kani::any(); + tree.root_history.push(root); + + // Track in batch_1 (since batch_0 is zeroed) + tree.ghost_root_batch + .track_root(1, tree.sequence_number, root); + tree.sequence_number += 1; + } + + // The invariant check should pass + // (automatically verified by structural invariant) +} + +/// Comprehensive harness: Verify invariant holds under ALL possible tree states and operations +/// This uses symbolic state generation to explore the entire state space +#[kani::proof] +#[kani::stub( + 
::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] // Need at least 33 for memcmp on 32-byte arrays + extra for loops +fn verify_no_unsafe_roots_ever() { + let mut tree = create_test_tree_small(); + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + setup_batches(&mut tree, 2); + + // // PHASE 0: Setup - fill up to two batches to make them ready + // let num_setup_batches: usize = kani::any(); + // kani::assume(num_setup_batches > 0 && num_setup_batches <= 2); + for i in 0..5 { + // Verify setup succeeded + kani::cover!( + tree.queue_batches.batches[tree.queue_batches.pending_batch_index as usize] + .batch_is_ready_to_insert(), + "Batch is ready after setup" + ); + + let num_insertions: usize = if i == 0 { + 6 // 2 batches + } else { + 3 // 1 batch + }; + + for _ in 0..num_insertions { + let new_root: [u8; 32] = [1u8; 32]; + let result = tree.update_tree_from_address_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), // we stub proof verification so the proof doesnt matter + }); + kani::cover!(result.is_ok(), "Update succeeded"); + } + kani::cover!(i == 2, "i == 2"); + setup_batches(&mut tree, 1); + } +} diff --git a/program-libs/batched-merkle-tree/tests/kani/mod.rs b/program-libs/batched-merkle-tree/tests/kani/mod.rs new file mode 100644 index 0000000000..42aeeef76b --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani/mod.rs @@ -0,0 +1,2 @@ +pub mod utils; +pub mod state_tree_update; diff --git a/program-libs/batched-merkle-tree/tests/kani/state_tree_update.rs b/program-libs/batched-merkle-tree/tests/kani/state_tree_update.rs new file mode 100644 index 0000000000..ba01b17a09 --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani/state_tree_update.rs @@ -0,0 +1,343 @@ +#![cfg(kani)] +use light_batched_merkle_tree::{ + batch::BatchState, + merkle_tree::{ + BatchedMerkleTreeAccount, InstructionDataBatchAppendInputs, + 
InstructionDataBatchNullifyInputs, + }, + queue::BatchedQueueAccount, +}; +use light_compressed_account::{ + instruction_data::compressed_proof::CompressedProof, pubkey::Pubkey, TreeType, +}; +use light_merkle_tree_metadata::merkle_tree::MerkleTreeMetadata; + +use crate::utils::*; + +// Minimal full test: +// 0. Setup - create a small state tree +// 1. fill 2 batches completely +// 2. fully insert both batches via input queue (nullify operations) +// +// Verified Properties: +// 1. No unsafe roots should be present (internal invariant) +// Post conditions: +// 2. Both batches are in inserted state +// 3. sequence numbers are 3 + 7 and 6 + 7 +// 4. root history contains one root [6u8; 32] +// 5. bloom filter 0 is zeroed +// 6. bloom filter 1 is not zeroed +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] // Need at least 33 for memcmp on 32-byte arrays + extra for loops +fn verify_state_tree_update_minimal() { + let mut tree = create_test_tree_small_state(); + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + setup_batches(&mut tree, 2); + // Verify setup succeeded + kani::cover!( + tree.queue_batches.batches[0].batch_is_ready_to_insert(), + "Batch 0 is ready after setup" + ); + kani::cover!( + tree.queue_batches.batches[1].batch_is_ready_to_insert(), + "Batch 1 is ready after setup" + ); + for i in 0..1 { + let num_insertions: u8 = 6; + for i in 1..=num_insertions { + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_input_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), // we stub proof verification internally so the proof doesnt matter + }); + kani::cover!(result.is_ok(), "Update succeeded"); + } + } + + // Postcondition 2: Both batches are in inserted state + assert_eq!( + tree.queue_batches.batches[0].get_state(), + BatchState::Inserted + ); + assert_eq!( + 
tree.queue_batches.batches[1].get_state(), + BatchState::Inserted + ); + // Postcondition 3: Sequence numbers are 3 + 7 and 6 + 7 + assert_eq!(tree.queue_batches.batches[0].sequence_number, 10); + assert_eq!(tree.queue_batches.batches[1].sequence_number, 13); + // Postcondition 4: Root history contains [6u8; 32] + let contains_root_5 = (0..tree.root_history.len()).any(|i| tree.root_history[i] == [6u8; 32]); + assert!(contains_root_5); +} + +// VERIFICATION:- SUCCESSFUL +// Verification Time: 704.7746s +// cargo kani --tests --no-default-features -Z stubbing --features kani --harness verify_state_tree_update_one_by_one +/// Comprehensive harness: Verify invariant holds under ALL possible tree states and operations +/// This uses symbolic state generation to explore the entire state space +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] // Need at least 33 for memcmp on 32-byte arrays + extra for loops +fn verify_state_tree_update_one_by_one() { + let mut tree = create_test_tree_small_state(); + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + + for i in 0..30u8 { + kani::cover!(i == 0, "Loop iteration 0"); + kani::cover!(i == 29, "Loop iteration 29"); + setup_zkp_batches(&mut tree, 1); + + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_input_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), + }); + kani::cover!(result.is_ok(), "Update succeeded"); + } +} + +// Minimal full test for output queue (batch append): +// 0. Setup - create a small state tree + output queue +// 1. fill 2 batches completely in output queue +// 2. fully insert both batches via batch append operations +// +// Verified Properties: +// 1. No unsafe roots should be present (internal invariant) +// Post conditions: +// 2. Both queue batches are in inserted state +// 3. 
Both tree batches are in inserted state +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] +fn verify_state_tree_append_minimal() { + // 0. Setup - create state tree and associated output queue + let mut tree = create_test_tree_small_state(); + let tree_pubkey = *tree.pubkey(); + let mut queue = create_test_output_queue(&tree_pubkey); + + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + + // 1. Fill 2 batches completely in output queue + setup_output_queue_batches(&mut queue, 2); + + // Verify setup succeeded + kani::cover!( + queue.batch_metadata.batches[0].batch_is_ready_to_insert(), + "Queue batch 0 is ready after setup" + ); + kani::cover!( + queue.batch_metadata.batches[1].batch_is_ready_to_insert(), + "Queue batch 1 is ready after setup" + ); + + // 2. Fully insert both batches via output queue (batch append) + for i in 1..=6u8 { + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_output_queue_account( + &mut queue, + InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof::default(), + }, + ); + kani::cover!(result.is_ok(), "Update succeeded"); + } + + // Postcondition: Both queue batches are in inserted state + assert_eq!( + queue.batch_metadata.batches[0].get_state(), + BatchState::Inserted + ); + assert_eq!( + queue.batch_metadata.batches[1].get_state(), + BatchState::Inserted + ); + // Postcondition 4: Root history contains [6u8; 32] + let contains_root_5 = (0..tree.root_history.len()).any(|i| tree.root_history[i] == [6u8; 32]); + assert!(contains_root_5); +} + +/// Comprehensive harness: Verify invariant holds under ALL possible tree states and operations +/// This uses symbolic state generation to explore the entire state space for output queue operations +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] +fn 
verify_state_tree_append_one_by_one() { + // 0. Setup - create state tree and associated output queue + let mut tree = create_test_tree_small_state(); + let tree_pubkey = *tree.pubkey(); + let mut queue = create_test_output_queue(&tree_pubkey); + + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + + for i in 0..30u8 { + kani::cover!(i == 0, "Loop iteration 0"); + kani::cover!(i == 29, "Loop iteration 29"); + setup_output_queue_zkp_batches(&mut queue, 1); + + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_output_queue_account( + &mut queue, + InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof::default(), + }, + ); + kani::cover!(result.is_ok(), "Update succeeded"); + } +} + +// VERIFICATION:- SUCCESSFUL +// Verification Time: 884.6175s +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] +fn verify_state_tree_mixed_one_by_one() { + // 0. 
Setup - create state tree and associated output queue + let mut tree = create_test_tree_small_state(); + let tree_pubkey = *tree.pubkey(); + let mut queue = create_test_output_queue(&tree_pubkey); + + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + + for i in (0..30u8).step_by(2) { + kani::cover!(i == 0, "Loop iteration 0"); + kani::cover!(i == 28, "Loop iteration 28"); + setup_output_queue_zkp_batches(&mut queue, 1); + // Input queue insertion + setup_zkp_batches(&mut tree, 1); + + let new_root: [u8; 32] = [i; 32]; + let result = tree.update_tree_from_output_queue_account( + &mut queue, + InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof::default(), + }, + ); + kani::cover!( + result.is_ok(), + "update_tree_from_output_queue_account succeeded" + ); + + let new_root: [u8; 32] = [i + 1; 32]; + let result = tree.update_tree_from_input_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), + }); + + kani::cover!(result.is_ok(), "update_tree_from_input_queue succeeded"); + } +} + +#[kani::proof] +#[kani::stub( + ::light_compressed_account::hash_to_bn254_field_size_be, + stub_hash_to_bn254 +)] +#[kani::unwind(35)] +fn verify_state_tree_mixed_random() { + // 0. 
Setup - create state tree and associated output queue + let mut tree = create_test_tree_small_state(); + let tree_pubkey = *tree.pubkey(); + let mut queue = create_test_output_queue(&tree_pubkey); + + kani::cover!(tree.root_history.len() > 0, "Root history non-empty"); + let mut index = 0u8; + for i in (0..10u8) { + kani::cover!(i == 0, "Loop iteration 0"); + kani::cover!(i == 9, "Loop iteration 9"); + setup_output_queue_zkp_batches(&mut queue, 1); + // Input queue insertion + setup_zkp_batches(&mut tree, 1); + + let new_root: [u8; 32] = [index; 32]; + index += 1; + let result = tree.update_tree_from_output_queue_account( + &mut queue, + InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof::default(), + }, + ); + kani::cover!( + result.is_ok(), + "update_tree_from_output_queue_account succeeded 0" + ); + + let new_root: [u8; 32] = [index; 32]; + index += 1; + let result = tree.update_tree_from_input_queue(InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof::default(), + }); + + kani::cover!(result.is_ok(), "update_tree_from_input_queue succeeded 0"); + + let new_root: [u8; 32] = [index; 32]; + index += 1; + let result = tree.update_tree_from_output_queue_account( + &mut queue, + InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof::default(), + }, + ); + kani::cover!( + result.is_ok(), + "update_tree_from_output_queue_account succeeded 1" + ); + } + + // for i in 0..5u8 { + // kani::cover!(i == 0, "Loop iteration 0"); + // kani::cover!(i == 1, "Loop iteration 1"); + + // let new_root: [u8; 32] = [i + 13; 32]; + // let selector: bool = kani::any(); + // if selector { + // setup_output_queue_zkp_batches(&mut queue, 1); + // kani::cover!( + // selector, + // "pre update_tree_from_output_queue_account succeeded 1" + // ); + // let result = tree.update_tree_from_output_queue_account( + // &mut queue, + // InstructionDataBatchAppendInputs { + // new_root, + // compressed_proof: 
CompressedProof::default(), + // }, + // ); + // kani::cover!( + // result.is_ok(), + // "update_tree_from_output_queue_account succeeded 1" + // ); + // } else { + // // Input queue insertion + // setup_zkp_batches(&mut tree, 1); + // let result = tree.update_tree_from_input_queue(InstructionDataBatchNullifyInputs { + // new_root, + // compressed_proof: CompressedProof::default(), + // }); + + // kani::cover!(result.is_ok(), "update_tree_from_input_queue succeeded 1"); + // } + // } +} diff --git a/program-libs/batched-merkle-tree/tests/kani/utils.rs b/program-libs/batched-merkle-tree/tests/kani/utils.rs new file mode 100644 index 0000000000..b807bda8df --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/kani/utils.rs @@ -0,0 +1,301 @@ +#![cfg(kani)] +use light_batched_merkle_tree::{ + batch::BatchState, + merkle_tree::{BatchedMerkleTreeAccount, InstructionDataBatchNullifyInputs}, +}; +use light_compressed_account::{ + instruction_data::compressed_proof::CompressedProof, pubkey::Pubkey, TreeType, +}; +use light_merkle_tree_metadata::merkle_tree::MerkleTreeMetadata; + +// Stub for hash_to_bn254_field_size_be +pub fn stub_hash_to_bn254(_input: &[u8]) -> [u8; 32] { + [1u8; 32] +} + +// Helper to create a minimal tree for ghost state testing +pub fn create_test_tree_big() -> BatchedMerkleTreeAccount<'static> { + let batch_size: u64 = 3; //TEST_DEFAULT_BATCH_SIZE; + let zkp_batch_size: u64 = 1; // TEST_DEFAULT_ZKP_BATCH_SIZE; + let root_history_capacity: u32 = 30; + let height = 40; // Address trees require height 40 + let num_iters = 1; + let bloom_filter_capacity = 8; // Minimum 8 bits = 1 byte + + // Calculate required size (includes ghost state when kani feature is enabled) + let size = light_batched_merkle_tree::merkle_tree::get_merkle_tree_account_size( + batch_size, + bloom_filter_capacity, + zkp_batch_size, + root_history_capacity, + height, + ); + + // Allocate using mem::zeroed() reduces branches in Kani + let account_data: &'static mut [u8; 8096] = 
Box::leak(Box::new(unsafe { std::mem::zeroed() })); + let account_data: &'static mut [u8] = &mut account_data[..size]; + let pubkey = Pubkey::new_from_array([1u8; 32]); + + let init_result = BatchedMerkleTreeAccount::init( + account_data, + &pubkey, + MerkleTreeMetadata::default(), + root_history_capacity, + batch_size, + zkp_batch_size, + height, + num_iters, + bloom_filter_capacity, + TreeType::AddressV2, + ); + + kani::assume(init_result.is_ok()); + kani::cover!(init_result.is_ok(), "init_result"); + init_result.unwrap() +} + +// Helper to create a minimal tree for ghost state testing +pub fn create_test_tree_small() -> BatchedMerkleTreeAccount<'static> { + let batch_size: u64 = 3; + let zkp_batch_size: u64 = 1; + let root_history_capacity: u32 = 7; + let height = 40; // Address trees require height 40 + let num_iters = 1; + let bloom_filter_capacity = 8; // Minimum 8 bits = 1 byte + + // Calculate required size (includes ghost state when kani feature is enabled) + let size = light_batched_merkle_tree::merkle_tree::get_merkle_tree_account_size( + batch_size, + bloom_filter_capacity, + zkp_batch_size, + root_history_capacity, + height, + ); + + // Allocate using mem::zeroed() which Kani understands as properly zero-initialized + let account_data: &'static mut [u8; 2048] = Box::leak(Box::new(unsafe { std::mem::zeroed() })); + let account_data: &'static mut [u8] = &mut account_data[..size]; + let pubkey = Pubkey::new_from_array([1u8; 32]); + + let init_result = BatchedMerkleTreeAccount::init( + account_data, + &pubkey, + MerkleTreeMetadata::default(), + root_history_capacity, + batch_size, + zkp_batch_size, + height, + num_iters, + bloom_filter_capacity, + TreeType::AddressV2, + ); + + kani::assume(init_result.is_ok()); + kani::cover!(init_result.is_ok(), "init_result"); + init_result.unwrap() +} + +// Helper to create a minimal state tree for ghost state testing +pub fn create_test_tree_small_state() -> BatchedMerkleTreeAccount<'static> { + let batch_size: u64 = 
3; + let zkp_batch_size: u64 = 1; + let root_history_capacity: u32 = 7; + let height = 32; // State trees use height 32 + let num_iters = 1; + let bloom_filter_capacity = 8; // Minimum 8 bits = 1 byte + + // Calculate required size (includes ghost state when kani feature is enabled) + let size = light_batched_merkle_tree::merkle_tree::get_merkle_tree_account_size( + batch_size, + bloom_filter_capacity, + zkp_batch_size, + root_history_capacity, + height, + ); + + // Allocate using mem::zeroed() which Kani understands as properly zero-initialized + let account_data: &'static mut [u8; 2048] = Box::leak(Box::new(unsafe { std::mem::zeroed() })); + let account_data: &'static mut [u8] = &mut account_data[..size]; + let pubkey = Pubkey::new_from_array([1u8; 32]); + + let init_result = BatchedMerkleTreeAccount::init( + account_data, + &pubkey, + MerkleTreeMetadata::default(), + root_history_capacity, + batch_size, + zkp_batch_size, + height, + num_iters, + bloom_filter_capacity, + TreeType::StateV2, + ); + + kani::assume(init_result.is_ok()); + kani::cover!(init_result.is_ok(), "init_result"); + init_result.unwrap() +} + +// Setup function: Fill up to two batches to make them ready for ZKP processing +// This function populates the hash chain stores and batch metadata needed for tree updates +#[cfg_attr(kani, kani::requires(num_batches > 0 && num_batches <= 2))] +#[cfg_attr(kani, kani::requires(tree.queue_batches.batch_size > 0))] +#[cfg_attr(kani, kani::requires(tree.hash_chain_stores.len() == 2))] +pub fn setup_batches(tree: &mut BatchedMerkleTreeAccount, num_batches: usize) { + let batch_size = tree.queue_batches.batch_size; + let value: [u8; 32] = [2u8; 32]; + + // Insert following currently_processing_batch_index (mirrors real queue behavior) + for i in 0..num_batches { + let current_idx = tree.queue_batches.currently_processing_batch_index as usize; + + for j in 0..batch_size { + let result = tree.kani_mock_insert(current_idx, &value); + kani::assume(result.is_ok()); 
+ } + + // After batch becomes Full, advance to next batch (mirrors queue.rs:590) + tree.queue_batches + .increment_currently_processing_batch_index_if_full(); + } +} + +#[cfg_attr(kani, kani::requires(num_zkp_batches > 0 && num_zkp_batches <= tree.queue_batches.get_num_zkp_batches() as usize * 2))] +#[cfg_attr(kani, kani::requires(tree.queue_batches.batch_size > 0))] +#[cfg_attr(kani, kani::requires(tree.hash_chain_stores.len() == 2))] +pub fn setup_zkp_batches(tree: &mut BatchedMerkleTreeAccount, num_zkp_batches: usize) { + let batch_size = tree.queue_batches.batch_size; + let value: [u8; 32] = [2u8; 32]; + + // Insert following currently_processing_batch_index (mirrors real queue behavior) + for i in 0..num_zkp_batches { + let current_idx = tree.queue_batches.currently_processing_batch_index as usize; + + kani::cover!(i == 0, "Entered setup batch loop"); + let result = tree.kani_mock_insert(current_idx, &value); + kani::assume(result.is_ok()); + // After batch becomes Full, advance to next batch (mirrors queue.rs:590) + // TODO: add increment_currently_processing_batch_index_if_full internally to kani_mock_insert + tree.queue_batches + .increment_currently_processing_batch_index_if_full(); + } +} + +/// Calculate total number of zkp batches ready to insert across both batches +pub fn get_total_ready_zkp_batches(tree: &BatchedMerkleTreeAccount) -> usize { + let batch_0_ready = if tree.queue_batches.batches[0].batch_is_ready_to_insert() { + tree.queue_batches.batches[0].get_num_ready_zkp_updates() + } else { + 0 + }; + let batch_1_ready = if tree.queue_batches.batches[1].batch_is_ready_to_insert() { + tree.queue_batches.batches[1].get_num_ready_zkp_updates() + } else { + 0 + }; + (batch_0_ready + batch_1_ready) as usize +} + +/// Calculate available zkp batch space across both batches +pub fn get_available_zkp_space(tree: &BatchedMerkleTreeAccount) -> usize { + let max_zkp_batches = tree.queue_batches.get_num_zkp_batches() as usize; + + let batch_0_space = if 
tree.queue_batches.batches[0].get_state() == BatchState::Inserted { + max_zkp_batches + } else { + let num_full = tree.queue_batches.batches[0].get_num_inserted_zkps() + + tree.queue_batches.batches[0].get_num_ready_zkp_updates(); + (max_zkp_batches as u64 - num_full) as usize + }; + + let batch_1_space = if tree.queue_batches.batches[1].get_state() == BatchState::Inserted { + max_zkp_batches + } else { + let num_full = tree.queue_batches.batches[1].get_num_inserted_zkps() + + tree.queue_batches.batches[1].get_num_ready_zkp_updates(); + (max_zkp_batches as u64 - num_full) as usize + }; + + batch_0_space + batch_1_space +} + +// Helper to create a minimal output queue for state tree testing +pub fn create_test_output_queue( + tree_pubkey: &Pubkey, +) -> light_batched_merkle_tree::queue::BatchedQueueAccount<'static> { + use light_batched_merkle_tree::queue::{get_output_queue_account_size, BatchedQueueAccount}; + use light_compressed_account::QueueType; + use light_merkle_tree_metadata::queue::QueueMetadata; + + let batch_size: u64 = 3; + let zkp_batch_size: u64 = 1; + + let size = get_output_queue_account_size(batch_size, zkp_batch_size); + + let account_data: &'static mut [u8; 2048] = Box::leak(Box::new(unsafe { std::mem::zeroed() })); + let account_data: &'static mut [u8] = &mut account_data[..size]; + + let queue_pubkey = Pubkey::new_from_array([2u8; 32]); + + let mut metadata = QueueMetadata::default(); + metadata.associated_merkle_tree = *tree_pubkey; + metadata.queue_type = QueueType::OutputStateV2 as u64; + + let init_result = BatchedQueueAccount::init( + account_data, + metadata, + batch_size, + zkp_batch_size, + 0, // num_iters (usually 0 for output queues) + 0, // bloom_filter_capacity (MUST be 0 for output queues!) 
+ queue_pubkey, + 16, // tree_capacity for height 32 state tree + ); + + // kani::assume(init_result.is_ok()); + kani::cover!(init_result.is_ok(), "Queue init succeeded"); + init_result.unwrap() +} + +// Setup function: Fill output queue batches to make them ready for tree insertion +#[cfg_attr(kani, kani::requires(num_batches > 0 && num_batches <= 2))] +pub fn setup_output_queue_batches( + queue: &mut light_batched_merkle_tree::queue::BatchedQueueAccount, + num_batches: usize, +) { + let batch_size = queue.batch_metadata.batch_size; + + for _i in 0..num_batches { + let current_idx = queue.batch_metadata.currently_processing_batch_index as usize; + + for _j in 0..batch_size { + let result = queue.kani_mock_insert(current_idx); + kani::assume(result.is_ok()); + } + + // After batch becomes Full, advance to next batch + queue + .batch_metadata + .increment_currently_processing_batch_index_if_full(); + } +} + +// Setup function: Fill output queue zkp batches (one zkp batch at a time) +#[cfg_attr(kani, kani::requires(num_zkp_batches > 0))] +pub fn setup_output_queue_zkp_batches( + queue: &mut light_batched_merkle_tree::queue::BatchedQueueAccount, + num_zkp_batches: usize, +) { + for i in 0..num_zkp_batches { + let current_idx = queue.batch_metadata.currently_processing_batch_index as usize; + + kani::cover!(i == 0, "Entered setup output queue zkp batch loop"); + let result = queue.kani_mock_insert(current_idx); + kani::assume(result.is_ok()); + + // After batch becomes Full, advance to next batch + queue + .batch_metadata + .increment_currently_processing_batch_index_if_full(); + } +} diff --git a/program-libs/batched-merkle-tree/tests/merkle_tree.rs b/program-libs/batched-merkle-tree/tests/merkle_tree.rs new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/program-libs/batched-merkle-tree/tests/merkle_tree.rs @@ -0,0 +1 @@ + diff --git a/program-libs/zero-copy/Cargo.toml b/program-libs/zero-copy/Cargo.toml index 3018d402ca..3b127bf0c6 100644 --- 
a/program-libs/zero-copy/Cargo.toml +++ b/program-libs/zero-copy/Cargo.toml @@ -14,6 +14,7 @@ solana = ["solana-program-error"] pinocchio = ["dep:pinocchio"] derive = ["light-zero-copy-derive"] mut = ["light-zero-copy-derive/mut"] +kani = [] [dependencies] solana-program-error = { workspace = true, optional = true } @@ -27,3 +28,7 @@ zerocopy = { workspace = true, features = ["derive"] } borsh = { workspace = true } trybuild = "1.0" light-zero-copy-derive = { workspace = true } + +[lints.rust.unexpected_cfgs] +level = "allow" +check-cfg = ['cfg(kani)'] diff --git a/program-libs/zero-copy/src/cyclic_vec.rs b/program-libs/zero-copy/src/cyclic_vec.rs index 80815ffe01..30797b6b0e 100644 --- a/program-libs/zero-copy/src/cyclic_vec.rs +++ b/program-libs/zero-copy/src/cyclic_vec.rs @@ -7,7 +7,9 @@ use core::{ #[cfg(feature = "std")] use std::vec::Vec; -use zerocopy::{little_endian::U32, Ref}; +use zerocopy::little_endian::U32; +#[cfg(not(feature = "kani"))] +use zerocopy::Ref; use crate::{add_padding, errors::ZeroCopyError, ZeroCopyTraits}; @@ -17,6 +19,7 @@ pub type ZeroCopyCyclicVecU16<'a, T> = ZeroCopyCyclicVec<'a, u16, T>; pub type ZeroCopyCyclicVecU8<'a, T> = ZeroCopyCyclicVec<'a, u8, T>; pub type ZeroCopyCyclicVecBorsh<'a, T> = ZeroCopyCyclicVec<'a, U32, T>; +#[cfg(not(feature = "kani"))] pub struct ZeroCopyCyclicVec<'a, L, T, const PAD: bool = true> where L: ZeroCopyTraits, @@ -28,6 +31,18 @@ where slice: Ref<&'a mut [u8], [T]>, } +#[cfg(feature = "kani")] +pub struct ZeroCopyCyclicVec<'a, L, T, const PAD: bool = true> +where + L: ZeroCopyTraits, + T: ZeroCopyTraits, + u64: From + TryInto, +{ + /// Simplified struct for kani verification - avoids complex zerocopy Ref type + metadata: [L; 3], // Direct array instead of Ref + slice: &'a mut [T], // Direct slice instead of Ref +} + const CURRENT_INDEX_INDEX: usize = 0; const LENGTH_INDEX: usize = 1; const CAPACITY_INDEX: usize = 2; @@ -55,19 +70,51 @@ where } let (meta_data, bytes) = bytes.split_at_mut(metadata_size); 
+ #[cfg(not(feature = "kani"))] let (mut metadata, _padding) = Ref::<&mut [u8], [L; 3]>::from_prefix(meta_data)?; - if u64::from(metadata[LENGTH_INDEX]) != 0 - || u64::from(metadata[CURRENT_INDEX_INDEX]) != 0 - || u64::from(metadata[CAPACITY_INDEX]) != 0 + #[cfg(feature = "kani")] + let metadata = unsafe { + let ptr = meta_data.as_ptr() as *const [L; 3]; + let mut metadata = core::ptr::read_unaligned(ptr); + if u64::from(metadata[LENGTH_INDEX]) != 0 + || u64::from(metadata[CURRENT_INDEX_INDEX]) != 0 + || u64::from(metadata[CAPACITY_INDEX]) != 0 + { + return Err(ZeroCopyError::MemoryNotZeroed); + } + metadata[CAPACITY_INDEX] = capacity; + let write_ptr = meta_data.as_mut_ptr() as *mut [L; 3]; + core::ptr::write_unaligned(write_ptr, metadata); + metadata + }; + + #[cfg(not(feature = "kani"))] { - return Err(ZeroCopyError::MemoryNotZeroed); + if u64::from(metadata[LENGTH_INDEX]) != 0 + || u64::from(metadata[CURRENT_INDEX_INDEX]) != 0 + || u64::from(metadata[CAPACITY_INDEX]) != 0 + { + return Err(ZeroCopyError::MemoryNotZeroed); + } + metadata[CAPACITY_INDEX] = capacity; } - metadata[CAPACITY_INDEX] = capacity; let capacity_usize: usize = u64::from(metadata[CAPACITY_INDEX]) as usize; + #[cfg(not(feature = "kani"))] let (slice, remaining_bytes) = Ref::<&mut [u8], [T]>::from_prefix_with_elems(bytes, capacity_usize)?; + + #[cfg(feature = "kani")] + let (slice, remaining_bytes) = { + let needed_size = capacity_usize * size_of::(); + let (slice_bytes, remaining) = bytes.split_at_mut(needed_size); + let slice = unsafe { + let ptr = slice_bytes.as_mut_ptr() as *mut T; + core::slice::from_raw_parts_mut(ptr, capacity_usize) + }; + (slice, remaining) + }; Ok((Self { metadata, slice }, remaining_bytes)) } @@ -86,7 +133,15 @@ where } let (meta_data, bytes) = bytes.split_at_mut(metadata_size); + + #[cfg(not(feature = "kani"))] let (metadata, _padding) = Ref::<&mut [u8], [L; 3]>::from_prefix(meta_data)?; + + #[cfg(feature = "kani")] + let metadata = unsafe { + let ptr = 
meta_data.as_ptr() as *const [L; 3]; + core::ptr::read_unaligned(ptr) + }; let usize_capacity: usize = u64::from(metadata[CAPACITY_INDEX]) as usize; let usize_len: usize = u64::from(metadata[LENGTH_INDEX]) as usize; let usize_current_index: usize = u64::from(metadata[CURRENT_INDEX_INDEX]) as usize; @@ -106,8 +161,20 @@ where full_vector_size + metadata_size, )); } + #[cfg(not(feature = "kani"))] let (slice, remaining_bytes) = Ref::<&mut [u8], [T]>::from_prefix_with_elems(bytes, usize_capacity)?; + + #[cfg(feature = "kani")] + let (slice, remaining_bytes) = { + let needed_size = usize_capacity * size_of::(); + let (slice_bytes, remaining) = bytes.split_at_mut(needed_size); + let slice = unsafe { + let ptr = slice_bytes.as_mut_ptr() as *mut T; + core::slice::from_raw_parts_mut(ptr, usize_capacity) + }; + (slice, remaining) + }; Ok((Self { metadata, slice }, remaining_bytes)) } diff --git a/program-libs/zero-copy/src/vec.rs b/program-libs/zero-copy/src/vec.rs index 24038790c0..b31f9761bd 100644 --- a/program-libs/zero-copy/src/vec.rs +++ b/program-libs/zero-copy/src/vec.rs @@ -7,7 +7,9 @@ use core::{ #[cfg(feature = "std")] use std::vec::Vec; -use zerocopy::{little_endian::U32, IntoBytes, Ref}; +use zerocopy::little_endian::U32; +#[cfg(not(feature = "kani"))] +use zerocopy::{IntoBytes, Ref}; use crate::{add_padding, errors::ZeroCopyError, ZeroCopyTraits}; @@ -21,6 +23,7 @@ pub type ZeroCopyVecBorsh<'a, T> = ZeroCopyVec<'a, U32, T, false>; /// post-initialization reallocations. The size is not known during compile /// time (that makes it different from arrays), but can be defined only once /// (that makes it different from [`Vec`](std::vec::Vec)). 
+#[cfg(not(feature = "kani"))] pub struct ZeroCopyVec<'a, L, T, const PAD: bool = true> where L: ZeroCopyTraits, @@ -31,6 +34,17 @@ where slice: Ref<&'a mut [u8], [T]>, } +#[cfg(feature = "kani")] +pub struct ZeroCopyVec<'a, L, T, const PAD: bool = true> +where + L: ZeroCopyTraits, + T: ZeroCopyTraits, +{ + /// Simplified struct for kani verification - avoids complex zerocopy Ref type + metadata: [L; 2], // Direct array instead of Ref + slice: &'a mut [T], // Direct slice instead of Ref +} + const LENGTH_INDEX: usize = 0; const CAPACITY_INDEX: usize = 1; @@ -54,15 +68,49 @@ where } let (meta_data, bytes) = bytes.split_at_mut(metadata_size); + #[cfg(not(feature = "kani"))] let (mut metadata, _padding) = Ref::<&mut [u8], [L; 2]>::from_prefix(meta_data)?; - if u64::from(metadata[LENGTH_INDEX]) != 0 || u64::from(metadata[CAPACITY_INDEX]) != 0 { - return Err(ZeroCopyError::MemoryNotZeroed); + + #[cfg(feature = "kani")] + let metadata = unsafe { + let ptr = meta_data.as_ptr() as *const [L; 2]; + let mut metadata = core::ptr::read_unaligned(ptr); + if u64::from(metadata[LENGTH_INDEX]) != 0 || u64::from(metadata[CAPACITY_INDEX]) != 0 { + return Err(ZeroCopyError::MemoryNotZeroed); + } + metadata[CAPACITY_INDEX] = capacity; + let write_ptr = meta_data.as_mut_ptr() as *mut [L; 2]; + core::ptr::write_unaligned(write_ptr, metadata); + metadata + }; + + #[cfg(not(feature = "kani"))] + { + if u64::from(metadata[LENGTH_INDEX]) != 0 || u64::from(metadata[CAPACITY_INDEX]) != 0 { + return Err(ZeroCopyError::MemoryNotZeroed); + } + metadata[CAPACITY_INDEX] = capacity; } - metadata[CAPACITY_INDEX] = capacity; let capacity_usize: usize = u64::from(metadata[CAPACITY_INDEX]) as usize; + #[cfg(not(feature = "kani"))] let (slice, remaining_bytes) = Ref::<&mut [u8], [T]>::from_prefix_with_elems(bytes, capacity_usize)?; + + #[cfg(feature = "kani")] + let (slice, remaining_bytes) = { + let needed_size = capacity_usize * size_of::(); + let (slice_bytes, remaining) = 
bytes.split_at_mut(needed_size); + // Check alignment for T - required for safe slice creation + if !(slice_bytes.as_ptr() as usize).is_multiple_of(core::mem::align_of::()) { + return Err(ZeroCopyError::UnalignedPointer); + } + let slice = unsafe { + let ptr = slice_bytes.as_mut_ptr() as *mut T; + core::slice::from_raw_parts_mut(ptr, capacity_usize) + }; + (slice, remaining) + }; Ok((Self { metadata, slice }, remaining_bytes)) } @@ -82,7 +130,16 @@ where } let (meta_data, bytes) = bytes.split_at_mut(metadata_size); + + #[cfg(not(feature = "kani"))] let (metadata, _padding) = Ref::<&mut [u8], [L; 2]>::from_prefix(meta_data)?; + + #[cfg(feature = "kani")] + let metadata = unsafe { + let ptr = meta_data.as_ptr() as *const [L; 2]; + core::ptr::read_unaligned(ptr) + }; + let usize_capacity: usize = u64::from(metadata[CAPACITY_INDEX]) as usize; let usize_len: usize = u64::from(metadata[LENGTH_INDEX]) as usize; @@ -97,8 +154,26 @@ where full_vector_size + metadata_size, )); } + + #[cfg(not(feature = "kani"))] let (slice, remaining_bytes) = Ref::<&mut [u8], [T]>::from_prefix_with_elems(bytes, usize_capacity)?; + + #[cfg(feature = "kani")] + let (slice, remaining_bytes) = { + let needed_size = usize_capacity * size_of::(); + let (slice_bytes, remaining) = bytes.split_at_mut(needed_size); + // Check alignment for T - required for safe slice creation + if !(slice_bytes.as_ptr() as usize).is_multiple_of(core::mem::align_of::()) { + return Err(ZeroCopyError::UnalignedPointer); + } + let slice = unsafe { + let ptr = slice_bytes.as_mut_ptr() as *mut T; + core::slice::from_raw_parts_mut(ptr, usize_capacity) + }; + (slice, remaining) + }; + Ok((Self { metadata, slice }, remaining_bytes)) } @@ -149,7 +224,18 @@ where .try_into() .map_err(|_| ZeroCopyError::InvalidConversion) .unwrap(); + #[cfg(not(feature = "kani"))] self.slice.as_mut_bytes().fill(0); + #[cfg(feature = "kani")] + { + let slice_bytes = unsafe { + slice::from_raw_parts_mut( + self.slice.as_mut_ptr() as *mut u8, + 
core::mem::size_of_val(self.slice), + ) + }; + slice_bytes.fill(0); + } } #[inline] diff --git a/program-libs/zero-copy/tests/kani.rs b/program-libs/zero-copy/tests/kani.rs new file mode 100644 index 0000000000..1148f3b6ae --- /dev/null +++ b/program-libs/zero-copy/tests/kani.rs @@ -0,0 +1,83 @@ +#![cfg(kani)] +// Kani formal verification tests for ZeroCopyCyclicVec and ZeroCopyVec +// cargo kani --tests --no-default-features -Z stubbing --features kani + +use light_zero_copy::{cyclic_vec::ZeroCopyCyclicVecU32, vec::ZeroCopyVecU32}; + +// without kani feature Verification Time: 214.86237s +// with kani feature Verification Time: 1.6097491s +/// Verify that push operations work correctly and maintain cyclic behavior +#[kani::proof] +#[kani::unwind(12)] +fn verify_cyclic_vec_push() { + let mut buffer = [0u8; 512]; + let capacity: u32 = kani::any(); + + // Bound capacity for faster verification + kani::assume(capacity > 0 && capacity <= 5); + + let required_size = ZeroCopyCyclicVecU32::::required_size_for_capacity(capacity); + kani::assume(buffer.len() >= required_size); + + let mut vec = ZeroCopyCyclicVecU32::::new(capacity, &mut buffer).unwrap(); + + // Verify initial state + assert_eq!(vec.len(), 0); + assert!(vec.is_empty()); + + // Push elements up to twice the capacity to test cyclic behavior + let push_count = capacity * 2; + for i in 0..push_count { + vec.push(i); + + // Length should grow until capacity, then stay at capacity + let expected_len = ((i + 1) as usize).min(capacity as usize); + assert_eq!(vec.len(), expected_len); + + // Length should never exceed capacity (cyclic property) + assert!(vec.len() <= vec.capacity()); + } + + // After pushing 2*capacity elements, length should equal capacity + assert_eq!(vec.len(), capacity as usize); +} + +/// Verify that ZeroCopyVec push operations work correctly +#[kani::proof] +#[kani::unwind(12)] +fn verify_vec_push() { + let mut buffer = [0u8; 512]; + let capacity: u32 = kani::any(); + + // Bound capacity for 
faster verification + kani::assume(capacity > 0 && capacity <= 5); + + let required_size = ZeroCopyVecU32::::required_size_for_capacity(capacity); + kani::assume(buffer.len() >= required_size); + + let mut vec = ZeroCopyVecU32::::new(capacity, &mut buffer).unwrap(); + + // Verify initial state + assert_eq!(vec.len(), 0); + assert!(vec.is_empty()); + assert_eq!(vec.capacity(), capacity as usize); + + // Push elements up to capacity + for i in 0..capacity { + assert!(vec.push(i).is_ok()); + assert_eq!(vec.len(), (i + 1) as usize); + assert!(!vec.is_empty()); + + // Verify the element was added correctly + assert_eq!(vec.get(i as usize), Some(&i)); + } + + // Verify vector is full at capacity + assert_eq!(vec.len(), capacity as usize); + + // Verify pushing beyond capacity fails + assert!(vec.push(capacity).is_err()); + + // Length should still be at capacity after failed push + assert_eq!(vec.len(), capacity as usize); +} diff --git a/program-tests/batched-merkle-tree-test/Cargo.toml b/program-tests/batched-merkle-tree-test/Cargo.toml index bb0b434421..d63e8565ed 100644 --- a/program-tests/batched-merkle-tree-test/Cargo.toml +++ b/program-tests/batched-merkle-tree-test/Cargo.toml @@ -22,6 +22,7 @@ light-bloom-filter = { workspace = true, features = ["solana"] } light-zero-copy = { workspace = true } solana-pubkey = { workspace = true } light-merkle-tree-metadata = { workspace = true } +light-array-map = { workspace = true} [lints.rust.unexpected_cfgs] level = "allow" diff --git a/program-tests/batched-merkle-tree-test/tests/e2e.rs b/program-tests/batched-merkle-tree-test/tests/e2e.rs new file mode 100644 index 0000000000..cf205e8b63 --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e.rs @@ -0,0 +1,2 @@ +#[path = "e2e_tests/mod.rs"] +mod e2e_tests; diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/address.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/address.rs new file mode 100644 index 0000000000..5e5ebb037a --- 
/dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/address.rs @@ -0,0 +1,277 @@ +#![allow(unused_assignments)] + +use light_batched_merkle_tree::{ + batch::BatchState, + constants::{DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, NUM_BATCHES}, + errors::BatchedMerkleTreeError, + initialize_address_tree::{ + get_address_merkle_tree_account_size_from_params, init_batched_address_merkle_tree_account, + InitAddressTreeAccountsInstructionData, + }, + merkle_tree::BatchedMerkleTreeAccount, +}; +use light_bloom_filter::BloomFilterError; +use light_compressed_account::pubkey::Pubkey; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::MockBatchedAddressForester; +use rand::rngs::StdRng; +use serial_test::serial; + +use crate::e2e_tests::shared::*; + +#[serial] +#[tokio::test] +async fn test_fill_address_tree_completely() { + spawn_prover().await; + let mut current_slot = 1; + let roothistory_capacity = vec![17, 80]; + for root_history_capacity in roothistory_capacity { + let mut mock_indexer = + MockBatchedAddressForester::<{ DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize }>::default(); + + let mut params = InitAddressTreeAccountsInstructionData::test_default(); + // Root history capacity which is greater than the input updates + params.root_history_capacity = root_history_capacity; + + let owner = Pubkey::new_unique(); + + let mt_account_size = get_address_merkle_tree_account_size_from_params(params); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let merkle_tree_rent = 1_000_000_000; + + init_batched_address_merkle_tree_account( + owner, + params, + &mut mt_account_data, + merkle_tree_rent, + mt_pubkey, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + + let num_tx = NUM_BATCHES * params.input_queue_batch_size as usize; + let mut first_value = [0u8; 32]; + for tx in 0..num_tx { + println!("Input insert -----------------------------"); + let 
mut rnd_address = get_rnd_bytes(&mut rng); + rnd_address[0] = 0; + + let mut pre_account_data = mt_account_data.clone(); + let pre_merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut pre_account_data, &mt_pubkey) + .unwrap(); + let pre_account = *pre_merkle_tree_account.get_metadata(); + let pre_roots = pre_merkle_tree_account + .root_history + .iter() + .cloned() + .collect(); + let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; + let mut merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + merkle_tree_account + .insert_address_into_queue(&rnd_address, ¤t_slot) + .unwrap(); + assert_input_queue_insert( + pre_account, + &mut [], + pre_roots, + pre_hash_chains, + merkle_tree_account, + vec![rnd_address], + vec![rnd_address], + vec![true], + vec![], + ¤t_slot, + ) + .unwrap(); + current_slot += 1; + mock_indexer.queue_leaves.push(rnd_address); + + // Insert the same value twice + { + // copy data so that failing test doesn't affect the state of + // subsequent tests + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let result = + merkle_tree_account.insert_address_into_queue(&rnd_address, ¤t_slot); + println!("tx {}", tx); + println!("errors {:?}", result); + if tx == params.input_queue_batch_size as usize * 2 - 1 { + // Error when the value is already inserted into the other batch. + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } else if tx == params.input_queue_batch_size as usize - 1 { + // Error when the value is already inserted into the other batch. + // This occurs only when we switch the batch in this test. + assert_eq!( + result.unwrap_err(), + BatchedMerkleTreeError::NonInclusionCheckFailed + ); + } else { + // Error when inserting into the bloom filter directly twice. 
+ assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + + current_slot += 1; + } + // Try to insert first value into any batch + if tx == 0 { + first_value = rnd_address; + } else { + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + let result = merkle_tree_account.insert_address_into_queue( + &first_value.to_vec().try_into().unwrap(), + &current_slot, + ); + println!("tx {}", tx); + println!("result {:?}", result); + if tx == params.input_queue_batch_size as usize * 2 - 1 { + // Error when the value is already inserted into the other batch. + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } else if tx >= params.input_queue_batch_size as usize - 1 + // || tx == params.input_queue_batch_size as usize + { + // Error when the value is already inserted into the other batch. + // This occurs only when we switch the batch in this test. + assert_eq!( + result.unwrap_err(), + BatchedMerkleTreeError::NonInclusionCheckFailed + ); + } else { + // Error when inserting into the bloom filter directly twice. 
+ assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + current_slot += 1; + + // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + } + // Assert input queue is full and doesn't accept more inserts + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let rnd_bytes = get_rnd_bytes(&mut rng); + let result = merkle_tree_account.insert_address_into_queue(&rnd_bytes, &current_slot); + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } + // Root of the final batch of first input queue batch + let mut first_input_batch_update_root_value = [0u8; 32]; + let num_updates = 10; + let mut batch_roots: Vec<(u32, Vec<[u8; 32]>)> = { + let merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let initial_root = *merkle_tree_account.root_history.last().unwrap(); + vec![(0, vec![initial_root])] + }; + for i in 0..num_updates { + println!("address update ----------------------------- {}", i); + perform_address_update( + &mut mt_account_data, + &mut mock_indexer, + mt_pubkey, + &mut batch_roots, + ) + .await; + if i == 4 { + first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); + } + let merkle_tree_account = + BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); + // assert other batch is not zeroed + let batch_one = merkle_tree_account.queue_batches.batches.get(1).unwrap(); + assert!(!batch_one.bloom_filter_is_zeroed()); + + // after 5 updates the first batch is completely inserted + // As soon as we switch to inserting the second batch we zero out the first batch since + // the second batch is completely full. 
+ if i >= 5 { + assert!(batch.bloom_filter_is_zeroed()); + + // Assert that all unsafe roots from batch 0 are zeroed + let (_, unsafe_roots) = batch_roots.iter().find(|(idx, _)| *idx == 0).unwrap(); + assert_eq!(unsafe_roots.len(), 6, "batch_roots {:?}", batch_roots); + for unsafe_root in unsafe_roots { + assert!( + !merkle_tree_account + .root_history + .iter() + .any(|x| *x == *unsafe_root), + "Unsafe root from batch 0 should be zeroed: {:?}", + unsafe_root + ); + } + } else { + assert!(!batch.bloom_filter_is_zeroed()); + } + } + // assert all bloom_filters are inserted + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + for (i, batch) in merkle_tree_account.queue_batches.batches.iter().enumerate() { + assert_eq!(batch.get_state(), BatchState::Inserted); + if i == 0 { + // first batch is zeroed out since the second batch is full + assert!(batch.bloom_filter_is_zeroed()); + } else { + // second batch is not zeroed out since the first batch is empty + assert!(!batch.bloom_filter_is_zeroed()); + } + } + } + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + println!("root history {:?}", merkle_tree_account.root_history); + let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); + + for root in merkle_tree_account.root_history.iter() { + println!("root {:?}", root); + } + println!( + "root in root index {:?}", + merkle_tree_account.root_history[pre_batch_zero.root_index as usize] + ); + // check that all roots have been overwritten except the root index + // of the update + let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; + let start = merkle_tree_account.root_history.last_index() as u32; + println!("start {:?}", start); + for root in start + 1..pre_batch_zero.root_index + root_history_len { + println!("actual index {:?}", root); + let index = root % 
root_history_len; + + if index == pre_batch_zero.root_index { + let root_index = pre_batch_zero.root_index as usize; + + assert_eq!( + merkle_tree_account.root_history[root_index], + first_input_batch_update_root_value + ); + assert_eq!(merkle_tree_account.root_history[root_index - 1], [0u8; 32]); + break; + } + println!("index {:?}", index); + assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); + } + } + } +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/e2e.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/e2e.rs new file mode 100644 index 0000000000..aa225df49a --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/e2e.rs @@ -0,0 +1,347 @@ +#![allow(unused_assignments)] +use std::cmp::min; + +use crate::e2e_tests::shared::*; +use light_batched_merkle_tree::{ + batch::BatchState, + constants::{ + ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, + DEFAULT_BATCH_STATE_TREE_HEIGHT, NUM_BATCHES, + }, + errors::BatchedMerkleTreeError, + initialize_address_tree::{ + get_address_merkle_tree_account_size_from_params, init_batched_address_merkle_tree_account, + InitAddressTreeAccountsInstructionData, + }, + initialize_state_tree::{ + init_batched_state_merkle_tree_accounts, + test_utils::get_state_merkle_tree_account_size_from_params, + InitStateTreeAccountsInstructionData, + }, + merkle_tree::{ + assert_batch_adress_event, assert_batch_append_event_event, assert_nullify_event, + test_utils::get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, + }, + merkle_tree_metadata::BatchedMerkleTreeMetadata, + queue::{ + test_utils::{ + get_output_queue_account_size_default, get_output_queue_account_size_from_params, + }, + BatchedQueueAccount, BatchedQueueMetadata, + }, +}; +use light_bloom_filter::{BloomFilter, BloomFilterError}; +use light_compressed_account::{ + hash_chain::create_hash_chain_from_slice, 
instruction_data::compressed_proof::CompressedProof, + pubkey::Pubkey, +}; +use light_hasher::{Hasher, Poseidon}; +use light_merkle_tree_reference::MerkleTree; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::{ + MockBatchedAddressForester, MockBatchedForester, MockTxEvent, +}; +use light_zero_copy::vec::ZeroCopyVecU64; +use rand::{rngs::StdRng, Rng}; +use serial_test::serial; + +/// queues with a counter which keeps things below X tps and an if that +/// executes tree updates when possible. +#[serial] +#[tokio::test] +async fn test_e2e() { + spawn_prover().await; + let mut mock_indexer = + MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); + + let num_tx = 2200; + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_default(); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let params = InitStateTreeAccountsInstructionData::test_default(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + let mut in_ready_for_update; + let mut out_ready_for_update; + let mut num_output_updates = 0; + let mut num_input_updates = 0; + let mut num_input_values = 0; + let mut num_output_values = 0; + let mut current_slot = rng.gen(); + + for tx in 0..num_tx { + println!("tx: {}", tx); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_updates: {}", 
num_output_updates); + // Output queue + { + if rng.gen_bool(0.5) { + println!("Output insert -----------------------------"); + println!("num_output_values: {}", num_output_values); + let rnd_bytes = get_rnd_bytes(&mut rng); + let mut pre_account_bytes = output_queue_account_data.clone(); + let pre_output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); + let pre_account = *pre_output_account.get_metadata(); + let pre_value_store = pre_output_account.value_vecs; + let pre_hash_chains = pre_output_account.hash_chain_stores; + let mut output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + output_account + .insert_into_current_batch(&rnd_bytes, &current_slot) + .unwrap(); + assert_output_queue_insert( + pre_account, + pre_value_store, + pre_hash_chains, + BatchedQueueAccount::output_from_bytes( + &mut output_queue_account_data.clone(), // clone so that data cannot be modified + ) + .unwrap(), + vec![rnd_bytes], + current_slot, + ) + .unwrap(); + current_slot += 1; + num_output_values += 1; + mock_indexer.output_queue_leaves.push(rnd_bytes); + } + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + out_ready_for_update = output_account + .batch_metadata + .batches + .iter() + .any(|batch| batch.get_state() == BatchState::Full); + } + + // Input queue + { + let mut pre_account_bytes = mt_account_data.clone(); + + if rng.gen_bool(0.5) && !mock_indexer.active_leaves.is_empty() { + println!("Input insert -----------------------------"); + let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + + let pre_mt_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_account_bytes, &mt_pubkey) + .unwrap(); + let pre_account = *pre_mt_account.get_metadata(); + let pre_hash_chains = pre_mt_account.hash_chain_stores; + let pre_roots = pre_mt_account.root_history.iter().cloned().collect(); + let tx_hash = 
create_hash_chain_from_slice(vec![leaf].as_slice()).unwrap(); + let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); + mock_indexer.input_queue_leaves.push((leaf, leaf_index)); + mock_indexer.tx_events.push(MockTxEvent { + inputs: vec![leaf], + outputs: vec![], + tx_hash, + }); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + merkle_tree_account + .insert_nullifier_into_queue( + &leaf.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + &current_slot, + ) + .unwrap(); + + { + let mut mt_account_data = mt_account_data.clone(); + let merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes( + &mut mt_account_data, + &mt_pubkey, + ) + .unwrap(); + assert_nullifier_queue_insert( + pre_account, + &mut [], + pre_roots, + pre_hash_chains, + merkle_tree_account, + vec![leaf], + vec![leaf_index as u64], + tx_hash, + vec![true], + vec![], + &current_slot, + ) + .unwrap(); + current_slot += 1; + } + num_input_values += 1; + } + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + in_ready_for_update = merkle_tree_account + .queue_batches + .batches + .iter() + .any(|batch| batch.get_state() == BatchState::Full); + } + + if in_ready_for_update { + println!("Input update -----------------------------"); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + in_ready_for_update = false; + perform_input_update(&mut pre_mt_account_data, &mut mock_indexer, true, mt_pubkey) + .await; + mt_account_data = pre_mt_account_data.clone(); + + num_input_updates += 1; + } + + if out_ready_for_update { + println!("Output update -----------------------------"); + println!("Num inserted values: 
{}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let next_index = account.get_metadata().next_index; + let next_full_batch = output_account + .get_metadata() + .batch_metadata + .pending_batch_index; + let batch = output_account + .batch_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves = output_account + .value_vecs + .get(next_full_batch as usize) + .unwrap() + .to_vec(); + println!("leaves {:?}", leaves.len()); + let leaves_hash_chain = output_account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hash_chain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; + let end = start + batch.zkp_batch_size as usize; + for leaf in &leaves[start..end] { + // Storing the leaf in the output queue indexer so that it + // can be inserted into the input queue later. 
+ mock_indexer.active_leaves.push(*leaf); + } + + let instruction_data = InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + let mut pre_output_queue_state = output_queue_account_data.clone(); + println!("Output update -----------------------------"); + + let queue_account = + &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let output_res = + account.update_tree_from_output_queue_account(queue_account, instruction_data); + + assert_eq!( + *account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + println!( + "post update: sequence number: {}", + account.get_metadata().sequence_number + ); + println!("output_res {:?}", output_res); + assert!(output_res.is_ok()); + + println!("output update success {}", num_output_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let old_output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let old_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + assert_merkle_tree_update( + old_account, + account, + Some(old_output_account), + Some(output_account), + new_root, + ); + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + out_ready_for_update = false; + num_output_updates += 1; + } + } + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + 
println!("num_output_updates: {}", num_output_updates); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/mod.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/mod.rs new file mode 100644 index 0000000000..b37f796bcf --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/mod.rs @@ -0,0 +1,4 @@ +pub mod address; +pub mod shared; +pub mod simulate_txs; +pub mod state; diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/shared.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/shared.rs new file mode 100644 index 0000000000..e92892b67a --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/shared.rs @@ -0,0 +1,1236 @@ +#![allow(unused_assignments)] + +use std::collections::HashSet; + +use light_array_map::ArrayMap; +use light_batched_merkle_tree::{ + batch::BatchState, + constants::DEFAULT_BATCH_STATE_TREE_HEIGHT, + errors::BatchedMerkleTreeError, + merkle_tree::{ + assert_batch_adress_event, BatchedMerkleTreeAccount, InstructionDataBatchNullifyInputs, + }, + merkle_tree_metadata::BatchedMerkleTreeMetadata, + queue::{BatchedQueueAccount, BatchedQueueMetadata}, +}; +use light_bloom_filter::BloomFilter; +use light_compressed_account::{ + instruction_data::compressed_proof::CompressedProof, pubkey::Pubkey, +}; +use light_hasher::{Hasher, Poseidon}; +use light_test_utils::mock_batched_forester::{MockBatchedAddressForester, MockBatchedForester}; +use light_zero_copy::vec::ZeroCopyVecU64; +use rand::{rngs::StdRng, Rng}; + +pub async fn perform_address_update( + mt_account_data: &mut [u8], + mock_indexer: &mut MockBatchedAddressForester<40>, + mt_pubkey: Pubkey, + batch_roots: &mut Vec<(u32, Vec<[u8; 32]>)>, +) { + println!("pre address update -----------------------------"); + let mut cloned_mt_account_data = 
(*mt_account_data).to_vec(); + let old_account = BatchedMerkleTreeAccount::address_from_bytes( + cloned_mt_account_data.as_mut_slice(), + &mt_pubkey, + ) + .unwrap(); + let (input_res, new_root, _pre_next_full_batch) = { + let mut account = + BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); + + let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; + let next_index = account.get_metadata().next_index; + println!("next index {:?}", next_index); + let batch = account + .queue_batches + .batches + .get(next_full_batch as usize) + .unwrap(); + let batch_start_index = + batch.start_index + batch.get_num_inserted_zkps() * batch.zkp_batch_size; + println!("batch start index {}", batch_start_index); + let leaves_hash_chain = account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let current_root = account.root_history.last().unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_address_proof( + account.get_metadata().queue_batches.batch_size as u32, + account.get_metadata().queue_batches.zkp_batch_size as u32, + *leaves_hash_chain, + next_index as usize, + batch_start_index as usize, + *current_root, + ) + .await + .unwrap(); + + mock_indexer.finalize_batch_address_update(10); + assert_eq!(mock_indexer.merkle_tree.root(), new_root); + let instruction_data = InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + ( + account.update_tree_from_address_queue(instruction_data), + new_root, + next_full_batch, + ) + }; + println!("post address update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + let event = input_res.unwrap(); + assert_batch_adress_event(event, new_root, &old_account, mt_pubkey); + + // assert Merkle tree + // sequence number increased X + // next index increased X + // current 
root index increased X + // One root changed one didn't + + let account = + BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); + + let batch_index_for_this_root = _pre_next_full_batch as u32; + if let Some((_idx, roots)) = batch_roots + .iter_mut() + .find(|(idx, _)| *idx == batch_index_for_this_root) + { + roots.push(new_root); + } else { + batch_roots.push((batch_index_for_this_root, vec![new_root])); + } + + assert_address_merkle_tree_update(old_account, account, new_root, batch_roots); +} + +pub fn assert_merkle_tree_update( + mut old_account: BatchedMerkleTreeAccount, + account: BatchedMerkleTreeAccount, + old_queue_account: Option, + queue_account: Option, + root: [u8; 32], + batch_roots: &mut ArrayMap, 2>, +) { + old_account.sequence_number += 1; + old_account.root_history.push(root); + println!("Adding root: {:?}", root); + // Determine batch index and state for this update + // For both input and output updates, use the INPUT queue's batch index + // because that's what controls root zeroing + let (batch_idx, _) = { + let idx = old_account.queue_batches.pending_batch_index; + let state = old_account + .queue_batches + .batches + .get(idx as usize) + .unwrap() + .get_state(); + (idx as u32, state) + }; + if let Some(roots) = batch_roots.get_mut_by_key(&batch_idx) { + roots.push(root) + } else { + batch_roots.insert(batch_idx, vec![root], ()).unwrap(); + } + + let input_queue_previous_batch_state = + old_account.queue_batches.get_previous_batch().get_state(); + let input_queue_current_batch = old_account.queue_batches.get_current_batch(); + let previous_batch_index = old_account.queue_batches.get_previous_batch_index(); + let is_half_full = input_queue_current_batch.get_num_inserted_elements() + >= input_queue_current_batch.batch_size / 2 + && input_queue_current_batch.get_state() != BatchState::Inserted; + let root_history_len = old_account.root_history.capacity() as u64; + let previous_batch = 
old_account.queue_batches.get_previous_batch(); + let no_insert_since_last_batch_root = (previous_batch + .sequence_number + .saturating_sub(root_history_len)) + == old_account.sequence_number; + if is_half_full + && input_queue_previous_batch_state == BatchState::Inserted + && !old_account + .queue_batches + .get_previous_batch() + .bloom_filter_is_zeroed() + && !no_insert_since_last_batch_root + { + println!("Entering zeroing block for batch {}", previous_batch_index); + println!( + "Previous batch state: {:?}", + input_queue_previous_batch_state + ); + println!( + "Previous batch: {:?}", + old_account.queue_batches.get_previous_batch() + ); + old_account + .queue_batches + .get_previous_batch_mut() + .set_bloom_filter_to_zeroed(); + old_account.bloom_filter_stores[previous_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + let previous_full_batch = old_account + .queue_batches + .batches + .get(previous_batch_index) + .unwrap(); + let sequence_number = previous_full_batch.sequence_number; + + // Log the last unsafe root + let last_unsafe_root_index = previous_full_batch.root_index; + let first_safe_root_index = last_unsafe_root_index + 1; + println!("DEBUG: Last unsafe root index: {}", last_unsafe_root_index); + println!("DEBUG: First safe root index: {}", first_safe_root_index); + if let Some(last_unsafe_root) = old_account + .root_history + .get(last_unsafe_root_index as usize) + { + println!( + "DEBUG: Last unsafe root at index {}: {:?}", + last_unsafe_root_index, + &last_unsafe_root[0..4] + ); + } + + let overlapping_roots_exits = sequence_number > old_account.sequence_number; + if overlapping_roots_exits { + let mut oldest_root_index = old_account.root_history.first_index(); + // 2.1. Get, num of remaining roots. + // Remaining roots have not been updated since + // the update of the previous batch hence enable to prove + // inclusion of values nullified in the previous batch. 
+ let num_remaining_roots = sequence_number - old_account.sequence_number; + // 2.2. Zero out roots oldest to first safe root index. + for _ in 0..num_remaining_roots { + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + + // Assert that all unsafe roots from this batch are zeroed + let batch_key = previous_batch_index as u32; + if let Some(unsafe_roots) = batch_roots.get_by_key(&batch_key) { + for unsafe_root in unsafe_roots { + assert!( + !old_account + .root_history + .iter() + .any(|x| *x == *unsafe_root), + "Unsafe root from batch {} should be zeroed: {:?} root history {:?}, unsafe roots {:?}", + previous_batch_index, + unsafe_root, + old_account.root_history, unsafe_roots + ); + } + // Clear unsafe roots after verification - batch index will be reused + if let Some(roots) = batch_roots.get_mut_by_key(&batch_key) { + roots.clear(); + } + } + + // Assert that the correct number of roots remain non-zero + // Calculate expected non-zero roots: those created since the last zeroing + let non_zero_roots: Vec<[u8; 32]> = old_account + .root_history + .iter() + .filter(|root| **root != [0u8; 32]) + .copied() + .collect(); + + // Expected number of non-zero roots = number of updates since last zeroing + // This is the sequence difference that wasn't zeroed + let expected_non_zero = old_account.root_history.len() - num_remaining_roots as usize; + + assert_eq!( + non_zero_roots.len(), + expected_non_zero, + "Expected {} non-zero roots after zeroing, but found {}. 
Root history: {:?}", + expected_non_zero, + non_zero_roots.len(), + old_account.root_history + ); + + // Assert that all remaining non-zero roots are tracked in the current (non-zeroed) batch + let current_batch_idx = old_account.queue_batches.pending_batch_index as u32; + if let Some(current_batch_roots) = batch_roots.get_by_key(&current_batch_idx) { + // Debug: print the entire root history + println!("DEBUG: Root history after zeroing:"); + for (i, root) in old_account.root_history.iter().enumerate() { + if *root != [0u8; 32] { + println!(" Index {}: {:?}", i, root); + } + } + + // Debug: print all tracked roots for current batch and their indices + println!("DEBUG: Roots tracked for batch {}:", current_batch_idx); + for (i, root) in current_batch_roots.iter().enumerate() { + let root_index = old_account.root_history.iter().position(|r| r == root); + println!(" Root {}: {:?} at index {:?}", i, root, root_index); + } + let next_batch_index = (current_batch_idx + 1) % 2; + println!("DEBUG: Roots tracked for next batch {}:", next_batch_index); + for (i, root) in batch_roots + .get_by_key(&next_batch_index) + .as_ref() + .unwrap() + .iter() + .enumerate() + { + let root_index = old_account.root_history.iter().position(|r| r == root); + println!(" Root {}: {:?} at index {:?}", i, root, root_index); + } + + for non_zero_root in &non_zero_roots { + // Skip the initial root (usually all zeros or a known starting value) + // which might not be tracked in any batch + if old_account.sequence_number > 0 { + assert!( + current_batch_roots.contains(non_zero_root), + "Non-zero root {:?} should be tracked in current batch {} but wasn't found. 
Current batch roots: {:?}", + non_zero_root, + current_batch_idx, + current_batch_roots + ); + } + } + + // Also verify the count matches + println!("DEBUG: current_batch_idx: {}", current_batch_idx); + println!( + "DEBUG: current_batch_roots.len(): {}", + current_batch_roots.len() + ); + println!("DEBUG: non_zero_roots.len(): {}", non_zero_roots.len()); + println!( + "DEBUG: merkle_tree.sequence_number: {}", + old_account.sequence_number + ); + println!("DEBUG: num_remaining_roots: {}", num_remaining_roots); + println!("DEBUG: previous_batch.sequence_number: {}", sequence_number); + assert_eq!( + current_batch_roots.len(), + non_zero_roots.len(), + "Current batch {} should have {} roots tracked, but has {}", + current_batch_idx, + non_zero_roots.len(), + current_batch_roots.len() + ); + } + } + } + // Output queue update + if let Some(mut old_queue_account) = old_queue_account { + let queue_account = queue_account.unwrap(); + let old_full_batch_index = old_queue_account.batch_metadata.pending_batch_index; + let old_full_batch = old_queue_account + .batch_metadata + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + old_account.root_history.capacity() as u32, + ) + .unwrap(); + + if old_full_batch.get_state() == BatchState::Inserted { + old_queue_account.batch_metadata.pending_batch_index += 1; + old_queue_account.batch_metadata.pending_batch_index %= 2; + } + assert_eq!( + queue_account.get_metadata(), + old_queue_account.get_metadata() + ); + assert_eq!(queue_account, old_queue_account); + // Only the output queue appends state + let zkp_batch_size = old_account.queue_batches.zkp_batch_size; + old_account.next_index += zkp_batch_size; + } else { + // Input queue update + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + let history_capacity = old_account.root_history.capacity(); + let 
previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; + let zkp_batch_size = old_account.queue_batches.zkp_batch_size; + old_account.nullifier_next_index += zkp_batch_size; + + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + history_capacity as u32, + ) + .unwrap(); + println!( + "current batch {:?}", + old_full_batch.get_num_inserted_elements() + ); + + if old_full_batch.get_state() == BatchState::Inserted { + old_account.queue_batches.pending_batch_index += 1; + old_account.queue_batches.pending_batch_index %= 2; + } + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + let zeroed_batch = old_full_batch.get_num_inserted_elements() + >= old_full_batch.batch_size / 2 + && old_full_batch.get_state() != BatchState::Inserted; + println!("zeroed_batch: {:?}", zeroed_batch); + + let state = account.queue_batches.batches[previous_full_batch_index].get_state(); + let root_history_len = old_account.root_history.capacity() as u64; + let old_account_sequence_number = old_account.sequence_number; + let previous_batch_sequence_number = old_account + .queue_batches + .batches + .get(previous_full_batch_index) + .unwrap() + .sequence_number; + let no_insert_since_last_batch_root = (previous_batch_sequence_number + .saturating_sub(root_history_len)) + == old_account_sequence_number; + println!( + "zeroing out values: {}", + zeroed_batch && state == BatchState::Inserted + ); + if zeroed_batch && state == BatchState::Inserted && !no_insert_since_last_batch_root { + println!( + "DEBUG: Entering OUTPUT queue zeroing block for batch {}", + previous_full_batch_index + ); + let previous_batch = old_account + .queue_batches + 
.batches + .get_mut(previous_full_batch_index) + .unwrap(); + previous_batch.set_bloom_filter_to_zeroed(); + let sequence_number = previous_batch_sequence_number; + let overlapping_roots_exits = sequence_number > old_account_sequence_number; + if overlapping_roots_exits { + old_account.bloom_filter_stores[previous_full_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + + let mut oldest_root_index = old_account.root_history.first_index(); + + let num_remaining_roots = sequence_number - old_account_sequence_number; + println!("num_remaining_roots: {}", num_remaining_roots); + println!("sequence_number: {}", account.sequence_number); + for _ in 0..num_remaining_roots { + println!("zeroing out root index: {}", oldest_root_index); + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + + // Assert that all unsafe roots from this batch are zeroed + let batch_key = previous_full_batch_index as u32; + if let Some(unsafe_roots) = batch_roots.get_by_key(&batch_key) { + for unsafe_root in unsafe_roots { + assert!( + !old_account.root_history.iter().any(|x| *x == *unsafe_root), + "Unsafe root from batch {} should be zeroed: {:?}", + previous_full_batch_index, + unsafe_root + ); + } + // Clear unsafe roots after verification - batch index will be reused + if let Some(roots) = batch_roots.get_mut_by_key(&batch_key) { + roots.clear(); + } + } + + // Assert that the correct number of roots remain non-zero + let non_zero_roots: Vec<[u8; 32]> = old_account + .root_history + .iter() + .filter(|root| **root != [0u8; 32]) + .copied() + .collect(); + + // Expected number of non-zero roots = number of updates since last zeroing + let expected_non_zero = + old_account.root_history.len() - num_remaining_roots as usize; + println!("num_remaining_roots {}", num_remaining_roots); + assert_eq!( + non_zero_roots.len(), + expected_non_zero, + "Expected {} non-zero roots after output queue 
zeroing, but found {}. Root history: {:?}", + expected_non_zero, + non_zero_roots.len(), + old_account.root_history + ); + + // Assert that all remaining non-zero roots are tracked in the current (non-zeroed) batch + let current_batch_idx = old_account.queue_batches.pending_batch_index as u32; + if let Some(current_batch_roots) = batch_roots.get_by_key(¤t_batch_idx) { + for non_zero_root in &non_zero_roots { + // Skip the initial root which might not be tracked in any batch + if old_account.sequence_number > 0 { + assert!( + current_batch_roots.contains(non_zero_root), + "Non-zero root {:?} should be tracked in current batch {} but wasn't found. Current batch roots: {:?}", + non_zero_root, + current_batch_idx, + current_batch_roots + ); + } + } + + // Also verify the count matches + assert_eq!( + current_batch_roots.len(), + non_zero_roots.len(), + "Current batch {} should have {} roots tracked, but has {}", + current_batch_idx, + non_zero_roots.len(), + current_batch_roots.len() + ); + } + } + } + } + + assert_eq!(account.get_metadata(), old_account.get_metadata()); + assert_eq!(account, old_account); + assert_eq!(*account.root_history.last().unwrap(), root); +} + +pub fn assert_address_merkle_tree_update( + mut old_account: BatchedMerkleTreeAccount, + account: BatchedMerkleTreeAccount, + root: [u8; 32], + batch_roots: &[(u32, Vec<[u8; 32]>)], +) { + { + // Input queue update + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + let history_capacity = old_account.root_history.capacity(); + let pre_roots = old_account.root_history.to_vec(); + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + + old_full_batch + .mark_as_inserted_in_merkle_tree( + account.sequence_number, + account.root_history.last_index() as u32, + history_capacity as u32, + ) + .unwrap(); + if old_full_batch.get_state() == BatchState::Inserted { + old_account.queue_batches.pending_batch_index += 1; + 
old_account.queue_batches.pending_batch_index %= 2; + } + + // Increment sequence number and push root here to match real implementation + // (verify_update increments and pushes before zero_out_previous_batch_bloom_filter) + old_account.sequence_number += 1; + old_account.root_history.push(root); + + // Calculate expected_first_zero_index after push to match real implementation + let expected_first_zero_index = old_account.root_history.first_index(); + + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + + let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; + + let old_full_batch_index = old_account.queue_batches.pending_batch_index; + let old_full_batch = old_account + .queue_batches + .batches + .get_mut(old_full_batch_index as usize) + .unwrap(); + let current_seq = account.sequence_number; + let root_history_len = account.root_history_capacity as u64; + let state_seq = account.queue_batches.batches[previous_full_batch_index].sequence_number; + let no_insert_since_last_batch_root = + state_seq.saturating_sub(root_history_len) == current_seq; + println!( + "previous_batch_is_inserted{}", + old_full_batch.get_state() != BatchState::Inserted + ); + println!( + "no_insert_since_last_batch_root {}", + no_insert_since_last_batch_root + ); + let zeroed_batch_this_tx = old_full_batch.get_num_inserted_elements() + >= old_full_batch.batch_size / 2 + && old_full_batch.get_state() != BatchState::Inserted + && !no_insert_since_last_batch_root + && !old_full_batch.bloom_filter_is_zeroed(); + println!("zeroed_batch_this_tx: {:?}", zeroed_batch_this_tx); + let state = account.queue_batches.batches[previous_full_batch_index].get_state(); + let previous_batch = old_account + .queue_batches + .batches + .get_mut(previous_full_batch_index) + .unwrap(); + + if zeroed_batch_this_tx && state == BatchState::Inserted { + previous_batch.set_bloom_filter_to_zeroed(); + let sequence_number = previous_batch.sequence_number; + let 
overlapping_roots_exits = sequence_number > old_account.sequence_number; + if overlapping_roots_exits { + old_account.bloom_filter_stores[previous_full_batch_index] + .iter_mut() + .for_each(|elem| { + *elem = 0; + }); + + let mut oldest_root_index = old_account.root_history.first_index(); + + let num_remaining_roots = sequence_number - old_account.sequence_number; + for _ in 0..num_remaining_roots { + println!("zeroing out root index: {}", oldest_root_index); + old_account.root_history[oldest_root_index] = [0u8; 32]; + oldest_root_index += 1; + oldest_root_index %= old_account.root_history.len(); + } + println!( + "pre roots {:?}", + pre_roots + .iter() + .filter(|r| **r != [0u8; 32]) + .cloned() + .collect::>() + ); + + println!( + "post roots (actual account) {:?}", + account + .root_history + .iter() + .filter(|r| **r != [0u8; 32]) + .cloned() + .collect::>() + ); + // No roots of the zeroed batch exist in the root history + if let Some((_idx, zeroed_batch_roots)) = batch_roots + .iter() + .find(|(idx, _)| *idx == previous_full_batch_index as u32) + { + for root in zeroed_batch_roots { + println!("checking root {:?}", root); + assert!( + !account.root_history.iter().any(|r| r == root), + "Zeroed batch root {:?} still exists in root_history", + root + ); + } + } + // All non-zero roots in the root history belong to the current batch + let current_batch_index = old_full_batch_index as u32; + if let Some((_idx, current_batch_roots)) = batch_roots + .iter() + .find(|(idx, _)| *idx == current_batch_index) + { + // Assert 1: All non-zero roots belong to current batch + for root in account.root_history.iter() { + if *root != [0u8; 32] { + assert!( + current_batch_roots.contains(root), + "Non-zero root {:?} in root_history does not belong to current batch {}", + root, + current_batch_index + ); + } + } + + // Assert 2: All current batch roots are present in root_history + for root in current_batch_roots { + assert!( + account.root_history.iter().any(|r| r == root), + 
"Current batch root {:?} is missing from root_history", + root + ); + } + + // Assert 3: Count matches + let non_zero_count = account + .root_history + .iter() + .filter(|r| **r != [0u8; 32]) + .count(); + assert_eq!( + non_zero_count, + current_batch_roots.len(), + "Expected {} non-zero roots (current batch size), found {}", + current_batch_roots.len(), + non_zero_count + ); + + // Assert 4: No duplicates + let non_zero_roots: Vec<_> = account + .root_history + .iter() + .filter(|r| **r != [0u8; 32]) + .cloned() + .collect(); + let unique_roots: HashSet<_> = non_zero_roots.iter().collect(); + assert_eq!( + non_zero_roots.len(), + unique_roots.len(), + "Duplicate roots found in root_history" + ); + + // Assert 5: Roots are contiguous from the end + let last_idx = account.root_history.last_index(); + let capacity = account.root_history.capacity(); + let mut contiguous_count = 0; + let mut idx = last_idx; + + for _ in 0..capacity { + if account.root_history[idx] != [0u8; 32] { + contiguous_count += 1; + assert!( + current_batch_roots.contains(&account.root_history[idx]), + "Root at index {} is not from current batch", + idx + ); + } else { + break; + } + idx = if idx == 0 { capacity - 1 } else { idx - 1 }; + } + + assert_eq!( + contiguous_count, + current_batch_roots.len(), + "Expected {} contiguous roots from last_index, found {}", + current_batch_roots.len(), + contiguous_count + ); + } + + // Assert 6: Verify zeroed batch metadata + let zeroed_batch = &account.queue_batches.batches[previous_full_batch_index]; + assert!( + zeroed_batch.bloom_filter_is_zeroed(), + "Zeroed batch should have bloom_filter_is_zeroed set to true" + ); + assert_eq!( + zeroed_batch.get_state(), + BatchState::Inserted, + "Zeroed batch should be in Inserted state" + ); + + // Assert 7: Verify bloom filter bytes are all zero + let zeroed_bloom_filter = &account.bloom_filter_stores[previous_full_batch_index]; + assert!( + zeroed_bloom_filter.iter().all(|&b| b == 0), + "All bloom filter bytes 
should be zero after zeroing" + ); + + // Assert 8: Verify root at batch.root_index is zeroed + let zeroed_root_index = zeroed_batch.root_index as usize; + assert_eq!( + account.root_history[zeroed_root_index], [0u8; 32], + "Root at batch.root_index {} should be zeroed", + zeroed_root_index + ); + + // Assert 9: Verify first safe root (root_index + 1) must not be zero and belongs to current batch + let first_safe_index = + (zeroed_root_index + 1) % account.root_history_capacity as usize; + let first_safe_root = account.root_history[first_safe_index]; + assert_ne!( + first_safe_root, [0u8; 32], + "First safe root at index {} should NOT be zeroed", + first_safe_index + ); + let (_idx, current_batch_roots) = batch_roots + .iter() + .find(|(idx, _)| *idx == current_batch_index) + .expect("Current batch should exist in batch_roots"); + assert!( + current_batch_roots.contains(&first_safe_root), + "First safe root at index {} should belong to current batch, found {:?}", + first_safe_index, + first_safe_root + ); + + // Assert 10: Verify sequence number relationship + assert!( + sequence_number > old_account.sequence_number, + "Batch sequence_number {} should be > account sequence_number {}", + sequence_number, + old_account.sequence_number + ); + assert_eq!( + num_remaining_roots, + sequence_number - old_account.sequence_number, + "num_remaining_roots calculation mismatch" + ); + + // Assert 11: Verify we zeroed the exact expected range + println!( + "Assert 11: expected_first_zero_index = {}, num_remaining_roots = {}", + expected_first_zero_index, num_remaining_roots + ); + let mut actual_zeroed_indices = Vec::new(); + for i in 0..account.root_history_capacity as usize { + if account.root_history[i] == [0u8; 32] { + actual_zeroed_indices.push(i); + } + } + println!( + "Assert 11: actual zeroed indices in account.root_history: {:?}", + actual_zeroed_indices + ); + + for i in 0..num_remaining_roots { + let idx = (expected_first_zero_index + i as usize) + % 
account.root_history_capacity as usize;
                println!(
                    "Assert 11: checking index {} (i={}), value: {:?}",
                    idx,
                    i,
                    &account.root_history[idx][0..4]
                );
                assert_eq!(
                    account.root_history[idx],
                    [0u8; 32],
                    "Root at index {} should be zeroed (part of zeroing range)",
                    idx
                );
            }
        }
    }

    old_account.next_index += old_account.queue_batches.zkp_batch_size;
    println!(
        "post roots (old_account simulation) {:?}",
        old_account
            .root_history
            .iter()
            .filter(|r| **r != [0u8; 32])
            .cloned()
            .collect::<Vec<_>>()
    );
    assert_eq!(account.get_metadata(), old_account.get_metadata());
    assert_eq!(*account.root_history.last().unwrap(), root);
    assert_eq!(account, old_account);
}

/// Returns 32 random bytes with the first byte forced to zero.
/// NOTE(review): zeroing byte 0 presumably keeps the big-endian value below the
/// BN254 field modulus so it is a valid field element — confirm against the hasher's input contract.
pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] {
    let mut rnd_bytes = rng.gen::<[u8; 32]>();
    rnd_bytes[0] = 0;
    rnd_bytes
}

/// Performs one input-queue (nullifier) batch update against the on-chain
/// account bytes and, when `enable_assert` is set, checks the post state
/// against a simulated update of a pre-update copy of the account.
///
/// Steps:
/// 1. Snapshot the account bytes so `assert_merkle_tree_update` can replay the
///    expected transition on the old state.
/// 2. Read the pending batch and its leaves hash chain, ask the mock indexer
///    for a batched update proof, and apply it via
///    `update_tree_from_input_queue`.
/// 3. `batch_roots` tracks roots per batch index (used by the assertion to
///    verify root-history zeroing).
pub async fn perform_input_update(
    mt_account_data: &mut [u8],
    mock_indexer: &mut MockBatchedForester<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>,
    enable_assert: bool,
    mt_pubkey: Pubkey,
    // Extraction note: generic args restored; key = batch index, value = roots created by that batch.
    batch_roots: &mut ArrayMap<u32, Vec<[u8; 32]>, 2>,
) {
    let mut cloned_mt_account_data = (*mt_account_data).to_vec();
    let old_account = BatchedMerkleTreeAccount::state_from_bytes(
        cloned_mt_account_data.as_mut_slice(),
        &mt_pubkey,
    )
    .unwrap();
    let (input_res, root) = {
        let mut account =
            BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap();

        let next_full_batch = account.get_metadata().queue_batches.pending_batch_index;
        let batch = account
            .queue_batches
            .batches
            .get(next_full_batch as usize)
            .unwrap();
        // Hash chain of the next zkp batch to insert for the pending batch.
        let leaves_hash_chain = account
            .hash_chain_stores
            .get(next_full_batch as usize)
            .unwrap()
            .get(batch.get_num_inserted_zkps() as usize)
            .unwrap();
        let (proof, new_root) = mock_indexer
            .get_batched_update_proof(
                account.get_metadata().queue_batches.zkp_batch_size as u32,
                *leaves_hash_chain,
            )
            .await
            .unwrap();
        let instruction_data = InstructionDataBatchNullifyInputs {
            new_root,
            compressed_proof: CompressedProof {
                a: proof.a,
                b: proof.b,
                c: proof.c,
            },
        };

        (
            account.update_tree_from_input_queue(instruction_data),
            new_root,
        )
    };
    println!("Input update -----------------------------");
    println!("res {:?}", input_res);
    assert!(input_res.is_ok());

    // assert Merkle tree
    // sequence number increased X
    // next index increased X
    // current root index increased X
    // One root changed one didn't

    let account = BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap();
    if enable_assert {
        assert_merkle_tree_update(old_account, account, None, None, root, batch_roots);
    }
}

// Get random leaf that is not in the input queue.
// Returns (0, [0u8; 32]) when there are no active leaves; otherwise removes
// and returns a uniformly random leaf together with its index in the vec.
pub fn get_random_leaf(rng: &mut StdRng, active_leaves: &mut Vec<[u8; 32]>) -> (usize, [u8; 32]) {
    if active_leaves.is_empty() {
        return (0, [0u8; 32]);
    }
    let index = rng.gen_range(0..active_leaves.len());
    // get random leaf from vector and remove it
    (index, active_leaves.remove(index))
}

/// Computes the nullifier for every input value
/// (`Poseidon(value, leaf_index_be, tx_hash)`) and delegates to
/// `assert_input_queue_insert` with the nullifiers as the hash-chain values.
#[allow(clippy::too_many_arguments)]
pub fn assert_nullifier_queue_insert(
    pre_account: BatchedMerkleTreeMetadata,
    pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>],
    pre_roots: Vec<[u8; 32]>,
    pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2],
    merkle_tree_account: BatchedMerkleTreeAccount,
    bloom_filter_insert_values: Vec<[u8; 32]>,
    // Extraction note: element types restored from usage
    // (`to_be_bytes` on u64 leaf indices; `position()`-derived usize array indices).
    leaf_indices: Vec<u64>,
    tx_hash: [u8; 32],
    input_is_in_tree: Vec<bool>,
    array_indices: Vec<usize>,
    current_slot: &u64,
) -> Result<(), BatchedMerkleTreeError> {
    let mut leaf_hash_chain_insert_values = vec![];
    for (insert_value, leaf_index) in bloom_filter_insert_values.iter().zip(leaf_indices.iter()) {
        let nullifier =
            Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash])
                .unwrap();
        leaf_hash_chain_insert_values.push(nullifier);
    }
    assert_input_queue_insert(
        pre_account,
        pre_value_vecs,
        pre_roots,
        pre_hash_chains,
        merkle_tree_account,
        bloom_filter_insert_values,
leaf_hash_chain_insert_values, + input_is_in_tree, + array_indices, + current_slot, + ) +} +/// Insert into input queue: +/// 1. New value exists in the current batch bloom_filter +/// 2. New value does not exist in the other batch bloom_filters +#[allow(clippy::too_many_arguments)] +pub fn assert_input_queue_insert( + mut pre_account: BatchedMerkleTreeMetadata, + pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], + pre_roots: Vec<[u8; 32]>, + mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], + mut merkle_tree_account: BatchedMerkleTreeAccount, + bloom_filter_insert_values: Vec<[u8; 32]>, + leaf_hash_chain_insert_values: Vec<[u8; 32]>, + input_is_in_tree: Vec, + array_indices: Vec, + current_slot: &u64, +) -> Result<(), BatchedMerkleTreeError> { + let mut should_be_zeroed = false; + for (i, insert_value) in bloom_filter_insert_values.iter().enumerate() { + if !input_is_in_tree[i] { + let value_vec_index = array_indices[i]; + assert!( + pre_value_vecs.iter_mut().any(|value_vec| { + if value_vec.len() > value_vec_index { + { + if value_vec[value_vec_index] == *insert_value { + value_vec[value_vec_index] = [0u8; 32]; + true + } else { + false + } + } + } else { + false + } + }), + "Value not in value vec." 
+ ); + } + + let post_roots: Vec<[u8; 32]> = merkle_tree_account.root_history.iter().cloned().collect(); + // if root buffer changed it must be only overwritten by [0u8;32] + if post_roots != pre_roots { + let only_zero_overwrites = post_roots + .iter() + .zip(pre_roots.iter()) + .all(|(post, pre)| *post == *pre || *post == [0u8; 32]); + println!("pre_roots: {:?}", pre_roots); + println!("post_roots: {:?}", post_roots); + if !only_zero_overwrites { + panic!("Root buffer changed.") + } + } + + let inserted_batch_index = + pre_account.queue_batches.currently_processing_batch_index as usize; + let expected_batch = pre_account + .queue_batches + .batches + .get_mut(inserted_batch_index) + .unwrap(); + + pre_account.queue_batches.next_index += 1; + + println!( + "assert input queue batch update: expected_batch: {:?}", + expected_batch + ); + println!( + "assert input queue batch update: expected_batch.get_num_inserted_elements(): {}", + expected_batch.get_num_inserted_elements() + ); + println!( + "assert input queue batch update: expected_batch.batch_size / 2: {}", + expected_batch.batch_size / 2 + ); + + if !should_be_zeroed && expected_batch.get_state() == BatchState::Inserted { + should_be_zeroed = + expected_batch.get_num_inserted_elements() == expected_batch.batch_size / 2; + } + println!( + "assert input queue batch update: should_be_zeroed: {}", + should_be_zeroed + ); + if expected_batch.get_state() == BatchState::Inserted { + println!("assert input queue batch update: clearing batch"); + pre_hash_chains[inserted_batch_index].clear(); + expected_batch.advance_state_to_fill(None).unwrap(); + expected_batch.set_start_slot(current_slot); + println!("setting start slot to {}", current_slot); + } else if expected_batch.get_state() == BatchState::Fill + && !expected_batch.start_slot_is_set() + { + // Batch is filled for the first time + expected_batch.set_start_slot(current_slot); + } + println!( + "assert input queue batch update: inserted_batch_index: {}", + 
inserted_batch_index + ); + // New value exists in the current batch bloom filter + let mut bloom_filter = BloomFilter::new( + merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize, + merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity, + merkle_tree_account.bloom_filter_stores[inserted_batch_index], + ) + .unwrap(); + println!( + "assert input queue batch update: insert_value: {:?}", + insert_value + ); + assert!(bloom_filter.contains(insert_value)); + let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); + expected_batch.add_to_hash_chain(&leaf_hash_chain_insert_values[i], pre_hash_chain)?; + + let num_iters = + merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize; + let bloom_filter_capacity = + merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity; + // New value does not exist in the other batch bloom_filters + for (i, store) in merkle_tree_account + .bloom_filter_stores + .iter_mut() + .enumerate() + { + // Skip current batch it is already checked above + if i != inserted_batch_index { + let mut bloom_filter = + BloomFilter::new(num_iters, bloom_filter_capacity, store).unwrap(); + assert!(!bloom_filter.contains(insert_value)); + } + } + // if the currently processing batch changed it should + // increment by one and the old batch should be ready to + // update + if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches() { + assert_eq!( + merkle_tree_account.queue_batches.batches + [pre_account.queue_batches.currently_processing_batch_index as usize] + .get_state(), + BatchState::Full + ); + pre_account.queue_batches.currently_processing_batch_index += 1; + pre_account.queue_batches.currently_processing_batch_index %= + pre_account.queue_batches.num_batches; + assert_eq!( + merkle_tree_account.queue_batches.batches[inserted_batch_index], + *expected_batch + ); + assert_eq!( + 
merkle_tree_account.hash_chain_stores[inserted_batch_index] + .last() + .unwrap(), + pre_hash_chain.last().unwrap(), + "Hashchain store inconsistent." + ); + } + } + + assert_eq!( + *merkle_tree_account.get_metadata(), + pre_account, + "BatchedMerkleTreeMetadata changed." + ); + let inserted_batch_index = pre_account.queue_batches.currently_processing_batch_index as usize; + let mut expected_batch = pre_account.queue_batches.batches[inserted_batch_index]; + if should_be_zeroed { + expected_batch.set_bloom_filter_to_zeroed(); + } + assert_eq!( + merkle_tree_account.queue_batches.batches[inserted_batch_index], + expected_batch + ); + let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; + assert_eq!( + merkle_tree_account.queue_batches.batches[other_batch], + pre_account.queue_batches.batches[other_batch] + ); + assert_eq!( + merkle_tree_account.hash_chain_stores, pre_hash_chains, + "Hashchain store inconsistent." + ); + Ok(()) +} + +/// Expected behavior for insert into output queue: +/// - add value to value array +/// - batch.num_inserted += 1 +/// - if batch is full after insertion advance state to ReadyToUpdateTree +pub fn assert_output_queue_insert( + mut pre_account: BatchedQueueMetadata, + // mut pre_batches: Vec, + mut pre_value_store: [ZeroCopyVecU64<[u8; 32]>; 2], + mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], + mut output_account: BatchedQueueAccount, + insert_values: Vec<[u8; 32]>, + current_slot: u64, +) -> Result<(), BatchedMerkleTreeError> { + for batch in output_account.batch_metadata.batches.iter_mut() { + println!("output_account.batch: {:?}", batch); + } + for batch in pre_account.batch_metadata.batches.iter() { + println!("pre_batch: {:?}", batch); + } + for insert_value in insert_values.iter() { + // if the currently processing batch changed it should + // increment by one and the old batch should be ready to + // update + + let inserted_batch_index = + pre_account.batch_metadata.currently_processing_batch_index as usize; + let 
expected_batch = &mut pre_account.batch_metadata.batches[inserted_batch_index]; + let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap(); + let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); + if expected_batch.get_state() == BatchState::Inserted { + expected_batch + .advance_state_to_fill(Some(pre_account.batch_metadata.next_index)) + .unwrap(); + pre_value_store.clear(); + pre_hash_chain.clear(); + } + pre_account.batch_metadata.next_index += 1; + expected_batch.store_and_hash_value( + insert_value, + pre_value_store, + pre_hash_chain, + ¤t_slot, + )?; + + let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; + assert!(output_account.value_vecs[inserted_batch_index] + .as_mut_slice() + .to_vec() + .contains(insert_value)); + assert!(!output_account.value_vecs[other_batch] + .as_mut_slice() + .to_vec() + .contains(insert_value)); + if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index() { + assert_eq!( + output_account.batch_metadata.batches + [pre_account.batch_metadata.currently_processing_batch_index as usize] + .get_state(), + BatchState::Full + ); + pre_account.batch_metadata.currently_processing_batch_index += 1; + pre_account.batch_metadata.currently_processing_batch_index %= + pre_account.batch_metadata.num_batches; + assert_eq!( + output_account.batch_metadata.batches[inserted_batch_index], + *expected_batch + ); + } + } + assert_eq!( + *output_account.get_metadata(), + pre_account, + "BatchedQueueAccount changed." 
+ ); + assert_eq!(pre_hash_chains, output_account.hash_chain_stores); + for (i, (value_store, pre)) in output_account + .value_vecs + .iter() + .zip(pre_value_store.iter()) + .enumerate() + { + for (j, (value, pre_value)) in value_store.iter().zip(pre.iter()).enumerate() { + assert_eq!( + *value, *pre_value, + "{} {} \n value store {:?}\n pre {:?}", + i, j, value_store, pre + ); + } + } + assert_eq!(pre_value_store, output_account.value_vecs); + Ok(()) +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/simulate_txs.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/simulate_txs.rs new file mode 100644 index 0000000000..273099f2f7 --- /dev/null +++ b/program-tests/batched-merkle-tree-test/tests/e2e_tests/simulate_txs.rs @@ -0,0 +1,555 @@ +#![allow(unused_assignments)] +use std::cmp::min; + +use light_array_map::ArrayMap; +use light_batched_merkle_tree::{ + constants::{ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_STATE_TREE_HEIGHT}, + errors::BatchedMerkleTreeError, + initialize_state_tree::{ + init_batched_state_merkle_tree_accounts, InitStateTreeAccountsInstructionData, + }, + merkle_tree::{ + assert_batch_append_event_event, assert_nullify_event, + test_utils::get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, + InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, + }, + queue::{test_utils::get_output_queue_account_size_default, BatchedQueueAccount}, +}; +use light_compressed_account::{ + hash_chain::create_hash_chain_from_slice, instruction_data::compressed_proof::CompressedProof, + pubkey::Pubkey, +}; +use light_hasher::Poseidon; +use light_merkle_tree_reference::MerkleTree; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::{MockBatchedForester, MockTxEvent}; +use rand::{rngs::StdRng, Rng}; +use serial_test::serial; + +use crate::e2e_tests::shared::*; + +#[derive(Debug, PartialEq, Clone)] +pub struct MockTransactionInputs { + inputs: Vec<[u8; 32]>, + outputs: 
Vec<[u8; 32]>, +} +pub fn simulate_transaction( + instruction_data: MockTransactionInputs, + merkle_tree_account_data: &mut [u8], + output_queue_account_data: &mut [u8], + reference_merkle_tree: &MerkleTree, + current_slot: &mut u64, + mt_pubkey: &Pubkey, +) -> Result { + let mut output_account = + BatchedQueueAccount::output_from_bytes(output_queue_account_data).unwrap(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(merkle_tree_account_data, mt_pubkey).unwrap(); + let flattened_inputs = instruction_data + .inputs + .iter() + .cloned() + .chain(instruction_data.outputs.iter().cloned()) + .collect::>(); + let tx_hash = create_hash_chain_from_slice(flattened_inputs.as_slice())?; + + for input in instruction_data.inputs.iter() { + // zkp inclusion in Merkle tree + let inclusion = reference_merkle_tree.get_leaf_index(input); + let leaf_index = if let Some(leaf_index) = inclusion { + leaf_index as u64 + } else { + println!("simulate_transaction: inclusion is none"); + let mut included = false; + let mut leaf_index = 0; + let start_indices = output_account + .batch_metadata + .batches + .iter() + .map(|batch| batch.start_index) + .collect::>(); + + for (batch_index, value_vec) in output_account.value_vecs.iter_mut().enumerate() { + for (value_index, value) in value_vec.iter_mut().enumerate() { + if *value == *input { + let batch_start_index = start_indices[batch_index]; + included = true; + println!("overwriting value: {:?}", value); + *value = [0u8; 32]; + leaf_index = value_index as u64 + batch_start_index; + } + } + } + if !included { + panic!("Value not included in any output queue or trees."); + } + leaf_index + }; + + println!( + "sim tx input: \n {:?} \nleaf index : {:?}, \ntx hash {:?}", + input, leaf_index, tx_hash, + ); + merkle_tree_account.insert_nullifier_into_queue( + input, + leaf_index, + &tx_hash, + current_slot, + )?; + } + + for output in instruction_data.outputs.iter() { + let leaf_index = 
output_account.batch_metadata.next_index; + println!( + "sim tx output: \n {:?} \nleaf index : {:?}", + output, leaf_index + ); + output_account.insert_into_current_batch(output, current_slot)?; + } + Ok(MockTxEvent { + inputs: instruction_data.inputs.clone(), + outputs: instruction_data.outputs.clone(), + tx_hash, + }) +} + +#[serial] +#[tokio::test] +async fn test_simulate_transactions() { + spawn_prover().await; + let mut mock_indexer = + MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); + + let num_tx = 2200; + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_default(); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_merkle_tree_account_size_default(); + let mut mt_account_data = vec![0; mt_account_size]; + let mt_pubkey = ACCOUNT_COMPRESSION_PROGRAM_ID.into(); + + let params = InitStateTreeAccountsInstructionData::test_default(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + let mut in_ready_for_update = false; + let mut out_ready_for_update = false; + let mut num_output_updates = 0; + let mut num_input_updates = 0; + let mut num_input_values = 0; + let mut num_output_values = 0; + let mut current_slot = rng.gen(); + + // Track roots created during each batch insertion (batch_index -> roots) + let mut batch_roots: ArrayMap, 2> = ArrayMap::new(); + + // Track the initial root for batch 0 + // For StateV2 trees, this is the zero bytes root for the tree height + { + let initial_root = + 
light_hasher::Poseidon::zero_bytes()[DEFAULT_BATCH_STATE_TREE_HEIGHT as usize]; + use light_hasher::Hasher; + batch_roots.insert(0, vec![initial_root], ()).unwrap(); + println!("Initial root {:?} tracked for batch 0", initial_root); + } + + for tx in 0..num_tx { + println!("tx: {}", tx); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_updates: {}", num_output_updates); + { + println!("Simulate tx {} -----------------------------", tx); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let number_of_outputs = rng.gen_range(0..7); + let mut outputs = vec![]; + for _ in 0..number_of_outputs { + outputs.push(get_rnd_bytes(&mut rng)); + } + let number_of_inputs = if rng.gen_bool(0.5) { + if !mock_indexer.active_leaves.is_empty() { + let x = min(mock_indexer.active_leaves.len(), 5); + rng.gen_range(0..x) + } else { + 0 + } + } else { + 0 + }; + + let mut inputs = vec![]; + let mut input_is_in_tree = vec![]; + let mut leaf_indices = vec![]; + let mut array_indices = vec![]; + let mut retries = min(10, mock_indexer.active_leaves.len()); + while inputs.len() < number_of_inputs && retries > 0 { + let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + let inserted = mock_indexer.merkle_tree.get_leaf_index(&leaf); + if let Some(leaf_index) = inserted { + inputs.push(leaf); + leaf_indices.push(leaf_index as u64); + input_is_in_tree.push(true); + array_indices.push(0); + } else if rng.gen_bool(0.1) { + inputs.push(leaf); + let output_queue = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data) + .unwrap(); + let mut leaf_array_index = 0; + let mut batch_index = 0; + for (i, vec) in output_queue.value_vecs.iter().enumerate() { + let pos = vec.iter().position(|value| *value == leaf); + if let Some(pos) = pos { + leaf_array_index = 
pos; + batch_index = i; + break; + } + if i == output_queue.value_vecs.len() - 1 { + panic!("Leaf not found in output queue."); + } + } + let batch = output_queue + .batch_metadata + .batches + .get(batch_index) + .unwrap(); + array_indices.push(leaf_array_index); + let leaf_index: u64 = batch.start_index + leaf_array_index as u64; + leaf_indices.push(leaf_index); + input_is_in_tree.push(false); + } + retries -= 1; + } + let number_of_inputs = inputs.len(); + println!("number_of_inputs: {}", number_of_inputs); + + let instruction_data = MockTransactionInputs { + inputs: inputs.clone(), + outputs: outputs.clone(), + }; + + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + println!( + "input queue: {:?}", + merkle_tree_account.queue_batches.batches[0].get_num_inserted_zkp_batch() + ); + + let mut pre_mt_data = mt_account_data.clone(); + let mut pre_account_bytes = output_queue_account_data.clone(); + + let pre_output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); + let pre_output_metadata = *pre_output_account.get_metadata(); + let mut pre_output_value_stores = pre_output_account.value_vecs; + let pre_output_hash_chains = pre_output_account.hash_chain_stores; + + let mut pre_mt_account_bytes = mt_account_data.clone(); + let pre_merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_bytes, &mt_pubkey) + .unwrap(); + let pre_mt_account = *pre_merkle_tree_account.get_metadata(); + let pre_roots = pre_merkle_tree_account + .root_history + .iter() + .cloned() + .collect(); + let pre_mt_hash_chains = pre_merkle_tree_account.hash_chain_stores; + + if !outputs.is_empty() || !inputs.is_empty() { + println!("Simulating tx with inputs: {:?}", instruction_data); + let event = simulate_transaction( + instruction_data, + &mut pre_mt_data, + &mut output_queue_account_data, + &mock_indexer.merkle_tree, + &mut current_slot, + &mt_pubkey, + ) + 
.unwrap(); + mock_indexer.tx_events.push(event.clone()); + + if !inputs.is_empty() { + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) + .unwrap(); + println!("inputs: {:?}", inputs); + assert_nullifier_queue_insert( + pre_mt_account, + &mut pre_output_value_stores, // mut to remove values proven by index + pre_roots, + pre_mt_hash_chains, + merkle_tree_account, + inputs.clone(), + leaf_indices.clone(), + event.tx_hash, + input_is_in_tree, + array_indices, + ¤t_slot, + ) + .unwrap(); + } + + if !outputs.is_empty() { + assert_output_queue_insert( + pre_output_metadata, + pre_output_value_stores, + pre_output_hash_chains, + BatchedQueueAccount::output_from_bytes( + &mut output_queue_account_data.clone(), // clone so that data cannot be modified + ) + .unwrap(), + outputs.clone(), + current_slot, + ) + .unwrap(); + } + + for i in 0..number_of_inputs { + mock_indexer + .input_queue_leaves + .push((inputs[i], leaf_indices[i] as usize)); + } + for output in outputs.iter() { + mock_indexer.active_leaves.push(*output); + mock_indexer.output_queue_leaves.push(*output); + } + + num_output_values += number_of_outputs; + num_input_values += number_of_inputs; + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) + .unwrap(); + in_ready_for_update = merkle_tree_account + .queue_batches + .batches + .iter() + .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + out_ready_for_update = output_account + .batch_metadata + .batches + .iter() + .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); + + mt_account_data = pre_mt_data.clone(); + } else { + println!("Skipping simulate tx for no inputs or outputs"); + } + current_slot += 1; + } + + if in_ready_for_update && rng.gen_bool(1.0) { + println!("Input update -----------------------------"); + println!("Num inserted 
values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + let mut pre_mt_account_data = mt_account_data.clone(); + let old_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let (input_res, new_root) = { + let mut account = BatchedMerkleTreeAccount::state_from_bytes( + &mut pre_mt_account_data, + &mt_pubkey, + ) + .unwrap(); + println!("batches {:?}", account.queue_batches.batches); + + let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; + let batch = account + .queue_batches + .batches + .get(next_full_batch as usize) + .unwrap(); + println!( + "account + .hash_chain_stores {:?}", + account.hash_chain_stores + ); + println!("hash_chain store len {:?}", account.hash_chain_stores.len()); + println!( + "batch.get_num_inserted_zkps() as usize {:?}", + batch.get_num_inserted_zkps() as usize + ); + let leaves_hash_chain = account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + + let (proof, new_root) = mock_indexer + .get_batched_update_proof( + account.get_metadata().queue_batches.zkp_batch_size as u32, + *leaves_hash_chain, + ) + .await + .unwrap(); + let instruction_data = InstructionDataBatchNullifyInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + ( + account.update_tree_from_input_queue(instruction_data), + new_root, + ) + }; + println!("Input update -----------------------------"); + println!("res {:?}", input_res); + assert!(input_res.is_ok()); + let nullify_event = input_res.unwrap(); + in_ready_for_update = false; + // assert Merkle tree + // sequence number increased X + // next index increased X + // current root index increased X + // One root changed one didn't + + let account = + 
BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + assert_nullify_event(nullify_event, new_root, &old_account, mt_pubkey); + assert_merkle_tree_update(old_account, account, None, None, new_root, &mut batch_roots); + mt_account_data = pre_mt_account_data.clone(); + + num_input_updates += 1; + } + + if out_ready_for_update && rng.gen_bool(1.0) { + println!("Output update -----------------------------"); + println!("Num inserted values: {}", num_input_values); + println!("Num input updates: {}", num_input_updates); + println!("Num output updates: {}", num_output_updates); + println!("Num output values: {}", num_output_values); + + let mut pre_mt_account_data = mt_account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let next_index = account.get_metadata().next_index; + let next_full_batch = output_account + .get_metadata() + .batch_metadata + .pending_batch_index; + let batch = output_account + .batch_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves_hash_chain = output_account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hash_chain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + + let instruction_data = InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + let mut pre_output_queue_state = output_queue_account_data.clone(); + println!("Output update -----------------------------"); + + let queue_account = + &mut 
BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let output_res = + account.update_tree_from_output_queue_account(queue_account, instruction_data); + println!("output_res: {:?}", output_res); + assert!(output_res.is_ok()); + let batch_append_event = output_res.unwrap(); + + assert_eq!( + *account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let old_output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let old_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + assert_batch_append_event_event( + batch_append_event, + new_root, + &old_output_account, + &old_account, + mt_pubkey, + ); + assert_merkle_tree_update( + old_account, + account, + Some(old_output_account), + Some(output_account), + new_root, + &mut batch_roots, + ); + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + out_ready_for_update = false; + num_output_updates += 1; + } + } + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); + println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); + println!("num_output_updates: {}", num_output_updates); + println!("num_input_updates: {}", num_input_updates); + println!("num_output_values: {}", num_output_values); + println!("num_input_values: {}", num_input_values); +} diff --git a/program-tests/batched-merkle-tree-test/tests/e2e_tests/state.rs b/program-tests/batched-merkle-tree-test/tests/e2e_tests/state.rs new file mode 100644 index 0000000000..ab45fdb133 --- /dev/null +++ 
b/program-tests/batched-merkle-tree-test/tests/e2e_tests/state.rs @@ -0,0 +1,468 @@ +#![allow(unused_assignments)] + +use light_array_map::ArrayMap; +use light_batched_merkle_tree::{ + batch::BatchState, + constants::{DEFAULT_BATCH_STATE_TREE_HEIGHT, NUM_BATCHES}, + errors::BatchedMerkleTreeError, + initialize_state_tree::{ + init_batched_state_merkle_tree_accounts, + test_utils::get_state_merkle_tree_account_size_from_params, + InitStateTreeAccountsInstructionData, + }, + merkle_tree::{BatchedMerkleTreeAccount, InstructionDataBatchAppendInputs}, + queue::{test_utils::get_output_queue_account_size_from_params, BatchedQueueAccount}, +}; +use light_bloom_filter::BloomFilter; +use light_compressed_account::{ + hash_chain::create_hash_chain_from_slice, instruction_data::compressed_proof::CompressedProof, + pubkey::Pubkey, +}; +use light_prover_client::prover::spawn_prover; +use light_test_utils::mock_batched_forester::{MockBatchedForester, MockTxEvent}; +use rand::rngs::StdRng; +use serial_test::serial; + +use crate::e2e_tests::shared::*; + +#[serial] +#[tokio::test] +async fn test_fill_state_queues_completely() { + spawn_prover().await; + let mut current_slot = 1; + let roothistory_capacity = vec![17, 80]; + for root_history_capacity in roothistory_capacity { + let mut mock_indexer = + MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); + + let mut params = InitStateTreeAccountsInstructionData::test_default(); + params.output_queue_batch_size = params.input_queue_batch_size * 10; + // Root history capacity which is greater than the input updates + params.root_history_capacity = root_history_capacity; + + let owner = Pubkey::new_unique(); + + let queue_account_size = get_output_queue_account_size_from_params(params); + + let mut output_queue_account_data = vec![0; queue_account_size]; + let output_queue_pubkey = Pubkey::new_unique(); + + let mt_account_size = get_state_merkle_tree_account_size_from_params(params); + let mut mt_account_data = 
vec![0; mt_account_size]; + let mt_pubkey = Pubkey::new_unique(); + + let merkle_tree_rent = 1_000_000_000; + let queue_rent = 1_000_000_000; + let additional_bytes_rent = 1000; + + init_batched_state_merkle_tree_accounts( + owner, + params, + &mut output_queue_account_data, + output_queue_pubkey, + queue_rent, + &mut mt_account_data, + mt_pubkey, + merkle_tree_rent, + additional_bytes_rent, + ) + .unwrap(); + use rand::SeedableRng; + let mut rng = StdRng::seed_from_u64(0); + + // Track roots created during each batch insertion (batch_index -> roots) + let mut batch_roots: ArrayMap, 2> = ArrayMap::new(); + + let num_tx = NUM_BATCHES as u64 * params.output_queue_batch_size; + + // Fill up complete output queue + for _ in 0..num_tx { + // Output queue + + let rnd_bytes = get_rnd_bytes(&mut rng); + let mut pre_output_queue_account_data = output_queue_account_data.clone(); + let pre_output_account = + BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_account_data).unwrap(); + let pre_account = *pre_output_account.get_metadata(); + let pre_value_store = pre_output_account.value_vecs; + let pre_hash_chains = pre_output_account.hash_chain_stores; + + let mut output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + output_account + .insert_into_current_batch(&rnd_bytes, ¤t_slot) + .unwrap(); + assert_output_queue_insert( + pre_account, + pre_value_store, + pre_hash_chains, + BatchedQueueAccount::output_from_bytes( + &mut output_queue_account_data.clone(), // clone so that data cannot be modified + ) + .unwrap(), + vec![rnd_bytes], + current_slot, + ) + .unwrap(); + current_slot += 1; + mock_indexer.output_queue_leaves.push(rnd_bytes); + } + let rnd_bytes = get_rnd_bytes(&mut rng); + let mut output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + + let result = output_account.insert_into_current_batch(&rnd_bytes, ¤t_slot); + assert_eq!(result.unwrap_err(), 
BatchedMerkleTreeError::BatchNotReady); + + output_account + .batch_metadata + .batches + .iter() + .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); + + // Batch insert output queue into merkle tree. + for _ in 0..output_account + .get_metadata() + .batch_metadata + .get_num_zkp_batches() + { + println!("Output update -----------------------------"); + let mut pre_mt_account_data = mt_account_data.clone(); + let mut account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let mut pre_output_queue_state = output_queue_account_data.clone(); + let output_account = + BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); + let next_index = account.get_metadata().next_index; + let next_full_batch = output_account + .get_metadata() + .batch_metadata + .pending_batch_index; + let batch = output_account + .batch_metadata + .batches + .get(next_full_batch as usize) + .unwrap(); + let leaves = mock_indexer.output_queue_leaves.clone(); + let leaves_hash_chain = output_account + .hash_chain_stores + .get(next_full_batch as usize) + .unwrap() + .get(batch.get_num_inserted_zkps() as usize) + .unwrap(); + let (proof, new_root) = mock_indexer + .get_batched_append_proof( + next_index as usize, + batch.get_num_inserted_zkps() as u32, + batch.zkp_batch_size as u32, + *leaves_hash_chain, + batch.get_num_zkp_batches() as u32, + ) + .await + .unwrap(); + let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; + let end = start + batch.zkp_batch_size as usize; + for leaf in &leaves[start..end] { + // Storing the leaf in the output queue indexer so that it + // can be inserted into the input queue later. 
+ mock_indexer.active_leaves.push(*leaf); + } + + let instruction_data = InstructionDataBatchAppendInputs { + new_root, + compressed_proof: CompressedProof { + a: proof.a, + b: proof.b, + c: proof.c, + }, + }; + + println!("Output update -----------------------------"); + let queue_account = + &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); + let output_res = + account.update_tree_from_output_queue_account(queue_account, instruction_data); + assert!(output_res.is_ok()); + + assert_eq!( + *account.root_history.last().unwrap(), + mock_indexer.merkle_tree.root() + ); + + // Track root for this batch + let batch_idx = next_full_batch as u32; + if let Some(roots) = batch_roots.get_mut_by_key(&batch_idx) { + roots.push(new_root); + } else { + batch_roots.insert(batch_idx, vec![new_root], ()).unwrap(); + } + + output_queue_account_data = pre_output_queue_state; + mt_account_data = pre_mt_account_data; + } + + // Fill up complete input queue. + let num_tx = NUM_BATCHES as u64 * params.input_queue_batch_size; + let mut first_value = [0u8; 32]; + for tx in 0..num_tx { + println!("Input insert ----------------------------- {}", tx); + let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); + let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); + + let mut pre_mt_account_data = mt_account_data.clone(); + let pre_merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) + .unwrap(); + let pre_account = *pre_merkle_tree_account.get_metadata(); + let pre_roots = pre_merkle_tree_account + .root_history + .iter() + .cloned() + .collect(); + let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; + let tx_hash = create_hash_chain_from_slice(&[leaf]).unwrap(); + // Index input queue insert event + mock_indexer.input_queue_leaves.push((leaf, leaf_index)); + mock_indexer.tx_events.push(MockTxEvent { + inputs: vec![leaf], + outputs: vec![], + tx_hash, + }); 
+ println!("leaf {:?}", leaf); + println!("leaf_index {:?}", leaf_index); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + merkle_tree_account + .insert_nullifier_into_queue( + &leaf.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ) + .unwrap(); + println!("current slot {:?}", current_slot); + assert_nullifier_queue_insert( + pre_account, + &mut [], + pre_roots, + pre_hash_chains, + merkle_tree_account, + vec![leaf], + vec![leaf_index as u64], + tx_hash, + vec![true], + vec![], + ¤t_slot, + ) + .unwrap(); + current_slot += 1; + println!("leaf {:?}", leaf); + // Insert the same value twice + { + // copy data so that failing test doesn't affect the state of + // subsequent tests + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let result = merkle_tree_account.insert_nullifier_into_queue( + &leaf.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ); + result.unwrap_err(); + // assert_eq!( + // result.unwrap_err(), + // BatchedMerkleTreeError::BatchInsertFailed.into() + // ); + } + // Try to insert first value into any batch + if tx == 0 { + first_value = leaf; + } else { + let mut mt_account_data = mt_account_data.clone(); + let mut merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let result = merkle_tree_account.insert_nullifier_into_queue( + &first_value.to_vec().try_into().unwrap(), + leaf_index as u64, + &tx_hash, + ¤t_slot, + ); + // assert_eq!( + // result.unwrap_err(), + // BatchedMerkleTreeError::BatchInsertFailed.into() + // ); + result.unwrap_err(); + // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); + } + } + // Assert input queue is full and doesn't accept more inserts + { + let merkle_tree_account = + &mut 
BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let rnd_bytes = get_rnd_bytes(&mut rng); + let tx_hash = get_rnd_bytes(&mut rng); + let result = merkle_tree_account.insert_nullifier_into_queue( + &rnd_bytes, + 0, + &tx_hash, + ¤t_slot, + ); + assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); + } + // Root of the final batch of first input queue batch + let mut first_input_batch_update_root_value = [0u8; 32]; + let num_updates = + params.input_queue_batch_size / params.input_queue_zkp_batch_size * NUM_BATCHES as u64; + for i in 0..num_updates { + println!("input update ----------------------------- {}", i); + + perform_input_update( + &mut mt_account_data, + &mut mock_indexer, + false, + mt_pubkey, + &mut batch_roots, + ) + .await; + + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + + // after 5 updates the first batch is completely inserted + // As soon as we switch to inserting the second batch we zero out the first batch since + // the second batch is completely full. 
+ if i >= 5 { + let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); + assert!(batch.bloom_filter_is_zeroed()); + + // Assert that none of the unsafe roots from batch 0 exist in root history + if let Some(unsafe_roots) = batch_roots.get_by_key(&0) { + for unsafe_root in unsafe_roots { + assert!( + !merkle_tree_account + .root_history + .iter() + .any(|x| *x == *unsafe_root), + "Unsafe root from batch 0 should be zeroed: {:?}", + unsafe_root + ); + } + } + } else { + let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); + assert!(!batch.bloom_filter_is_zeroed()); + } + let batch_one = &merkle_tree_account.queue_batches.batches[1]; + assert!(!batch_one.bloom_filter_is_zeroed()); + + println!( + "performed input queue batched update {} created root {:?}", + i, + mock_indexer.merkle_tree.root() + ); + if i == 4 { + first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); + } + let merkle_tree_account = + BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + println!( + "root {:?}", + merkle_tree_account.root_history.last().unwrap() + ); + println!( + "root last index {:?}", + merkle_tree_account.root_history.last_index() + ); + } + // assert all bloom_filters are inserted + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + for (i, batch) in merkle_tree_account.queue_batches.batches.iter().enumerate() { + assert_eq!(batch.get_state(), BatchState::Inserted); + if i == 0 { + assert!(batch.bloom_filter_is_zeroed()); + } else { + assert!(!batch.bloom_filter_is_zeroed()); + } + } + } + // do one insert and expect that roots until merkle_tree_account.batches[0].root_index are zero + { + let merkle_tree_account = + &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) + .unwrap(); + let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); + + let value = 
&get_rnd_bytes(&mut rng); + let tx_hash = &get_rnd_bytes(&mut rng); + merkle_tree_account + .insert_nullifier_into_queue(value, 0, tx_hash, ¤t_slot) + .unwrap(); + { + let post_batch = *merkle_tree_account.queue_batches.batches.first().unwrap(); + assert_eq!(post_batch.get_state(), BatchState::Fill); + assert_eq!(post_batch.get_num_inserted_zkp_batch(), 1); + let bloom_filter_store = + merkle_tree_account.bloom_filter_stores.get_mut(0).unwrap(); + let mut bloom_filter = BloomFilter::new( + params.bloom_filter_num_iters as usize, + params.bloom_filter_capacity, + bloom_filter_store, + ) + .unwrap(); + assert!(bloom_filter.contains(value)); + } + + for root in merkle_tree_account.root_history.iter() { + println!("root {:?}", root); + } + println!( + "root in root index {:?}", + merkle_tree_account.root_history[pre_batch_zero.root_index as usize] + ); + // for batch_idx in 0..NUM_BATCHES as u32 { + // println!("batch idx {:?}", batch_idx); + // for root in batch_roots.get(batch_idx as usize).unwrap().1.iter() { + // println!("tracked root {:?}", root); + // } + // } + // check that all roots have been overwritten except the root index + // of the update + let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; + let start = merkle_tree_account.root_history.last_index() as u32; + println!("start {:?}", start); + for root in start + 1..pre_batch_zero.root_index + root_history_len { + println!("actual index {:?}", root); + let index = root % root_history_len; + + if index == pre_batch_zero.root_index { + let root_index = pre_batch_zero.root_index as usize; + + assert_eq!( + merkle_tree_account.root_history[root_index], + first_input_batch_update_root_value + ); + assert_eq!(merkle_tree_account.root_history[root_index], [0u8; 32]); + // First non zeroed root. 
+ assert_ne!(merkle_tree_account.root_history[root_index + 1], [0u8; 32]); + break; + } + println!("index {:?}", index); + assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); + } + } + } +} diff --git a/program-tests/batched-merkle-tree-test/tests/merkle_tree.rs b/program-tests/batched-merkle-tree-test/tests/merkle_tree.rs deleted file mode 100644 index f06ad2e867..0000000000 --- a/program-tests/batched-merkle-tree-test/tests/merkle_tree.rs +++ /dev/null @@ -1,2206 +0,0 @@ -#![allow(unused_assignments)] -use std::cmp::min; - -use light_batched_merkle_tree::{ - batch::BatchState, - constants::{ - ACCOUNT_COMPRESSION_PROGRAM_ID, DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, - DEFAULT_BATCH_STATE_TREE_HEIGHT, NUM_BATCHES, - }, - errors::BatchedMerkleTreeError, - initialize_address_tree::{ - get_address_merkle_tree_account_size_from_params, init_batched_address_merkle_tree_account, - InitAddressTreeAccountsInstructionData, - }, - initialize_state_tree::{ - init_batched_state_merkle_tree_accounts, - test_utils::get_state_merkle_tree_account_size_from_params, - InitStateTreeAccountsInstructionData, - }, - merkle_tree::{ - assert_batch_adress_event, assert_batch_append_event_event, assert_nullify_event, - test_utils::get_merkle_tree_account_size_default, BatchedMerkleTreeAccount, - InstructionDataBatchAppendInputs, InstructionDataBatchNullifyInputs, - }, - merkle_tree_metadata::BatchedMerkleTreeMetadata, - queue::{ - test_utils::{ - get_output_queue_account_size_default, get_output_queue_account_size_from_params, - }, - BatchedQueueAccount, BatchedQueueMetadata, - }, -}; -use light_bloom_filter::{BloomFilter, BloomFilterError}; -use light_compressed_account::{ - hash_chain::create_hash_chain_from_slice, instruction_data::compressed_proof::CompressedProof, - pubkey::Pubkey, -}; -use light_hasher::{Hasher, Poseidon}; -use light_merkle_tree_reference::MerkleTree; -use light_prover_client::prover::spawn_prover; -use light_test_utils::mock_batched_forester::{ - 
MockBatchedAddressForester, MockBatchedForester, MockTxEvent, -}; -use light_zero_copy::vec::ZeroCopyVecU64; -use rand::{rngs::StdRng, Rng}; -use serial_test::serial; - -#[allow(clippy::too_many_arguments)] -pub fn assert_nullifier_queue_insert( - pre_account: BatchedMerkleTreeMetadata, - pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], - pre_roots: Vec<[u8; 32]>, - pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], - merkle_tree_account: BatchedMerkleTreeAccount, - bloom_filter_insert_values: Vec<[u8; 32]>, - leaf_indices: Vec, - tx_hash: [u8; 32], - input_is_in_tree: Vec, - array_indices: Vec, - current_slot: &u64, -) -> Result<(), BatchedMerkleTreeError> { - let mut leaf_hash_chain_insert_values = vec![]; - for (insert_value, leaf_index) in bloom_filter_insert_values.iter().zip(leaf_indices.iter()) { - let nullifier = - Poseidon::hashv(&[insert_value.as_slice(), &leaf_index.to_be_bytes(), &tx_hash]) - .unwrap(); - leaf_hash_chain_insert_values.push(nullifier); - } - assert_input_queue_insert( - pre_account, - pre_value_vecs, - pre_roots, - pre_hash_chains, - merkle_tree_account, - bloom_filter_insert_values, - leaf_hash_chain_insert_values, - input_is_in_tree, - array_indices, - current_slot, - ) -} -/// Insert into input queue: -/// 1. New value exists in the current batch bloom_filter -/// 2. 
New value does not exist in the other batch bloom_filters -#[allow(clippy::too_many_arguments)] -pub fn assert_input_queue_insert( - mut pre_account: BatchedMerkleTreeMetadata, - pre_value_vecs: &mut [ZeroCopyVecU64<[u8; 32]>], - pre_roots: Vec<[u8; 32]>, - mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], - mut merkle_tree_account: BatchedMerkleTreeAccount, - bloom_filter_insert_values: Vec<[u8; 32]>, - leaf_hash_chain_insert_values: Vec<[u8; 32]>, - input_is_in_tree: Vec, - array_indices: Vec, - current_slot: &u64, -) -> Result<(), BatchedMerkleTreeError> { - let mut should_be_zeroed = false; - for (i, insert_value) in bloom_filter_insert_values.iter().enumerate() { - if !input_is_in_tree[i] { - let value_vec_index = array_indices[i]; - assert!( - pre_value_vecs.iter_mut().any(|value_vec| { - if value_vec.len() > value_vec_index { - { - if value_vec[value_vec_index] == *insert_value { - value_vec[value_vec_index] = [0u8; 32]; - true - } else { - false - } - } - } else { - false - } - }), - "Value not in value vec." 
- ); - } - - let post_roots: Vec<[u8; 32]> = merkle_tree_account.root_history.iter().cloned().collect(); - // if root buffer changed it must be only overwritten by [0u8;32] - if post_roots != pre_roots { - let only_zero_overwrites = post_roots - .iter() - .zip(pre_roots.iter()) - .all(|(post, pre)| *post == *pre || *post == [0u8; 32]); - println!("pre_roots: {:?}", pre_roots); - println!("post_roots: {:?}", post_roots); - if !only_zero_overwrites { - panic!("Root buffer changed.") - } - } - - let inserted_batch_index = - pre_account.queue_batches.currently_processing_batch_index as usize; - let expected_batch = pre_account - .queue_batches - .batches - .get_mut(inserted_batch_index) - .unwrap(); - - pre_account.queue_batches.next_index += 1; - - println!( - "assert input queue batch update: expected_batch: {:?}", - expected_batch - ); - println!( - "assert input queue batch update: expected_batch.get_num_inserted_elements(): {}", - expected_batch.get_num_inserted_elements() - ); - println!( - "assert input queue batch update: expected_batch.batch_size / 2: {}", - expected_batch.batch_size / 2 - ); - - if !should_be_zeroed && expected_batch.get_state() == BatchState::Inserted { - should_be_zeroed = - expected_batch.get_num_inserted_elements() == expected_batch.batch_size / 2; - } - println!( - "assert input queue batch update: should_be_zeroed: {}", - should_be_zeroed - ); - if expected_batch.get_state() == BatchState::Inserted { - println!("assert input queue batch update: clearing batch"); - pre_hash_chains[inserted_batch_index].clear(); - expected_batch.advance_state_to_fill(None).unwrap(); - expected_batch.set_start_slot(current_slot); - println!("setting start slot to {}", current_slot); - } else if expected_batch.get_state() == BatchState::Fill - && !expected_batch.start_slot_is_set() - { - // Batch is filled for the first time - expected_batch.set_start_slot(current_slot); - } - println!( - "assert input queue batch update: inserted_batch_index: {}", - 
inserted_batch_index - ); - // New value exists in the current batch bloom filter - let mut bloom_filter = BloomFilter::new( - merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize, - merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity, - merkle_tree_account.bloom_filter_stores[inserted_batch_index], - ) - .unwrap(); - println!( - "assert input queue batch update: insert_value: {:?}", - insert_value - ); - assert!(bloom_filter.contains(insert_value)); - let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); - expected_batch.add_to_hash_chain(&leaf_hash_chain_insert_values[i], pre_hash_chain)?; - - let num_iters = - merkle_tree_account.queue_batches.batches[inserted_batch_index].num_iters as usize; - let bloom_filter_capacity = - merkle_tree_account.queue_batches.batches[inserted_batch_index].bloom_filter_capacity; - // New value does not exist in the other batch bloom_filters - for (i, store) in merkle_tree_account - .bloom_filter_stores - .iter_mut() - .enumerate() - { - // Skip current batch it is already checked above - if i != inserted_batch_index { - let mut bloom_filter = - BloomFilter::new(num_iters, bloom_filter_capacity, store).unwrap(); - assert!(!bloom_filter.contains(insert_value)); - } - } - // if the currently processing batch changed it should - // increment by one and the old batch should be ready to - // update - if expected_batch.get_current_zkp_batch_index() == expected_batch.get_num_zkp_batches() { - assert_eq!( - merkle_tree_account.queue_batches.batches - [pre_account.queue_batches.currently_processing_batch_index as usize] - .get_state(), - BatchState::Full - ); - pre_account.queue_batches.currently_processing_batch_index += 1; - pre_account.queue_batches.currently_processing_batch_index %= - pre_account.queue_batches.num_batches; - assert_eq!( - merkle_tree_account.queue_batches.batches[inserted_batch_index], - *expected_batch - ); - assert_eq!( - 
merkle_tree_account.hash_chain_stores[inserted_batch_index] - .last() - .unwrap(), - pre_hash_chain.last().unwrap(), - "Hashchain store inconsistent." - ); - } - } - - assert_eq!( - *merkle_tree_account.get_metadata(), - pre_account, - "BatchedMerkleTreeMetadata changed." - ); - let inserted_batch_index = pre_account.queue_batches.currently_processing_batch_index as usize; - let mut expected_batch = pre_account.queue_batches.batches[inserted_batch_index]; - if should_be_zeroed { - expected_batch.set_bloom_filter_to_zeroed(); - } - assert_eq!( - merkle_tree_account.queue_batches.batches[inserted_batch_index], - expected_batch - ); - let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; - assert_eq!( - merkle_tree_account.queue_batches.batches[other_batch], - pre_account.queue_batches.batches[other_batch] - ); - assert_eq!( - merkle_tree_account.hash_chain_stores, pre_hash_chains, - "Hashchain store inconsistent." - ); - Ok(()) -} - -/// Expected behavior for insert into output queue: -/// - add value to value array -/// - batch.num_inserted += 1 -/// - if batch is full after insertion advance state to ReadyToUpdateTree -pub fn assert_output_queue_insert( - mut pre_account: BatchedQueueMetadata, - // mut pre_batches: Vec, - mut pre_value_store: [ZeroCopyVecU64<[u8; 32]>; 2], - mut pre_hash_chains: [ZeroCopyVecU64<[u8; 32]>; 2], - mut output_account: BatchedQueueAccount, - insert_values: Vec<[u8; 32]>, - current_slot: u64, -) -> Result<(), BatchedMerkleTreeError> { - for batch in output_account.batch_metadata.batches.iter_mut() { - println!("output_account.batch: {:?}", batch); - } - for batch in pre_account.batch_metadata.batches.iter() { - println!("pre_batch: {:?}", batch); - } - for insert_value in insert_values.iter() { - // if the currently processing batch changed it should - // increment by one and the old batch should be ready to - // update - - let inserted_batch_index = - pre_account.batch_metadata.currently_processing_batch_index as usize; - let 
expected_batch = &mut pre_account.batch_metadata.batches[inserted_batch_index]; - let pre_value_store = pre_value_store.get_mut(inserted_batch_index).unwrap(); - let pre_hash_chain = pre_hash_chains.get_mut(inserted_batch_index).unwrap(); - if expected_batch.get_state() == BatchState::Inserted { - expected_batch - .advance_state_to_fill(Some(pre_account.batch_metadata.next_index)) - .unwrap(); - pre_value_store.clear(); - pre_hash_chain.clear(); - } - pre_account.batch_metadata.next_index += 1; - expected_batch.store_and_hash_value( - insert_value, - pre_value_store, - pre_hash_chain, - ¤t_slot, - )?; - - let other_batch = if inserted_batch_index == 0 { 1 } else { 0 }; - assert!(output_account.value_vecs[inserted_batch_index] - .as_mut_slice() - .to_vec() - .contains(insert_value)); - assert!(!output_account.value_vecs[other_batch] - .as_mut_slice() - .to_vec() - .contains(insert_value)); - if expected_batch.get_num_zkp_batches() == expected_batch.get_current_zkp_batch_index() { - assert_eq!( - output_account.batch_metadata.batches - [pre_account.batch_metadata.currently_processing_batch_index as usize] - .get_state(), - BatchState::Full - ); - pre_account.batch_metadata.currently_processing_batch_index += 1; - pre_account.batch_metadata.currently_processing_batch_index %= - pre_account.batch_metadata.num_batches; - assert_eq!( - output_account.batch_metadata.batches[inserted_batch_index], - *expected_batch - ); - } - } - assert_eq!( - *output_account.get_metadata(), - pre_account, - "BatchedQueueAccount changed." 
- ); - assert_eq!(pre_hash_chains, output_account.hash_chain_stores); - for (i, (value_store, pre)) in output_account - .value_vecs - .iter() - .zip(pre_value_store.iter()) - .enumerate() - { - for (j, (value, pre_value)) in value_store.iter().zip(pre.iter()).enumerate() { - assert_eq!( - *value, *pre_value, - "{} {} \n value store {:?}\n pre {:?}", - i, j, value_store, pre - ); - } - } - assert_eq!(pre_value_store, output_account.value_vecs); - Ok(()) -} - -#[derive(Debug, PartialEq, Clone)] -pub struct MockTransactionInputs { - inputs: Vec<[u8; 32]>, - outputs: Vec<[u8; 32]>, -} - -pub fn simulate_transaction( - instruction_data: MockTransactionInputs, - merkle_tree_account_data: &mut [u8], - output_queue_account_data: &mut [u8], - reference_merkle_tree: &MerkleTree, - current_slot: &mut u64, - mt_pubkey: &Pubkey, -) -> Result { - let mut output_account = - BatchedQueueAccount::output_from_bytes(output_queue_account_data).unwrap(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(merkle_tree_account_data, mt_pubkey).unwrap(); - let flattened_inputs = instruction_data - .inputs - .iter() - .cloned() - .chain(instruction_data.outputs.iter().cloned()) - .collect::>(); - let tx_hash = create_hash_chain_from_slice(flattened_inputs.as_slice())?; - - for input in instruction_data.inputs.iter() { - // zkp inclusion in Merkle tree - let inclusion = reference_merkle_tree.get_leaf_index(input); - let leaf_index = if let Some(leaf_index) = inclusion { - leaf_index as u64 - } else { - println!("simulate_transaction: inclusion is none"); - let mut included = false; - let mut leaf_index = 0; - let start_indices = output_account - .batch_metadata - .batches - .iter() - .map(|batch| batch.start_index) - .collect::>(); - - for (batch_index, value_vec) in output_account.value_vecs.iter_mut().enumerate() { - for (value_index, value) in value_vec.iter_mut().enumerate() { - if *value == *input { - let batch_start_index = start_indices[batch_index]; - 
included = true; - println!("overwriting value: {:?}", value); - *value = [0u8; 32]; - leaf_index = value_index as u64 + batch_start_index; - } - } - } - if !included { - panic!("Value not included in any output queue or trees."); - } - leaf_index - }; - - println!( - "sim tx input: \n {:?} \nleaf index : {:?}, \ntx hash {:?}", - input, leaf_index, tx_hash, - ); - merkle_tree_account.insert_nullifier_into_queue( - input, - leaf_index, - &tx_hash, - current_slot, - )?; - } - - for output in instruction_data.outputs.iter() { - let leaf_index = output_account.batch_metadata.next_index; - println!( - "sim tx output: \n {:?} \nleaf index : {:?}", - output, leaf_index - ); - output_account.insert_into_current_batch(output, current_slot)?; - } - Ok(MockTxEvent { - inputs: instruction_data.inputs.clone(), - outputs: instruction_data.outputs.clone(), - tx_hash, - }) -} - -#[serial] -#[tokio::test] -async fn test_simulate_transactions() { - spawn_prover().await; - let mut mock_indexer = - MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); - - let num_tx = 2200; - let owner = Pubkey::new_unique(); - - let queue_account_size = get_output_queue_account_size_default(); - - let mut output_queue_account_data = vec![0; queue_account_size]; - let output_queue_pubkey = Pubkey::new_unique(); - - let mt_account_size = get_merkle_tree_account_size_default(); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = ACCOUNT_COMPRESSION_PROGRAM_ID.into(); - - let params = InitStateTreeAccountsInstructionData::test_default(); - - let merkle_tree_rent = 1_000_000_000; - let queue_rent = 1_000_000_000; - let additional_bytes_rent = 1000; - - init_batched_state_merkle_tree_accounts( - owner, - params, - &mut output_queue_account_data, - output_queue_pubkey, - queue_rent, - &mut mt_account_data, - mt_pubkey, - merkle_tree_rent, - additional_bytes_rent, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - let mut 
in_ready_for_update = false; - let mut out_ready_for_update = false; - let mut num_output_updates = 0; - let mut num_input_updates = 0; - let mut num_input_values = 0; - let mut num_output_values = 0; - let mut current_slot = rng.gen(); - - for tx in 0..num_tx { - println!("tx: {}", tx); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_updates: {}", num_output_updates); - { - println!("Simulate tx {} -----------------------------", tx); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - let number_of_outputs = rng.gen_range(0..7); - let mut outputs = vec![]; - for _ in 0..number_of_outputs { - outputs.push(get_rnd_bytes(&mut rng)); - } - let number_of_inputs = if rng.gen_bool(0.5) { - if !mock_indexer.active_leaves.is_empty() { - let x = min(mock_indexer.active_leaves.len(), 5); - rng.gen_range(0..x) - } else { - 0 - } - } else { - 0 - }; - - let mut inputs = vec![]; - let mut input_is_in_tree = vec![]; - let mut leaf_indices = vec![]; - let mut array_indices = vec![]; - let mut retries = min(10, mock_indexer.active_leaves.len()); - while inputs.len() < number_of_inputs && retries > 0 { - let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); - let inserted = mock_indexer.merkle_tree.get_leaf_index(&leaf); - if let Some(leaf_index) = inserted { - inputs.push(leaf); - leaf_indices.push(leaf_index as u64); - input_is_in_tree.push(true); - array_indices.push(0); - } else if rng.gen_bool(0.1) { - inputs.push(leaf); - let output_queue = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data) - .unwrap(); - let mut leaf_array_index = 0; - let mut batch_index = 0; - for (i, vec) in output_queue.value_vecs.iter().enumerate() { - let pos = vec.iter().position(|value| *value == leaf); - if let Some(pos) = pos { - leaf_array_index = 
pos; - batch_index = i; - break; - } - if i == output_queue.value_vecs.len() - 1 { - panic!("Leaf not found in output queue."); - } - } - let batch = output_queue - .batch_metadata - .batches - .get(batch_index) - .unwrap(); - array_indices.push(leaf_array_index); - let leaf_index: u64 = batch.start_index + leaf_array_index as u64; - leaf_indices.push(leaf_index); - input_is_in_tree.push(false); - } - retries -= 1; - } - let number_of_inputs = inputs.len(); - println!("number_of_inputs: {}", number_of_inputs); - - let instruction_data = MockTransactionInputs { - inputs: inputs.clone(), - outputs: outputs.clone(), - }; - - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - println!( - "input queue: {:?}", - merkle_tree_account.queue_batches.batches[0].get_num_inserted_zkp_batch() - ); - - let mut pre_mt_data = mt_account_data.clone(); - let mut pre_account_bytes = output_queue_account_data.clone(); - - let pre_output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); - let pre_output_metadata = *pre_output_account.get_metadata(); - let mut pre_output_value_stores = pre_output_account.value_vecs; - let pre_output_hash_chains = pre_output_account.hash_chain_stores; - - let mut pre_mt_account_bytes = mt_account_data.clone(); - let pre_merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_bytes, &mt_pubkey) - .unwrap(); - let pre_mt_account = *pre_merkle_tree_account.get_metadata(); - let pre_roots = pre_merkle_tree_account - .root_history - .iter() - .cloned() - .collect(); - let pre_mt_hash_chains = pre_merkle_tree_account.hash_chain_stores; - - if !outputs.is_empty() || !inputs.is_empty() { - println!("Simulating tx with inputs: {:?}", instruction_data); - let event = simulate_transaction( - instruction_data, - &mut pre_mt_data, - &mut output_queue_account_data, - &mock_indexer.merkle_tree, - &mut current_slot, - &mt_pubkey, - ) - 
.unwrap(); - mock_indexer.tx_events.push(event.clone()); - - if !inputs.is_empty() { - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) - .unwrap(); - println!("inputs: {:?}", inputs); - assert_nullifier_queue_insert( - pre_mt_account, - &mut pre_output_value_stores, // mut to remove values proven by index - pre_roots, - pre_mt_hash_chains, - merkle_tree_account, - inputs.clone(), - leaf_indices.clone(), - event.tx_hash, - input_is_in_tree, - array_indices, - ¤t_slot, - ) - .unwrap(); - } - - if !outputs.is_empty() { - assert_output_queue_insert( - pre_output_metadata, - pre_output_value_stores, - pre_output_hash_chains, - BatchedQueueAccount::output_from_bytes( - &mut output_queue_account_data.clone(), // clone so that data cannot be modified - ) - .unwrap(), - outputs.clone(), - current_slot, - ) - .unwrap(); - } - - for i in 0..number_of_inputs { - mock_indexer - .input_queue_leaves - .push((inputs[i], leaf_indices[i] as usize)); - } - for output in outputs.iter() { - mock_indexer.active_leaves.push(*output); - mock_indexer.output_queue_leaves.push(*output); - } - - num_output_values += number_of_outputs; - num_input_values += number_of_inputs; - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_data, &mt_pubkey) - .unwrap(); - in_ready_for_update = merkle_tree_account - .queue_batches - .batches - .iter() - .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - out_ready_for_update = output_account - .batch_metadata - .batches - .iter() - .any(|batch| batch.get_first_ready_zkp_batch().is_ok()); - - mt_account_data = pre_mt_data.clone(); - } else { - println!("Skipping simulate tx for no inputs or outputs"); - } - current_slot += 1; - } - - if in_ready_for_update && rng.gen_bool(1.0) { - println!("Input update -----------------------------"); - println!("Num inserted 
values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - let mut pre_mt_account_data = mt_account_data.clone(); - let old_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let (input_res, new_root) = { - let mut account = BatchedMerkleTreeAccount::state_from_bytes( - &mut pre_mt_account_data, - &mt_pubkey, - ) - .unwrap(); - println!("batches {:?}", account.queue_batches.batches); - - let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; - let batch = account - .queue_batches - .batches - .get(next_full_batch as usize) - .unwrap(); - println!( - "account - .hash_chain_stores {:?}", - account.hash_chain_stores - ); - println!("hash_chain store len {:?}", account.hash_chain_stores.len()); - println!( - "batch.get_num_inserted_zkps() as usize {:?}", - batch.get_num_inserted_zkps() as usize - ); - let leaves_hash_chain = account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - - let (proof, new_root) = mock_indexer - .get_batched_update_proof( - account.get_metadata().queue_batches.zkp_batch_size as u32, - *leaves_hash_chain, - ) - .await - .unwrap(); - let instruction_data = InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - ( - account.update_tree_from_input_queue(instruction_data), - new_root, - ) - }; - println!("Input update -----------------------------"); - println!("res {:?}", input_res); - assert!(input_res.is_ok()); - let nullify_event = input_res.unwrap(); - in_ready_for_update = false; - // assert Merkle tree - // sequence number increased X - // next index increased X - // current root index increased X - // One root changed one didn't - - let account = - 
BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - assert_nullify_event(nullify_event, new_root, &old_account, mt_pubkey); - assert_merkle_tree_update(old_account, account, None, None, new_root); - mt_account_data = pre_mt_account_data.clone(); - - num_input_updates += 1; - } - - if out_ready_for_update && rng.gen_bool(1.0) { - println!("Output update -----------------------------"); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - - let mut pre_mt_account_data = mt_account_data.clone(); - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let next_index = account.get_metadata().next_index; - let next_full_batch = output_account - .get_metadata() - .batch_metadata - .pending_batch_index; - let batch = output_account - .batch_metadata - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves_hash_chain = output_account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_append_proof( - next_index as usize, - batch.get_num_inserted_zkps() as u32, - batch.zkp_batch_size as u32, - *leaves_hash_chain, - batch.get_num_zkp_batches() as u32, - ) - .await - .unwrap(); - - let instruction_data = InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - let mut pre_output_queue_state = output_queue_account_data.clone(); - println!("Output update -----------------------------"); - - let queue_account = - &mut BatchedQueueAccount::output_from_bytes(&mut 
pre_output_queue_state).unwrap(); - let output_res = - account.update_tree_from_output_queue_account(queue_account, instruction_data); - println!("output_res: {:?}", output_res); - assert!(output_res.is_ok()); - let batch_append_event = output_res.unwrap(); - - assert_eq!( - *account.root_history.last().unwrap(), - mock_indexer.merkle_tree.root() - ); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let old_output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let old_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - assert_batch_append_event_event( - batch_append_event, - new_root, - &old_output_account, - &old_account, - mt_pubkey, - ); - assert_merkle_tree_update( - old_account, - account, - Some(old_output_account), - Some(output_account), - new_root, - ); - - output_queue_account_data = pre_output_queue_state; - mt_account_data = pre_mt_account_data; - out_ready_for_update = false; - num_output_updates += 1; - } - } - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - println!("num_output_updates: {}", num_output_updates); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_values: {}", num_output_values); - println!("num_input_values: {}", num_input_values); -} - -// Get random leaf that is not in the input queue. 
-pub fn get_random_leaf(rng: &mut StdRng, active_leaves: &mut Vec<[u8; 32]>) -> (usize, [u8; 32]) { - if active_leaves.is_empty() { - return (0, [0u8; 32]); - } - let index = rng.gen_range(0..active_leaves.len()); - // get random leaf from vector and remove it - (index, active_leaves.remove(index)) -} - -/// queues with a counter which keeps things below X tps and an if that -/// executes tree updates when possible. -#[serial] -#[tokio::test] -async fn test_e2e() { - spawn_prover().await; - let mut mock_indexer = - MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); - - let num_tx = 2200; - let owner = Pubkey::new_unique(); - - let queue_account_size = get_output_queue_account_size_default(); - - let mut output_queue_account_data = vec![0; queue_account_size]; - let output_queue_pubkey = Pubkey::new_unique(); - - let mt_account_size = get_merkle_tree_account_size_default(); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = Pubkey::new_unique(); - - let params = InitStateTreeAccountsInstructionData::test_default(); - - let merkle_tree_rent = 1_000_000_000; - let queue_rent = 1_000_000_000; - let additional_bytes_rent = 1000; - - init_batched_state_merkle_tree_accounts( - owner, - params, - &mut output_queue_account_data, - output_queue_pubkey, - queue_rent, - &mut mt_account_data, - mt_pubkey, - merkle_tree_rent, - additional_bytes_rent, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - let mut in_ready_for_update; - let mut out_ready_for_update; - let mut num_output_updates = 0; - let mut num_input_updates = 0; - let mut num_input_values = 0; - let mut num_output_values = 0; - let mut current_slot = rng.gen(); - - for tx in 0..num_tx { - println!("tx: {}", tx); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_updates: {}", num_output_updates); - // Output queue - { - if rng.gen_bool(0.5) { - println!("Output insert -----------------------------"); - 
println!("num_output_values: {}", num_output_values); - let rnd_bytes = get_rnd_bytes(&mut rng); - let mut pre_account_bytes = output_queue_account_data.clone(); - let pre_output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_account_bytes).unwrap(); - let pre_account = *pre_output_account.get_metadata(); - let pre_value_store = pre_output_account.value_vecs; - let pre_hash_chains = pre_output_account.hash_chain_stores; - let mut output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - output_account - .insert_into_current_batch(&rnd_bytes, ¤t_slot) - .unwrap(); - assert_output_queue_insert( - pre_account, - pre_value_store, - pre_hash_chains, - BatchedQueueAccount::output_from_bytes( - &mut output_queue_account_data.clone(), // clone so that data cannot be modified - ) - .unwrap(), - vec![rnd_bytes], - current_slot, - ) - .unwrap(); - current_slot += 1; - num_output_values += 1; - mock_indexer.output_queue_leaves.push(rnd_bytes); - } - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - out_ready_for_update = output_account - .batch_metadata - .batches - .iter() - .any(|batch| batch.get_state() == BatchState::Full); - } - - // Input queue - { - let mut pre_account_bytes = mt_account_data.clone(); - - if rng.gen_bool(0.5) && !mock_indexer.active_leaves.is_empty() { - println!("Input insert -----------------------------"); - let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); - - let pre_mt_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_account_bytes, &mt_pubkey) - .unwrap(); - let pre_account = *pre_mt_account.get_metadata(); - let pre_hash_chains = pre_mt_account.hash_chain_stores; - let pre_roots = pre_mt_account.root_history.iter().cloned().collect(); - let tx_hash = create_hash_chain_from_slice(vec![leaf].as_slice()).unwrap(); - let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); - 
mock_indexer.input_queue_leaves.push((leaf, leaf_index)); - mock_indexer.tx_events.push(MockTxEvent { - inputs: vec![leaf], - outputs: vec![], - tx_hash, - }); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - merkle_tree_account - .insert_nullifier_into_queue( - &leaf.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ) - .unwrap(); - - { - let mut mt_account_data = mt_account_data.clone(); - let merkle_tree_account = BatchedMerkleTreeAccount::state_from_bytes( - &mut mt_account_data, - &mt_pubkey, - ) - .unwrap(); - assert_nullifier_queue_insert( - pre_account, - &mut [], - pre_roots, - pre_hash_chains, - merkle_tree_account, - vec![leaf], - vec![leaf_index as u64], - tx_hash, - vec![true], - vec![], - ¤t_slot, - ) - .unwrap(); - current_slot += 1; - } - num_input_values += 1; - } - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - in_ready_for_update = merkle_tree_account - .queue_batches - .batches - .iter() - .any(|batch| batch.get_state() == BatchState::Full); - } - - if in_ready_for_update { - println!("Input update -----------------------------"); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - println!("Num output values: {}", num_output_values); - let mut pre_mt_account_data = mt_account_data.clone(); - in_ready_for_update = false; - perform_input_update(&mut pre_mt_account_data, &mut mock_indexer, true, mt_pubkey) - .await; - mt_account_data = pre_mt_account_data.clone(); - - num_input_updates += 1; - } - - if out_ready_for_update { - println!("Output update -----------------------------"); - println!("Num inserted values: {}", num_input_values); - println!("Num input updates: {}", num_input_updates); - println!("Num output updates: {}", num_output_updates); - 
println!("Num output values: {}", num_output_values); - let mut pre_mt_account_data = mt_account_data.clone(); - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let next_index = account.get_metadata().next_index; - let next_full_batch = output_account - .get_metadata() - .batch_metadata - .pending_batch_index; - let batch = output_account - .batch_metadata - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves = output_account - .value_vecs - .get(next_full_batch as usize) - .unwrap() - .to_vec(); - println!("leaves {:?}", leaves.len()); - let leaves_hash_chain = output_account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_append_proof( - next_index as usize, - batch.get_num_inserted_zkps() as u32, - batch.zkp_batch_size as u32, - *leaves_hash_chain, - batch.get_num_zkp_batches() as u32, - ) - .await - .unwrap(); - let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; - let end = start + batch.zkp_batch_size as usize; - for leaf in &leaves[start..end] { - // Storing the leaf in the output queue indexer so that it - // can be inserted into the input queue later. 
- mock_indexer.active_leaves.push(*leaf); - } - - let instruction_data = InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - let mut pre_output_queue_state = output_queue_account_data.clone(); - println!("Output update -----------------------------"); - - let queue_account = - &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let output_res = - account.update_tree_from_output_queue_account(queue_account, instruction_data); - - assert_eq!( - *account.root_history.last().unwrap(), - mock_indexer.merkle_tree.root() - ); - println!( - "post update: sequence number: {}", - account.get_metadata().sequence_number - ); - println!("output_res {:?}", output_res); - assert!(output_res.is_ok()); - - println!("output update success {}", num_output_updates); - println!("num_output_values: {}", num_output_values); - println!("num_input_values: {}", num_input_values); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let old_output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let old_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - assert_merkle_tree_update( - old_account, - account, - Some(old_output_account), - Some(output_account), - new_root, - ); - - output_queue_account_data = pre_output_queue_state; - mt_account_data = pre_mt_account_data; - out_ready_for_update = false; - num_output_updates += 1; - } - } - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - println!("batch 0: {:?}", output_account.batch_metadata.batches[0]); - println!("batch 1: {:?}", output_account.batch_metadata.batches[1]); - 
println!("num_output_updates: {}", num_output_updates); - println!("num_input_updates: {}", num_input_updates); - println!("num_output_values: {}", num_output_values); - println!("num_input_values: {}", num_input_values); -} -pub async fn perform_input_update( - mt_account_data: &mut [u8], - mock_indexer: &mut MockBatchedForester<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>, - enable_assert: bool, - mt_pubkey: Pubkey, -) { - let mut cloned_mt_account_data = (*mt_account_data).to_vec(); - let old_account = BatchedMerkleTreeAccount::state_from_bytes( - cloned_mt_account_data.as_mut_slice(), - &mt_pubkey, - ) - .unwrap(); - let (input_res, root) = { - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - - let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; - let batch = account - .queue_batches - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves_hash_chain = account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_update_proof( - account.get_metadata().queue_batches.zkp_batch_size as u32, - *leaves_hash_chain, - ) - .await - .unwrap(); - let instruction_data = InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - ( - account.update_tree_from_input_queue(instruction_data), - new_root, - ) - }; - println!("Input update -----------------------------"); - println!("res {:?}", input_res); - assert!(input_res.is_ok()); - - // assert Merkle tree - // sequence number increased X - // next index increased X - // current root index increased X - // One root changed one didn't - - let account = BatchedMerkleTreeAccount::state_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - if enable_assert { - assert_merkle_tree_update(old_account, account, None, None, root); - } 
-} - -pub async fn perform_address_update( - mt_account_data: &mut [u8], - mock_indexer: &mut MockBatchedAddressForester<40>, - mt_pubkey: Pubkey, -) { - println!("pre address update -----------------------------"); - let mut cloned_mt_account_data = (*mt_account_data).to_vec(); - let old_account = BatchedMerkleTreeAccount::address_from_bytes( - cloned_mt_account_data.as_mut_slice(), - &mt_pubkey, - ) - .unwrap(); - let (input_res, new_root, _pre_next_full_batch) = { - let mut account = - BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - - let next_full_batch = account.get_metadata().queue_batches.pending_batch_index; - let next_index = account.get_metadata().next_index; - println!("next index {:?}", next_index); - let batch = account - .queue_batches - .batches - .get(next_full_batch as usize) - .unwrap(); - let batch_start_index = - batch.start_index + batch.get_num_inserted_zkps() * batch.zkp_batch_size; - println!("batch start index {}", batch_start_index); - let leaves_hash_chain = account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let current_root = account.root_history.last().unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_address_proof( - account.get_metadata().queue_batches.batch_size as u32, - account.get_metadata().queue_batches.zkp_batch_size as u32, - *leaves_hash_chain, - next_index as usize, - batch_start_index as usize, - *current_root, - ) - .await - .unwrap(); - - mock_indexer.finalize_batch_address_update(10); - assert_eq!(mock_indexer.merkle_tree.root(), new_root); - let instruction_data = InstructionDataBatchNullifyInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - ( - account.update_tree_from_address_queue(instruction_data), - new_root, - next_full_batch, - ) - }; - println!("post address update -----------------------------"); - println!("res 
{:?}", input_res); - assert!(input_res.is_ok()); - let event = input_res.unwrap(); - assert_batch_adress_event(event, new_root, &old_account, mt_pubkey); - - // assert Merkle tree - // sequence number increased X - // next index increased X - // current root index increased X - // One root changed one didn't - - let account = - BatchedMerkleTreeAccount::address_from_bytes(mt_account_data, &mt_pubkey).unwrap(); - - assert_address_merkle_tree_update(old_account, account, new_root); -} - -fn assert_merkle_tree_update( - mut old_account: BatchedMerkleTreeAccount, - account: BatchedMerkleTreeAccount, - old_queue_account: Option, - queue_account: Option, - root: [u8; 32], -) { - let input_queue_previous_batch_state = - old_account.queue_batches.get_previous_batch().get_state(); - let input_queue_current_batch = old_account.queue_batches.get_current_batch(); - let previous_batch_index = old_account.queue_batches.get_previous_batch_index(); - let is_half_full = input_queue_current_batch.get_num_inserted_elements() - >= input_queue_current_batch.batch_size / 2 - && input_queue_current_batch.get_state() != BatchState::Inserted; - if is_half_full - && input_queue_previous_batch_state == BatchState::Inserted - && !old_account - .queue_batches - .get_previous_batch() - .bloom_filter_is_zeroed() - { - old_account - .queue_batches - .get_previous_batch_mut() - .set_bloom_filter_to_zeroed(); - old_account.bloom_filter_stores[previous_batch_index] - .iter_mut() - .for_each(|elem| { - *elem = 0; - }); - let previous_full_batch = old_account - .queue_batches - .batches - .get(previous_batch_index) - .unwrap(); - let sequence_number = previous_full_batch.sequence_number; - let overlapping_roots_exits = sequence_number > old_account.sequence_number; - if overlapping_roots_exits { - let mut oldest_root_index = old_account.root_history.first_index(); - // 2.1. Get, num of remaining roots. 
- // Remaining roots have not been updated since - // the update of the previous batch hence enable to prove - // inclusion of values nullified in the previous batch. - let num_remaining_roots = sequence_number - old_account.sequence_number; - // 2.2. Zero out roots oldest to first safe root index. - // Skip one iteration we don't need to zero out - // the first safe root. - for _ in 1..num_remaining_roots { - old_account.root_history[oldest_root_index] = [0u8; 32]; - oldest_root_index += 1; - oldest_root_index %= old_account.root_history.len(); - } - } - } - // Output queue update - if let Some(mut old_queue_account) = old_queue_account { - let queue_account = queue_account.unwrap(); - let old_full_batch_index = old_queue_account.batch_metadata.pending_batch_index; - let old_full_batch = old_queue_account - .batch_metadata - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - old_full_batch - .mark_as_inserted_in_merkle_tree( - account.sequence_number, - account.root_history.last_index() as u32, - old_account.root_history.capacity() as u32, - ) - .unwrap(); - - if old_full_batch.get_state() == BatchState::Inserted { - old_queue_account.batch_metadata.pending_batch_index += 1; - old_queue_account.batch_metadata.pending_batch_index %= 2; - } - assert_eq!( - queue_account.get_metadata(), - old_queue_account.get_metadata() - ); - assert_eq!(queue_account, old_queue_account); - // Only the output queue appends state - let zkp_batch_size = old_account.queue_batches.zkp_batch_size; - old_account.next_index += zkp_batch_size; - } else { - // Input queue update - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - let history_capacity = old_account.root_history.capacity(); - let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; - let zkp_batch_size = old_account.queue_batches.zkp_batch_size; - old_account.nullifier_next_index += zkp_batch_size; - - let old_full_batch = old_account - .queue_batches - .batches - 
.get_mut(old_full_batch_index as usize) - .unwrap(); - - old_full_batch - .mark_as_inserted_in_merkle_tree( - account.sequence_number, - account.root_history.last_index() as u32, - history_capacity as u32, - ) - .unwrap(); - println!( - "current batch {:?}", - old_full_batch.get_num_inserted_elements() - ); - - if old_full_batch.get_state() == BatchState::Inserted { - old_account.queue_batches.pending_batch_index += 1; - old_account.queue_batches.pending_batch_index %= 2; - } - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - - let old_full_batch = old_account - .queue_batches - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - let zeroed_batch = old_full_batch.get_num_inserted_elements() - >= old_full_batch.batch_size / 2 - && old_full_batch.get_state() != BatchState::Inserted; - println!("zeroed_batch: {:?}", zeroed_batch); - - let state = account.queue_batches.batches[previous_full_batch_index].get_state(); - let previous_batch = old_account - .queue_batches - .batches - .get_mut(previous_full_batch_index) - .unwrap(); - println!( - "zeroing out values: {}", - zeroed_batch && state == BatchState::Inserted - ); - if zeroed_batch && state == BatchState::Inserted { - previous_batch.set_bloom_filter_to_zeroed(); - let sequence_number = previous_batch.sequence_number; - let overlapping_roots_exits = sequence_number > old_account.sequence_number; - if overlapping_roots_exits { - old_account.bloom_filter_stores[previous_full_batch_index] - .iter_mut() - .for_each(|elem| { - *elem = 0; - }); - - let mut oldest_root_index = old_account.root_history.first_index(); - - let num_remaining_roots = sequence_number - old_account.sequence_number; - for _ in 1..num_remaining_roots { - println!("zeroing out root index: {}", oldest_root_index); - old_account.root_history[oldest_root_index] = [0u8; 32]; - oldest_root_index += 1; - oldest_root_index %= old_account.root_history.len(); - } - } - } - } - - old_account.sequence_number += 1; - 
old_account.root_history.push(root); - assert_eq!(account.get_metadata(), old_account.get_metadata()); - assert_eq!(account, old_account); - assert_eq!(*account.root_history.last().unwrap(), root); -} - -fn assert_address_merkle_tree_update( - mut old_account: BatchedMerkleTreeAccount, - account: BatchedMerkleTreeAccount, - root: [u8; 32], -) { - { - // Input queue update - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - let history_capacity = old_account.root_history.capacity(); - - let old_full_batch = old_account - .queue_batches - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - - old_full_batch - .mark_as_inserted_in_merkle_tree( - account.sequence_number, - account.root_history.last_index() as u32, - history_capacity as u32, - ) - .unwrap(); - if old_full_batch.get_state() == BatchState::Inserted { - old_account.queue_batches.pending_batch_index += 1; - old_account.queue_batches.pending_batch_index %= 2; - } - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - - let previous_full_batch_index = if old_full_batch_index == 0 { 1 } else { 0 }; - - let old_full_batch_index = old_account.queue_batches.pending_batch_index; - - let old_full_batch = old_account - .queue_batches - .batches - .get_mut(old_full_batch_index as usize) - .unwrap(); - let zeroed_batch = old_full_batch.get_num_inserted_elements() - >= old_full_batch.batch_size / 2 - && old_full_batch.get_state() != BatchState::Inserted; - println!("zeroed_batch: {:?}", zeroed_batch); - let state = account.queue_batches.batches[previous_full_batch_index].get_state(); - let previous_batch = old_account - .queue_batches - .batches - .get_mut(previous_full_batch_index) - .unwrap(); - if zeroed_batch && state == BatchState::Inserted { - previous_batch.set_bloom_filter_to_zeroed(); - let sequence_number = previous_batch.sequence_number; - let overlapping_roots_exits = sequence_number > old_account.sequence_number; - if overlapping_roots_exits { - 
old_account.bloom_filter_stores[previous_full_batch_index] - .iter_mut() - .for_each(|elem| { - *elem = 0; - }); - - let mut oldest_root_index = old_account.root_history.first_index(); - - let num_remaining_roots = sequence_number - old_account.sequence_number; - for _ in 1..num_remaining_roots { - println!("zeroing out root index: {}", oldest_root_index); - old_account.root_history[oldest_root_index] = [0u8; 32]; - oldest_root_index += 1; - oldest_root_index %= old_account.root_history.len(); - } - } - } - } - - old_account.sequence_number += 1; - old_account.next_index += old_account.queue_batches.zkp_batch_size; - old_account.root_history.push(root); - assert_eq!(account.get_metadata(), old_account.get_metadata()); - assert_eq!(account, old_account); - assert_eq!(*account.root_history.last().unwrap(), root); -} - -pub fn get_rnd_bytes(rng: &mut StdRng) -> [u8; 32] { - let mut rnd_bytes = rng.gen::<[u8; 32]>(); - rnd_bytes[0] = 0; - rnd_bytes -} - -#[serial] -#[tokio::test] -async fn test_fill_state_queues_completely() { - spawn_prover().await; - let mut current_slot = 1; - let roothistory_capacity = vec![17, 80]; - for root_history_capacity in roothistory_capacity { - let mut mock_indexer = - MockBatchedForester::<{ DEFAULT_BATCH_STATE_TREE_HEIGHT as usize }>::default(); - - let mut params = InitStateTreeAccountsInstructionData::test_default(); - params.output_queue_batch_size = params.input_queue_batch_size * 10; - // Root history capacity which is greater than the input updates - params.root_history_capacity = root_history_capacity; - - let owner = Pubkey::new_unique(); - - let queue_account_size = get_output_queue_account_size_from_params(params); - - let mut output_queue_account_data = vec![0; queue_account_size]; - let output_queue_pubkey = Pubkey::new_unique(); - - let mt_account_size = get_state_merkle_tree_account_size_from_params(params); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = Pubkey::new_unique(); - - let 
merkle_tree_rent = 1_000_000_000; - let queue_rent = 1_000_000_000; - let additional_bytes_rent = 1000; - - init_batched_state_merkle_tree_accounts( - owner, - params, - &mut output_queue_account_data, - output_queue_pubkey, - queue_rent, - &mut mt_account_data, - mt_pubkey, - merkle_tree_rent, - additional_bytes_rent, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - - let num_tx = NUM_BATCHES as u64 * params.output_queue_batch_size; - - // Fill up complete output queue - for _ in 0..num_tx { - // Output queue - - let rnd_bytes = get_rnd_bytes(&mut rng); - let mut pre_output_queue_account_data = output_queue_account_data.clone(); - let pre_output_account = - BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_account_data).unwrap(); - let pre_account = *pre_output_account.get_metadata(); - let pre_value_store = pre_output_account.value_vecs; - let pre_hash_chains = pre_output_account.hash_chain_stores; - - let mut output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - output_account - .insert_into_current_batch(&rnd_bytes, ¤t_slot) - .unwrap(); - assert_output_queue_insert( - pre_account, - pre_value_store, - pre_hash_chains, - BatchedQueueAccount::output_from_bytes( - &mut output_queue_account_data.clone(), // clone so that data cannot be modified - ) - .unwrap(), - vec![rnd_bytes], - current_slot, - ) - .unwrap(); - current_slot += 1; - mock_indexer.output_queue_leaves.push(rnd_bytes); - } - let rnd_bytes = get_rnd_bytes(&mut rng); - let mut output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - - let result = output_account.insert_into_current_batch(&rnd_bytes, ¤t_slot); - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - - output_account - .batch_metadata - .batches - .iter() - .for_each(|b| assert_eq!(b.get_state(), BatchState::Full)); - - // Batch insert output queue into merkle tree. 
- for _ in 0..output_account - .get_metadata() - .batch_metadata - .get_num_zkp_batches() - { - println!("Output update -----------------------------"); - let mut pre_mt_account_data = mt_account_data.clone(); - let mut account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let mut pre_output_queue_state = output_queue_account_data.clone(); - let output_account = - BatchedQueueAccount::output_from_bytes(&mut output_queue_account_data).unwrap(); - let next_index = account.get_metadata().next_index; - let next_full_batch = output_account - .get_metadata() - .batch_metadata - .pending_batch_index; - let batch = output_account - .batch_metadata - .batches - .get(next_full_batch as usize) - .unwrap(); - let leaves = mock_indexer.output_queue_leaves.clone(); - let leaves_hash_chain = output_account - .hash_chain_stores - .get(next_full_batch as usize) - .unwrap() - .get(batch.get_num_inserted_zkps() as usize) - .unwrap(); - let (proof, new_root) = mock_indexer - .get_batched_append_proof( - next_index as usize, - batch.get_num_inserted_zkps() as u32, - batch.zkp_batch_size as u32, - *leaves_hash_chain, - batch.get_num_zkp_batches() as u32, - ) - .await - .unwrap(); - let start = batch.get_num_inserted_zkps() as usize * batch.zkp_batch_size as usize; - let end = start + batch.zkp_batch_size as usize; - for leaf in &leaves[start..end] { - // Storing the leaf in the output queue indexer so that it - // can be inserted into the input queue later. 
- mock_indexer.active_leaves.push(*leaf); - } - - let instruction_data = InstructionDataBatchAppendInputs { - new_root, - compressed_proof: CompressedProof { - a: proof.a, - b: proof.b, - c: proof.c, - }, - }; - - println!("Output update -----------------------------"); - let queue_account = - &mut BatchedQueueAccount::output_from_bytes(&mut pre_output_queue_state).unwrap(); - let output_res = - account.update_tree_from_output_queue_account(queue_account, instruction_data); - assert!(output_res.is_ok()); - - assert_eq!( - *account.root_history.last().unwrap(), - mock_indexer.merkle_tree.root() - ); - - output_queue_account_data = pre_output_queue_state; - mt_account_data = pre_mt_account_data; - } - - // Fill up complete input queue. - let num_tx = NUM_BATCHES as u64 * params.input_queue_batch_size; - let mut first_value = [0u8; 32]; - for tx in 0..num_tx { - println!("Input insert ----------------------------- {}", tx); - let (_, leaf) = get_random_leaf(&mut rng, &mut mock_indexer.active_leaves); - let leaf_index = mock_indexer.merkle_tree.get_leaf_index(&leaf).unwrap(); - - let mut pre_mt_account_data = mt_account_data.clone(); - let pre_merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut pre_mt_account_data, &mt_pubkey) - .unwrap(); - let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_roots = pre_merkle_tree_account - .root_history - .iter() - .cloned() - .collect(); - let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; - let tx_hash = create_hash_chain_from_slice(&[leaf]).unwrap(); - // Index input queue insert event - mock_indexer.input_queue_leaves.push((leaf, leaf_index)); - mock_indexer.tx_events.push(MockTxEvent { - inputs: vec![leaf], - outputs: vec![], - tx_hash, - }); - println!("leaf {:?}", leaf); - println!("leaf_index {:?}", leaf_index); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - merkle_tree_account - 
.insert_nullifier_into_queue( - &leaf.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ) - .unwrap(); - println!("current slot {:?}", current_slot); - assert_nullifier_queue_insert( - pre_account, - &mut [], - pre_roots, - pre_hash_chains, - merkle_tree_account, - vec![leaf], - vec![leaf_index as u64], - tx_hash, - vec![true], - vec![], - ¤t_slot, - ) - .unwrap(); - current_slot += 1; - println!("leaf {:?}", leaf); - // Insert the same value twice - { - // copy data so that failing test doesn't affect the state of - // subsequent tests - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let result = merkle_tree_account.insert_nullifier_into_queue( - &leaf.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ); - result.unwrap_err(); - // assert_eq!( - // result.unwrap_err(), - // BatchedMerkleTreeError::BatchInsertFailed.into() - // ); - } - // Try to insert first value into any batch - if tx == 0 { - first_value = leaf; - } else { - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let result = merkle_tree_account.insert_nullifier_into_queue( - &first_value.to_vec().try_into().unwrap(), - leaf_index as u64, - &tx_hash, - ¤t_slot, - ); - // assert_eq!( - // result.unwrap_err(), - // BatchedMerkleTreeError::BatchInsertFailed.into() - // ); - result.unwrap_err(); - // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - } - // Assert input queue is full and doesn't accept more inserts - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let rnd_bytes = get_rnd_bytes(&mut rng); - let tx_hash = get_rnd_bytes(&mut rng); - let result = 
merkle_tree_account.insert_nullifier_into_queue( - &rnd_bytes, - 0, - &tx_hash, - ¤t_slot, - ); - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } - // Root of the final batch of first input queue batch - let mut first_input_batch_update_root_value = [0u8; 32]; - let num_updates = - params.input_queue_batch_size / params.input_queue_zkp_batch_size * NUM_BATCHES as u64; - for i in 0..num_updates { - println!("input update ----------------------------- {}", i); - perform_input_update(&mut mt_account_data, &mut mock_indexer, false, mt_pubkey).await; - - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - // after 5 updates the first batch is completely inserted - // As soon as we switch to inserting the second batch we zero out the first batch since - // the second batch is completely full. - if i >= 4 { - let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); - assert!(batch.bloom_filter_is_zeroed()); - } else { - let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); - assert!(!batch.bloom_filter_is_zeroed()); - } - let batch_one = &merkle_tree_account.queue_batches.batches[1]; - assert!(!batch_one.bloom_filter_is_zeroed()); - - println!( - "performed input queue batched update {} created root {:?}", - i, - mock_indexer.merkle_tree.root() - ); - if i == 4 { - first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); - } - let merkle_tree_account = - BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - println!( - "root {:?}", - merkle_tree_account.root_history.last().unwrap() - ); - println!( - "root last index {:?}", - merkle_tree_account.root_history.last_index() - ); - } - // assert all bloom_filters are inserted - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - for (i, batch) in 
merkle_tree_account.queue_batches.batches.iter().enumerate() { - assert_eq!(batch.get_state(), BatchState::Inserted); - if i == 0 { - assert!(batch.bloom_filter_is_zeroed()); - } else { - assert!(!batch.bloom_filter_is_zeroed()); - } - } - } - // do one insert and expect that roots until merkle_tree_account.batches[0].root_index are zero - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::state_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); - - let value = &get_rnd_bytes(&mut rng); - let tx_hash = &get_rnd_bytes(&mut rng); - merkle_tree_account - .insert_nullifier_into_queue(value, 0, tx_hash, ¤t_slot) - .unwrap(); - { - let post_batch = *merkle_tree_account.queue_batches.batches.first().unwrap(); - assert_eq!(post_batch.get_state(), BatchState::Fill); - assert_eq!(post_batch.get_num_inserted_zkp_batch(), 1); - let bloom_filter_store = - merkle_tree_account.bloom_filter_stores.get_mut(0).unwrap(); - let mut bloom_filter = BloomFilter::new( - params.bloom_filter_num_iters as usize, - params.bloom_filter_capacity, - bloom_filter_store, - ) - .unwrap(); - assert!(bloom_filter.contains(value)); - } - - for root in merkle_tree_account.root_history.iter() { - println!("root {:?}", root); - } - println!( - "root in root index {:?}", - merkle_tree_account.root_history[pre_batch_zero.root_index as usize] - ); - // check that all roots have been overwritten except the root index - // of the update - let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; - let start = merkle_tree_account.root_history.last_index() as u32; - println!("start {:?}", start); - for root in start + 1..pre_batch_zero.root_index + root_history_len { - println!("actual index {:?}", root); - let index = root % root_history_len; - - if index == pre_batch_zero.root_index { - let root_index = pre_batch_zero.root_index as usize; - - assert_eq!( - 
merkle_tree_account.root_history[root_index], - first_input_batch_update_root_value - ); - assert_eq!(merkle_tree_account.root_history[root_index - 1], [0u8; 32]); - break; - } - println!("index {:?}", index); - assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); - } - } - } -} - -#[serial] -#[tokio::test] -async fn test_fill_address_tree_completely() { - spawn_prover().await; - let mut current_slot = 1; - let roothistory_capacity = vec![17, 80]; // - for root_history_capacity in roothistory_capacity { - let mut mock_indexer = - MockBatchedAddressForester::<{ DEFAULT_BATCH_ADDRESS_TREE_HEIGHT as usize }>::default(); - - let mut params = InitAddressTreeAccountsInstructionData::test_default(); - // Root history capacity which is greater than the input updates - params.root_history_capacity = root_history_capacity; - - let owner = Pubkey::new_unique(); - - let mt_account_size = get_address_merkle_tree_account_size_from_params(params); - let mut mt_account_data = vec![0; mt_account_size]; - let mt_pubkey = Pubkey::new_unique(); - - let merkle_tree_rent = 1_000_000_000; - - init_batched_address_merkle_tree_account( - owner, - params, - &mut mt_account_data, - merkle_tree_rent, - mt_pubkey, - ) - .unwrap(); - use rand::SeedableRng; - let mut rng = StdRng::seed_from_u64(0); - - let num_tx = NUM_BATCHES * params.input_queue_batch_size as usize; - let mut first_value = [0u8; 32]; - for tx in 0..num_tx { - println!("Input insert -----------------------------"); - let mut rnd_address = get_rnd_bytes(&mut rng); - rnd_address[0] = 0; - - let mut pre_account_data = mt_account_data.clone(); - let pre_merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut pre_account_data, &mt_pubkey) - .unwrap(); - let pre_account = *pre_merkle_tree_account.get_metadata(); - let pre_roots = pre_merkle_tree_account - .root_history - .iter() - .cloned() - .collect(); - let pre_hash_chains = pre_merkle_tree_account.hash_chain_stores; - let mut 
merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - merkle_tree_account - .insert_address_into_queue(&rnd_address, ¤t_slot) - .unwrap(); - assert_input_queue_insert( - pre_account, - &mut [], - pre_roots, - pre_hash_chains, - merkle_tree_account, - vec![rnd_address], - vec![rnd_address], - vec![true], - vec![], - ¤t_slot, - ) - .unwrap(); - current_slot += 1; - mock_indexer.queue_leaves.push(rnd_address); - - // Insert the same value twice - { - // copy data so that failing test doesn't affect the state of - // subsequent tests - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let result = - merkle_tree_account.insert_address_into_queue(&rnd_address, ¤t_slot); - println!("tx {}", tx); - println!("errors {:?}", result); - if tx == params.input_queue_batch_size as usize * 2 - 1 { - // Error when the value is already inserted into the other batch. - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } else if tx == params.input_queue_batch_size as usize - 1 { - // Error when the value is already inserted into the other batch. - // This occurs only when we switch the batch in this test. - assert_eq!( - result.unwrap_err(), - BatchedMerkleTreeError::NonInclusionCheckFailed - ); - } else { - // Error when inserting into the bloom filter directly twice. 
- assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - - current_slot += 1; - } - // Try to insert first value into any batch - if tx == 0 { - first_value = rnd_address; - } else { - let mut mt_account_data = mt_account_data.clone(); - let mut merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - - let result = merkle_tree_account.insert_address_into_queue( - &first_value.to_vec().try_into().unwrap(), - ¤t_slot, - ); - println!("tx {}", tx); - println!("result {:?}", result); - if tx == params.input_queue_batch_size as usize * 2 - 1 { - // Error when the value is already inserted into the other batch. - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } else if tx >= params.input_queue_batch_size as usize - 1 - // || tx == params.input_queue_batch_size as usize - { - // Error when the value is already inserted into the other batch. - // This occurs only when we switch the batch in this test. - assert_eq!( - result.unwrap_err(), - BatchedMerkleTreeError::NonInclusionCheckFailed - ); - } else { - // Error when inserting into the bloom filter directly twice. 
- assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - current_slot += 1; - - // assert_eq!(result.unwrap_err(), BloomFilterError::Full.into()); - } - } - // Assert input queue is full and doesn't accept more inserts - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let rnd_bytes = get_rnd_bytes(&mut rng); - let result = merkle_tree_account.insert_address_into_queue(&rnd_bytes, ¤t_slot); - assert_eq!(result.unwrap_err(), BatchedMerkleTreeError::BatchNotReady); - } - // Root of the final batch of first input queue batch - let mut first_input_batch_update_root_value = [0u8; 32]; - let num_updates = 10; - for i in 0..num_updates { - println!("address update ----------------------------- {}", i); - perform_address_update(&mut mt_account_data, &mut mock_indexer, mt_pubkey).await; - if i == 4 { - first_input_batch_update_root_value = mock_indexer.merkle_tree.root(); - } - let merkle_tree_account = - BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - let batch = merkle_tree_account.queue_batches.batches.first().unwrap(); - let batch_one = merkle_tree_account.queue_batches.batches.get(1).unwrap(); - assert!(!batch_one.bloom_filter_is_zeroed()); - - // after 5 updates the first batch is completely inserted - // As soon as we switch to inserting the second batch we zero out the first batch since - // the second batch is completely full. 
- if i >= 4 { - assert!(batch.bloom_filter_is_zeroed()); - } else { - assert!(!batch.bloom_filter_is_zeroed()); - } - } - // assert all bloom_filters are inserted - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - for (i, batch) in merkle_tree_account.queue_batches.batches.iter().enumerate() { - assert_eq!(batch.get_state(), BatchState::Inserted); - if i == 0 { - // first batch is zeroed out since the second batch is full - assert!(batch.bloom_filter_is_zeroed()); - } else { - // second batch is not zeroed out since the first batch is empty - assert!(!batch.bloom_filter_is_zeroed()); - } - } - } - { - let merkle_tree_account = - &mut BatchedMerkleTreeAccount::address_from_bytes(&mut mt_account_data, &mt_pubkey) - .unwrap(); - println!("root history {:?}", merkle_tree_account.root_history); - let pre_batch_zero = *merkle_tree_account.queue_batches.batches.first().unwrap(); - - for root in merkle_tree_account.root_history.iter() { - println!("root {:?}", root); - } - println!( - "root in root index {:?}", - merkle_tree_account.root_history[pre_batch_zero.root_index as usize] - ); - // check that all roots have been overwritten except the root index - // of the update - let root_history_len: u32 = merkle_tree_account.root_history.len() as u32; - let start = merkle_tree_account.root_history.last_index() as u32; - println!("start {:?}", start); - for root in start + 1..pre_batch_zero.root_index + root_history_len { - println!("actual index {:?}", root); - let index = root % root_history_len; - - if index == pre_batch_zero.root_index { - let root_index = pre_batch_zero.root_index as usize; - - assert_eq!( - merkle_tree_account.root_history[root_index], - first_input_batch_update_root_value - ); - assert_eq!(merkle_tree_account.root_history[root_index - 1], [0u8; 32]); - break; - } - println!("index {:?}", index); - assert_eq!(merkle_tree_account.root_history[index as usize], [0u8; 32]); - } 
- } - } -} diff --git a/program-tests/registry-test/tests/tests.rs b/program-tests/registry-test/tests/tests.rs index 1f8cf2d793..3c20c6fac4 100644 --- a/program-tests/registry-test/tests/tests.rs +++ b/program-tests/registry-test/tests/tests.rs @@ -1880,7 +1880,7 @@ async fn test_batch_address_tree() { { println!("pre perform_batch_address_merkle_tree_update"); - for _ in 0..4 { + for _ in 0..6 { perform_batch_address_merkle_tree_update( &mut rpc, &mut test_indexer, @@ -1928,7 +1928,7 @@ async fn test_batch_address_tree() { .await .unwrap(); } - for _ in 0..5 { + for _ in 0..3 { perform_batch_address_merkle_tree_update( &mut rpc, &mut test_indexer,