diff --git a/.cargo/config b/.cargo/config index 7f7e28a8b84..a3f905bff38 100644 --- a/.cargo/config +++ b/.cargo/config @@ -2,10 +2,13 @@ stacks-node = "run --package stacks-node --" fmt-stacks = "fmt -- --config group_imports=StdExternalCrate,imports_granularity=Module" -# Uncomment to improve performance slightly, at the cost of portability -# * Note that native binaries may not run on CPUs that are different from the build machine -# [build] -# rustflags = ["-Ctarget-cpu=native"] +# For x86_64 CPUs, default to `native` and override in CI for release builds +# This makes it slightly faster for users running locally built binaries. +# This can cause trouble when building "portable" binaries, such as for docker, +# so disable it with the "portable" feature. +# TODO: Same for other targets? +[target.'cfg(all(target_arch = "x86_64", not(feature = "portable")))'] +rustflags = ["-Ctarget-cpu=native"] # Needed by perf to generate flamegraphs. #[target.x86_64-unknown-linux-gnu] diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary index eae3a123bf6..2388ffa0314 100644 --- a/.github/actions/dockerfiles/Dockerfile.alpine-binary +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -7,19 +7,16 @@ ARG TARGETPLATFORM ARG BUILDPLATFORM ARG TARGETARCH ARG TARGETVARIANT -ARG REPO +ARG REPO=stacks-network/stacks-core -RUN case ${TARGETPLATFORM} in \ - linux/amd64/v2) BIN_ARCH=linux-glibc-x64-v2 ;; \ - linux/amd64*) BIN_ARCH=linux-glibc-x64 ;; \ - linux/arm64*) BIN_ARCH=linux-glibc-arm64 ;; \ - linux/arm/v7) BIN_ARCH=linux-glibc-armv7 ;; \ - *) exit 1 ;; \ +RUN case ${TARGETARCH} in \ + "amd64") BIN_ARCH=linux-musl-x64 ;; \ + "arm64") BIN_ARCH=linux-musl-arm64 ;; \ + "arm") BIN_ARCH=linux-musl-armv7 ;; \ + *) exit 1 ;; \ esac \ - && echo "TARGETPLATFORM: $TARGETPLATFORM" \ - && echo "BIN_ARCH: $BIN_ARCH" \ - && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ - && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ + && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ + && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ && unzip ${BIN_ARCH}.zip -d /out FROM --platform=${TARGETPLATFORM} alpine diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary index f4461908538..4cec3c4391e 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-binary +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -7,19 +7,16 @@ ARG TARGETPLATFORM ARG BUILDPLATFORM ARG TARGETARCH ARG TARGETVARIANT -ARG REPO +ARG REPO=stacks-network/stacks-core -RUN case ${TARGETPLATFORM} in \ - linux/amd64/v2) BIN_ARCH=linux-glibc-x64-v2 ;; \ - linux/amd64*) BIN_ARCH=linux-glibc-x64 ;; \ - linux/arm64*) BIN_ARCH=linux-glibc-arm64 ;; \ - linux/arm/v7) BIN_ARCH=linux-glibc-armv7 ;; \ - *) exit 1 ;; \ +RUN case ${TARGETARCH} in \ + "amd64") BIN_ARCH=linux-musl-x64 ;; \ + "arm64") BIN_ARCH=linux-musl-arm64 ;; \ + "arm") BIN_ARCH=linux-musl-armv7 ;; \ + *) exit 1 ;; \ esac \ - && echo "TARGETPLATFORM: $TARGETPLATFORM" \ - && echo "BIN_ARCH: $BIN_ARCH" \ - && echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ - && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ +
&& echo "wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip" \ + && wget -q https://github.com/${REPO}/releases/download/${TAG}/${BIN_ARCH}.zip -O /${BIN_ARCH}.zip \ && unzip ${BIN_ARCH}.zip -d /out FROM --platform=${TARGETPLATFORM} debian:bookworm diff --git a/.github/actions/dockerfiles/Dockerfile.debian-source b/.github/actions/dockerfiles/Dockerfile.debian-source index b8da585fe2e..13f45536133 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-source +++ b/.github/actions/dockerfiles/Dockerfile.debian-source @@ -1,13 +1,10 @@ -FROM rust:bookworm as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' ARG GIT_COMMIT='No Commit Info' ARG BUILD_DIR=/build ARG TARGET=x86_64-unknown-linux-gnu -# Allow us to override the default `--target-cpu` for the given target triplet -ARG TARGET_CPU -ENV RUSTFLAGS="${TARGET_CPU:+${RUSTFLAGS} -Ctarget-cpu=${TARGET_CPU}}" WORKDIR /src COPY . . @@ -19,7 +16,7 @@ RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \ && cd ${BUILD_DIR} \ && rustup target add ${TARGET} \ && rustup component add rustfmt \ - && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} \ + && cargo build --features monitoring_prom,slog_json,portable --release --workspace --target ${TARGET} \ && mkdir -p /out \ && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out diff --git a/.github/workflows/atlas-tests.yml b/.github/workflows/atlas-tests.yml index 1ea78e54112..8cb6b6bcc91 100644 --- a/.github/workflows/atlas-tests.yml +++ b/.github/workflows/atlas-tests.yml @@ -54,17 +54,3 @@ jobs: uses: stacks-network/actions/codecov@main with: test-name: ${{ matrix.test-name }} - - check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - atlas-tests - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index a832bc22349..e1e4fff7653 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -31,7 +31,7 @@ jobs: test-name: - tests::bitcoin_regtest::bitcoind_integration_test - tests::integrations::integration_test_get_info - - tests::neon_integrations::antientropy_integration_test + - tests::neon_integrations::antientropy_integration_test - tests::neon_integrations::bad_microblock_pubkey - tests::neon_integrations::bitcoind_forking_test - tests::neon_integrations::bitcoind_integration_test @@ -59,7 +59,6 @@ jobs: - tests::neon_integrations::size_overflow_unconfirmed_stream_microblocks_integration_test - tests::neon_integrations::stx_delegate_btc_integration_test - tests::neon_integrations::stx_transfer_btc_integration_test - - tests::neon_integrations::stack_stx_burn_op_test - tests::neon_integrations::test_chainwork_first_intervals - tests::neon_integrations::test_chainwork_partial_interval - tests::neon_integrations::test_flash_block_skip_tenure @@ -71,25 +70,17 @@ jobs: - tests::neon_integrations::use_latest_tip_integration_test - tests::neon_integrations::confirm_unparsed_ongoing_ops - tests::neon_integrations::min_txs - - tests::neon_integrations::vote_for_aggregate_key_burn_op_test - - tests::epoch_25::microblocks_disabled - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - 
tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb - tests::nakamoto_integrations::correct_burn_outs - - tests::nakamoto_integrations::vote_for_aggregate_key_burn_op - - tests::nakamoto_integrations::follower_bootup - tests::signer::stackerdb_dkg - tests::signer::stackerdb_sign - tests::signer::stackerdb_block_proposal - tests::signer::stackerdb_filter_bad_transactions - tests::signer::stackerdb_mine_2_nakamoto_reward_cycles - - tests::signer::stackerdb_sign_after_signer_reboot - - tests::nakamoto_integrations::stack_stx_burn_op_integration_test - # Do not run this one until we figure out why it fails in CI - # - tests::neon_integrations::bitcoin_reorg_flap steps: ## Setup test environment - name: Setup Test Environment @@ -97,7 +88,7 @@ jobs: uses: stacks-network/actions/stacks-core/testenv@main with: btc-version: "25.0" - + ## Run test matrix using restored cache of archive file ## - Test will timeout after env.TEST_TIMEOUT minutes - name: Run Tests @@ -114,17 +105,3 @@ jobs: uses: stacks-network/actions/codecov@main with: test-name: ${{ matrix.test-name }} - - check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - integration-tests - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 84e94a9bba3..434c977a560 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -24,6 +24,16 @@ on: - reopened - synchronize - ready_for_review + paths-ignore: + - "**.md" + - "**.yml" + ## might be better to use inclusive v exclusive paths here, ex: + # paths: + # - "**.rs" + # - "**.clar" + pull_request_review: + types: + - submitted defaults: run: @@ -45,6 +55,15 @@ jobs: ## - PR review comment ## - PR change is requested rustfmt: + if: | + !( + github.event_name == 'pull_request_review' && + github.event.action == 'submitted' && + ( + github.event.review.state == 'commented' || + github.event.review.state == 'changes_requested' + ) + ) name: Rust Format runs-on: ubuntu-latest steps: @@ -70,9 +89,11 @@ jobs: ## ## Runs when the following is true: ## - tag is provided + ## - workflow is building default branch (master) create-release: if: | - inputs.tag != '' + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) name: Create Release needs: - rustfmt @@ -85,9 +106,22 @@ jobs: ## ## Runs when: ## - tag is not provided + ## and the following are not true: + ## - PR review submitted (not approved) + ## and any of: + ## - PR review comment + ## - PR change is requested docker-image: if: | - inputs.tag == '' + inputs.tag == '' && + !( + github.event_name == 'pull_request_review' && + github.event.action == 'submitted' && + ( + github.event.review.state == 'commented' || + github.event.review.state == 'changes_requested' + ) + ) name: Docker Image (Source) uses: ./.github/workflows/image-build-source.yml needs: @@ -101,6 +135,7 @@ jobs: ## or: ## - no tag provided ## and any of: + ## - PR is approved (any approval will trigger) ## - this workflow is called manually ## - PR is opened ## - commit to either (development, master) branch @@ -108,6 +143,11 @@ jobs: if: | inputs.tag != '' || ( inputs.tag == '' && ( + ( + github.event_name == 'pull_request_review' && + 
github.event.action == 'submitted' && + github.event.review.state == 'approved' + ) || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' || @@ -130,28 +170,16 @@ jobs: ## ## Runs when: ## - tag is provided - ## or: - ## - no tag provided - ## and any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - ## - commit to either (development, next, master) branch + ## or either of the following: + ## - tag is not provided + ## - PR is approved stacks-core-tests: if: | inputs.tag != '' || ( - inputs.tag == '' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + inputs.tag == '' || ( + github.event_name == 'pull_request_review' && + github.event.action == 'submitted' && + github.event.review.state == 'approved' ) ) name: Stacks Core Tests @@ -163,18 +191,10 @@ jobs: bitcoin-tests: if: | inputs.tag != '' || ( - inputs.tag == '' && ( - github.event_name == 'workflow_dispatch' || - github.event_name == 'pull_request' || - github.event_name == 'merge_group' || - ( - contains(' - refs/heads/master - refs/heads/develop - refs/heads/next - ', github.event.pull_request.head.ref) && - github.event_name == 'push' - ) + inputs.tag == '' || ( + github.event_name == 'pull_request_review' && + github.event.action == 'submitted' && + github.event.review.state == 'approved' ) ) name: Bitcoin Tests diff --git a/.github/workflows/create-source-binary-x64.yml b/.github/workflows/create-source-binary-x64.yml new file mode 100644 index 00000000000..a1b435aa5f2 --- /dev/null +++ b/.github/workflows/create-source-binary-x64.yml @@ -0,0 +1,78 @@ +## Github workflow to create x86_64 binaries from source + +name: Create Binaries for x86_64 + +on: + workflow_call: + inputs: + tag: + description: "Tag name of this release (x.y.z)" + required: true + type: string + arch: + description: "Stringified JSON object listing of platform matrix" + required: false + type: string + default: >- + ["linux-glibc-x64", "linux-musl-x64", "macos-x64", "windows-x64"] + cpu: + description: "Stringified JSON object listing of target CPU matrix" + required: false + type: string + default: >- + ["x86-64", "x86-64-v3"] + +## change the display name to the tag being built +run-name: ${{ inputs.tag }} + +concurrency: + group: create-binary-${{ github.head_ref || github.ref || github.run_id }} + ## Only cancel in progress if this is for a PR + cancel-in-progress: ${{ github.event_name == 'pull_request' }} + +jobs: + ## Runs when the following is true: + ## - tag is provided + ## - workflow is building default branch (master) + artifact: + if: | + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) + name: Build Binaries + runs-on: ubuntu-latest + strategy: + ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch + max-parallel: 10 + matrix: + platform: ${{ fromJson(inputs.arch) }} + cpu: ${{ fromJson(inputs.cpu) }} + steps: + ## Setup Docker for the builds + - name: Docker setup + uses: stacks-network/actions/docker@main + + ## Build the binaries using defined dockerfiles + - name: Build Binary (${{ matrix.platform }}_${{ matrix.cpu }}) + id: build_binaries + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09
# 5.0.0 + with: + file: build-scripts/Dockerfile.${{ matrix.platform }} + outputs: type=local,dest=./release/${{ matrix.platform }} + build-args: | + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + OS_ARCH=${{ matrix.platform }} + TARGET_CPU=${{ matrix.cpu }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + + ## Compress the binary artifact + - name: Compress artifact + id: compress_artifact + run: zip --junk-paths ${{ matrix.platform }}_${{ matrix.cpu }} ./release/${{ matrix.platform }}/* + + ## Upload the binary artifact to the github action (used in `github-release.yml` to create a release) + - name: Upload artifact + id: upload_artifact + uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + with: + path: ${{ matrix.platform }}_${{ matrix.cpu }}.zip diff --git a/.github/workflows/create-source-binary.yml b/.github/workflows/create-source-binary.yml index 385b30af7db..068170efc53 100644 --- a/.github/workflows/create-source-binary.yml +++ b/.github/workflows/create-source-binary.yml @@ -9,6 +9,12 @@ on: description: "Tag name of this release (x.y.z)" required: true type: string + arch: + description: "Stringified JSON object listing of platform matrix" + required: false + type: string + default: >- + ["linux-glibc-arm64", "linux-glibc-armv7", "linux-musl-arm64", "linux-musl-armv7"] ## change the display name to the tag being built run-name: ${{ inputs.tag }} @@ -21,40 +27,44 @@ concurrency: jobs: ## Runs when the following is true: ## - tag is provided + ## - workflow is building default branch (master) artifact: if: | - inputs.tag != '' + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) name: Build Binaries runs-on: ubuntu-latest strategy: ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch max-parallel: 10 matrix: - arch: - - linux-musl - - linux-glibc - - macos - - windows - cpu: - - arm64 - - armv7 - - x86-64 ## defaults to x86-64-v3 variant - intel haswell (2013) and newer - # - x86-64-v2 ## intel nehalem (2008) and newer - # - x86-64-v3 ## intel haswell (2013) and newer - # - x86-64-v4 ## intel skylake (2017) and newer - exclude: - - arch: windows # excludes windows-arm64 - cpu: arm64 - - arch: windows # excludes windows-armv7 - cpu: armv7 - - arch: macos # excludes macos-armv7 - cpu: armv7 - + platform: ${{ fromJson(inputs.arch) }} steps: - - name: Build Binary (${{ matrix.arch }}_${{ matrix.cpu }}) - id: build_binary - uses: stacks-network/actions/stacks-core/create-source-binary@main + ## Setup Docker for the builds + - name: Docker setup + uses: stacks-network/actions/docker@main + + ## Build the binaries using defined dockerfiles + - name: Build Binary (${{ matrix.platform }}) + id: build_binaries + uses: docker/build-push-action@0565240e2d4ab88bba5387d719585280857ece09 # 5.0.0 + with: + file: build-scripts/Dockerfile.${{ matrix.platform }} + outputs: type=local,dest=./release/${{ matrix.platform }} + build-args: | + STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} + OS_ARCH=${{ matrix.platform }} + GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} + GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} + + ## Compress the binary artifact + - name: Compress artifact + id: compress_artifact + run: zip --junk-paths ${{ matrix.platform }} ./release/${{ matrix.platform }}/* + + ## Upload the binary artifact to the github action (used in `github-release.yml` to create a release) + - name: Upload artifact + id: upload_artifact + uses: 
actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 with: - arch: ${{ matrix.arch }} - cpu: ${{ matrix.cpu }} - tag: ${{ inputs.tag }} + path: ${{ matrix.platform }}.zip diff --git a/.github/workflows/epoch-tests.yml b/.github/workflows/epoch-tests.yml index be00618d505..a50e0d344d3 100644 --- a/.github/workflows/epoch-tests.yml +++ b/.github/workflows/epoch-tests.yml @@ -78,17 +78,3 @@ jobs: uses: stacks-network/actions/codecov@main with: test-name: ${{ matrix.test-name }} - - check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - epoch-tests - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 0b0b1833290..17d75b2d0ed 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -25,25 +25,44 @@ jobs: ## ## Runs when the following is true: ## - tag is provided + ## - workflow is building default branch (master) build-binaries: if: | - inputs.tag != '' + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) name: Build Binaries uses: ./.github/workflows/create-source-binary.yml with: tag: ${{ inputs.tag }} secrets: inherit + ## Build x86_64 binaries from source + ## + ## Runs when the following is true: + ## - tag is provided + ## - workflow is building default branch (master) + build-binaries-x64: + if: | + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) + name: Build Binaries (x86_64) + uses: ./.github/workflows/create-source-binary-x64.yml + with: + tag: ${{ inputs.tag }} + secrets: inherit + ## Runs when the following is true: ## - tag is provided ## - workflow is building default branch (master) create-release: if: | - inputs.tag != '' + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) name: Create Release runs-on: ubuntu-latest needs: - build-binaries + - build-binaries-x64 steps: ## Downloads the artifacts built in `create-source-binary.yml` and `create-source-binary-x64.yml` - name: Download Artifacts @@ -56,7 +75,12 @@ ## Generate a checksums file to be added to the release page - name: Generate Checksums id: generate_checksum - uses: stacks-network/actions/generate-checksum@main + uses: jmgilman/actions-generate-checksum@24a35957fba81c6cbaefeb1e3d59ee56e3db5077 # v1.0.0 + with: + method: sha512 + output: CHECKSUMS.txt + patterns: | + release/*.zip ## Upload the release archives with the checksums file - name: Upload Release @@ -81,11 +105,13 @@ ## - workflow is building default branch (master) docker-image: if: | - inputs.tag != '' + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) name: Docker Image (Binary) uses: ./.github/workflows/image-build-binary.yml needs: - build-binaries + - build-binaries-x64 - create-release with: tag: ${{ inputs.tag }} diff --git a/.github/workflows/image-build-binary.yml b/.github/workflows/image-build-binary.yml index 74415e7f16a..cab5ff162b8 100644 --- a/.github/workflows/image-build-binary.yml +++ b/.github/workflows/image-build-binary.yml @@ -8,11 +8,16 @@ on: tag: required: true type: string - description: "Version tag for docker images" + description: "Version tag for alpine images" + docker-org: + required: false + type: string + description: "Docker repo org for
uploading images (defaults to github org)" + default: "${GITHUB_REPOSITORY_OWNER}" ## Define which docker arch to build for env: - docker_platforms: "linux/arm64, linux/arm/v7, linux/amd64, linux/amd64/v3" + docker_platforms: "linux/arm64, linux/arm/v7, linux/amd64, linux/amd64/v2, linux/amd64/v3" docker-org: blockstack concurrency: @@ -28,7 +33,8 @@ jobs: ## - workflow is building default branch (master) image: if: | - inputs.tag != '' + inputs.tag != '' && + github.ref == format('refs/heads/{0}', github.event.repository.default_branch) name: Build Image runs-on: ubuntu-latest strategy: @@ -42,39 +48,28 @@ steps: ## Setup Docker for the builds - name: Docker setup - id: docker_setup uses: stacks-network/actions/docker@main with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} - ## if the repo owner is not `stacks-network`, default to a docker-org of the repo owner (i.e. github user id) - ## this allows forks to run the docker push workflows without having to hardcode a dockerhub org (but it does require docker hub user to match github username) - - name: Set Local env vars - id: set_env - if: | - github.repository_owner != 'stacks-network' - run: | - echo "docker-org=${{ github.repository_owner }}" >> "$GITHUB_ENV" - ## Set docker metadata ## - depending on the matrix.dist, different tags will be enabled - ## ex. debian will have this tag: `type=ref,event=tag,enable=${{ matrix.dist == 'debian' }}` + ## ex. alpine will have this tag: `type=ref,event=tag,enable=${{ matrix.dist == 'alpine' }}` - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata uses: docker/metadata-action@96383f45573cb7f253c731d3b3ab81c87ef81934 #v5.0.0 with: - ## tag images with current repo name `stacks-core` as well as legacy `stacks-blockchain` images: | ${{env.docker-org}}/${{ github.event.repository.name }} ${{env.docker-org}}/stacks-blockchain tags: | - type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} - type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'debian'}} - type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' && matrix.dist == 'debian' }} - type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} - type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} - type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'alpine' }} + type=raw,value=latest,enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} + type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'alpine'}} + type=raw,value=${{ inputs.tag }},enable=${{ inputs.tag != '' && matrix.dist == 'alpine' }} + type=ref,event=tag,enable=${{ matrix.dist == 'alpine' }} + type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} + type=raw,value=${{ inputs.tag }}-${{ matrix.dist }},enable=${{ inputs.tag != '' && matrix.dist == 'debian' }} ## Build docker image for release - name: Build and Push ( ${{matrix.dist}} ) diff --git a/.github/workflows/image-build-source.yml b/.github/workflows/image-build-source.yml index
ebb9afc6790..1936999b27a 100644 --- a/.github/workflows/image-build-source.yml +++ b/.github/workflows/image-build-source.yml @@ -8,7 +8,7 @@ on: ## Define which docker arch to build for env: - docker_platforms: "linux/amd64" + docker_platforms: linux/amd64 docker-org: blockstack concurrency: @@ -31,21 +31,11 @@ jobs: steps: ## Setup Docker for the builds - name: Docker setup - id: docker_setup uses: stacks-network/actions/docker@main with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_PASSWORD }} - ## if the repo owner is not `stacks-network`, default to a docker-org of the repo owner (i.e. github user id) - ## this allows forks to run the docker push workflows without having to hardcode a dockerhub org (but it does require docker hub user to match github username) - - name: Set Local env vars - id: set_env - if: | - github.repository_owner != 'stacks-network' - run: | - echo "docker-org=${{ github.repository_owner }}" >> "$GITHUB_ENV" - ## Set docker metadata - name: Docker Metadata ( ${{matrix.dist}} ) id: docker_metadata @@ -68,8 +58,8 @@ tags: ${{ steps.docker_metadata.outputs.tags }} labels: ${{ steps.docker_metadata.outputs.labels }} build-args: | + REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} STACKS_NODE_VERSION=${{ env.GITHUB_SHA_SHORT }} GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - TARGET_CPU=x86-64-v3 push: ${{ env.DOCKER_PUSH }} diff --git a/.github/workflows/slow-tests.yml b/.github/workflows/slow-tests.yml index bce6a15a1f0..0c2cb62ea46 100644 --- a/.github/workflows/slow-tests.yml +++ b/.github/workflows/slow-tests.yml @@ -56,17 +56,3 @@ jobs: uses: stacks-network/actions/codecov@main with: test-name: ${{ matrix.test-name }} - - check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - slow-tests - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" diff --git a/.github/workflows/stacks-core-tests.yml b/.github/workflows/stacks-core-tests.yml index 3195f279fcb..5105c6535e1 100644 --- a/.github/workflows/stacks-core-tests.yml +++ b/.github/workflows/stacks-core-tests.yml @@ -87,7 +87,7 @@ jobs: uses: stacks-network/actions/stacks-core/testenv@main with: btc-version: "25.0" - + ## Run test matrix using restored cache of archive file ## - Test will timeout after env.TEST_TIMEOUT minutes - name: Run Tests @@ -168,6 +168,7 @@ jobs: # Core contract tests on Clarinet v1 # Check for false positives/negatives + # https://github.com/stacks-network/stacks-blockchain/pull/4031#pullrequestreview-1713341208 core-contracts-clarinet-test-clarinet-v1: name: Core Contracts Test Clarinet V1 runs-on: ubuntu-latest @@ -181,19 +182,3 @@ with: args: test --manifest-path=./contrib/core-contract-tests/Clarinet.toml contrib/core-contract-tests/tests/bns/name_register_test.ts - check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - full-genesis - - unit-tests - - open-api-validation - - core-contracts-clarinet-test - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" diff --git a/CHANGELOG.md b/CHANGELOG.md index 33fe1be833f..9bab7a19d41 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,13 +17,6 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Functions
that include a `signer-key` parameter also include a `signer-sig` parameter to demonstrate that the owner of `signer-key` is approving that particular Stacking operation. For more details, refer to the `verify-signer-key-sig` method in the `pox-4` contract. - Signer key authorizations can be added via `set-signer-key-authorization` to remove the need for `signer-key` signatures. - A `max-amount` field is included in signer key authorizations and defines the maximum amount of STX that can be locked in a single transaction. -- Added configuration parameters to customize the burn block at which to start processing Stacks blocks, when running on testnet or regtest. - ``` - [burnchain] - first_burn_block_height = 2582526 - first_burn_block_timestamp = 1710780828 - first_burn_block_hash = "000000000000001a17c68d43cb577d62074b63a09805e4a07e829ee717507f66" - ``` ### Modified diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9ffcfb80f78..0101858b628 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -59,8 +59,8 @@ is responsible for: 6. Merging the new PR. For an example of this process, see PRs -[#3598](https://github.com/stacks-network/stacks-core/pull/3598) and -[#3626](https://github.com/stacks-network/stacks-core/pull/3626). +[#3598](https://github.com/stacks-network/stacks-blockchain/pull/3598) and +[#3626](https://github.com/stacks-network/stacks-blockchain/pull/3626). ### Documentation Updates @@ -226,7 +226,7 @@ Contributions should not contain `unsafe` blocks if at all possible. ## Documentation * Each file must have a **copyright statement**. -* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). +* Any new non-test modules should have **module-level documentation** explaining what the module does, and how it fits into the blockchain as a whole ([example](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L17)). * Any new files must have some **top-of-file documentation** that describes what the contained code does, and how it fits into the overall module. Within the source files, the following **code documentation** standards are expected: @@ -247,7 +247,7 @@ Within the source files, the following **code documentation** standards are expe handle I/O reads and writes in an "outer" function. The "outer" function only does the needful I/O and passes the data into the "inner" function. The "inner" function is often private, whereas - the "outer" function is often public. For example, [`inner_try_mine_microblock` and `try_mine_microblock`](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L1148-L1216). + the "outer" function is often public. For example, [`inner_try_mine_microblock` and `try_mine_microblock`](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/testnet/stacks-node/src/neon_node.rs#L1148-L1216). ## Refactoring @@ -281,7 +281,7 @@ Within the source files, the following **code documentation** standards are expe does not decode with the allotted resources, then no further processing may be done and the data is discarded.
For an example, see how the parsing functions in the http module use `BoundReader` and - `MAX_PAYLOAD_LEN` in [http.rs](https://github.com/stacks-network/stacks-core/blob/4852d6439b473e24705f14b8af637aded33cb422/src/net/http.rs#L2260-L2285). + `MAX_PAYLOAD_LEN` in [http.rs](https://github.com/stacks-network/stacks-blockchain/blob/4852d6439b473e24705f14b8af637aded33cb422/src/net/http.rs#L2260-L2285). * **All network input reception is time-bound.** Every piece of code that ingests data _from the network_ must impose a maximum amount of time that ingestion can take. If the data takes too long to arrive, then it must be discarded without any further processing. There is no time bound for data ingested from disk or passed as an argument; this requirement is met by the space-bound requirement. @@ -303,7 +303,7 @@ Changes to the peer network should be deployed incrementally and tested by multi Any PRs that claim to improve performance **must ship with reproducible benchmarks** that accurately measure the improvement. This data must also be reported in the PR submission. -For an example, see [PR #3075](https://github.com/stacks-network/stacks-core/pull/3075). +For an example, see [PR #3075](https://github.com/stacks-network/stacks-blockchain/pull/3075). ## Error Handling @@ -597,7 +597,7 @@ Keep in mind that better variable names can reduce the need for comments, e.g.: # Licensing and contributor license agreement -`stacks-core` is released under the terms of the GPL version 3. Contributions +`stacks-blockchain` is released under the terms of the GPL version 3. Contributions that are not licensed under compatible terms will be rejected. Moreover, contributions will not be accepted unless _all_ authors accept the project's contributor license agreement.
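As an aside on the CONTRIBUTING.md rules above about space-bound and time-bound network ingestion: the following is a minimal, self-contained Rust sketch of both bounds. It is not the repo's actual `BoundReader` (which lives in the http module); the cap value and the function name here are illustrative assumptions.

```rust
use std::io::{self, Read};
use std::net::TcpStream;
use std::time::Duration;

/// Illustrative payload cap; the real bound is `MAX_PAYLOAD_LEN` in the http module.
const MAX_PAYLOAD_LEN: u64 = 16 * 1024 * 1024;

/// Read a payload from `stream`, enforcing both bounds:
/// - time: each read errors out if the peer stalls longer than `timeout`
///   (a full implementation would also enforce a total deadline);
/// - space: at most `MAX_PAYLOAD_LEN` bytes are ever buffered, no matter
///   what length the peer claims.
fn read_bounded(stream: &mut TcpStream, timeout: Duration) -> io::Result<Vec<u8>> {
    stream.set_read_timeout(Some(timeout))?;
    let mut buf = Vec::new();
    // Allow one extra byte so an oversized payload is detected and rejected
    // rather than silently truncated at the cap.
    stream.take(MAX_PAYLOAD_LEN + 1).read_to_end(&mut buf)?;
    if buf.len() as u64 > MAX_PAYLOAD_LEN {
        return Err(io::Error::new(
            io::ErrorKind::InvalidData,
            "payload exceeds maximum allowed length",
        ));
    }
    Ok(buf)
}
```

On a stalled or oversized peer this returns an error immediately, so the caller can discard the connection without further processing, which is exactly what the two rules require.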
diff --git a/Cargo.lock b/Cargo.lock index 9cfd1ad9a1e..6f72aba99de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -88,7 +88,7 @@ dependencies = [ "aes 0.8.4", "cipher 0.4.4", "ctr 0.9.2", - "ghash 0.5.0", + "ghash 0.5.1", "subtle", ] @@ -120,9 +120,9 @@ checksum = "0453232ace82dee0dd0b4c87a59bd90f7b53b314f3e0f61fe2ee7c8a16482289" [[package]] name = "ahash" -version = "0.8.8" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42cd52102d3df161c77a887b608d7a4897d7cc112886a9537b738a887a03aaff" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -162,9 +162,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -210,9 +210,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "arrayvec" @@ -265,7 +265,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28243a43d821d11341ab73c80bed182dc015c514b951616cf79bd4af39af0c3" dependencies = [ "concurrent-queue", - "event-listener 5.0.0", + "event-listener 5.2.0", "event-listener-strategy 0.5.0", "futures-core", "pin-project-lite", @@ -359,7 +359,7 @@ dependencies = [ "futures-io", "futures-lite 2.2.0", "parking", - "polling 3.4.0", + "polling 3.5.0", "rustix 0.38.31", "slab", "tracing", @@ -498,6 +498,21 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitflags" version = "1.3.2" @@ -573,9 +588,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" [[package]] name = "byte-slice-cast" @@ -603,12 +618,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.0.83" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" -dependencies = [ - "libc", -] +checksum = "a0ba8f7aaa012f30d5b2861462f6708eccd49c3c39863fe083a308035f63d723" [[package]] name = "cfg-if" @@ -633,7 +645,7 @@ dependencies = [ "js-sys", "num-traits", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -674,9 
+686,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.0" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80c21025abd42669a92efc996ef13cfb2c5c627858421ea58d5c3b331a6c134f" +checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" dependencies = [ "clap_builder", "clap_derive", @@ -684,9 +696,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.0" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458bf1f341769dfcf849846f65dffdf9146daa56bcd2a47cb4e1de9915567c99" +checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" dependencies = [ "anstream", "anstyle", @@ -703,7 +715,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -717,10 +729,10 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff", - "hashbrown 0.14.3", + "clarity", "integer-sqrt", "lazy_static", - "mutants", + "proptest", "rand 0.8.5", "rand_chacha 0.3.1", "regex", @@ -986,7 +998,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1145,9 +1157,9 @@ dependencies = [ [[package]] name = "event-listener" -version = "5.0.0" +version = "5.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b72557800024fabbaa2449dd4bf24e37b93702d457a4d4f2b0dd1f0f039f20c1" +checksum = "2b5fb89194fa3cad959b833185b3063ba881dbfc7030680b314250779fb4cc91" dependencies = [ "concurrent-queue", "parking", @@ -1170,7 +1182,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "feedafcaa9b749175d5ac357452a9d41ea2911da598fde46ce1fe02c37751291" dependencies = [ - "event-listener 5.0.0", + "event-listener 5.2.0", "pin-project-lite", ] @@ -1352,7 +1364,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -1369,9 +1381,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -1444,12 +1456,12 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", - "polyval 0.6.1", + "polyval 0.6.2", ] [[package]] @@ -1491,9 +1503,9 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "1b43ede17f21864e81be2fa654110bf1e793774238d86ef8555c37e6519c0403" [[package]] name = "hashbrown" @@ -1510,7 +1522,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - 
"ahash 0.8.8", + "ahash 0.8.11", "allocator-api2", "serde", ] @@ -1565,9 +1577,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -1606,9 +1618,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -1677,7 +1689,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -1753,9 +1765,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -1800,7 +1812,7 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eae7b9aee968036d54dce06cebaefd919e4472e753296daccd6d344e3e2df0c2" dependencies = [ - "hermit-abi 0.3.6", + "hermit-abi 0.3.9", "libc", "windows-sys 0.48.0", ] @@ -1820,6 +1832,17 @@ version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +[[package]] +name = "is-terminal" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f23ff5ef2b80d608d61efee834934d862cd92461afc0560dedf493e4c033738b" +dependencies = [ + "hermit-abi 0.3.9", + "libc", + "windows-sys 0.52.0", +] + [[package]] name = "itertools" version = "0.10.5" @@ -1837,9 +1860,9 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -1904,6 +1927,12 @@ dependencies = [ "rle-decode-fast", ] +[[package]] +name = "libm" +version = "0.2.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" + [[package]] name = "libredox" version = "0.0.1" @@ -1979,9 +2008,9 @@ checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" dependencies = [ "value-bag", ] @@ -2056,9 +2085,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" 
+checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2148,6 +2177,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" dependencies = [ "autocfg", + "libm", ] [[package]] @@ -2156,16 +2186,7 @@ version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" dependencies = [ - "hermit-abi 0.3.6", - "libc", -] - -[[package]] -name = "num_threads" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" -dependencies = [ + "hermit-abi 0.3.9", "libc", ] @@ -2192,9 +2213,9 @@ checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "overload" @@ -2204,9 +2225,9 @@ checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "p256k1" -version = "7.1.0" +version = "7.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a40a031a559eb38c35a14096f21c366254501a06d41c4b327d2a7515d713a5b7" +checksum = "3a64d160b891178fb9d43d1a58ddcafb6502daeb54d810e5e92a7c3c9bfacc07" dependencies = [ "bitvec", "bs58 0.4.0", @@ -2221,7 +2242,7 @@ dependencies = [ "rustfmt-wrapper", "serde", "sha2 0.10.8", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2264,9 +2285,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.7" +version = "2.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" +checksum = "56f8023d0fb78c8e03784ea1c7f3fa36e68a723138990b8d5a47d916b651e7a8" dependencies = [ "memchr", "thiserror", @@ -2296,7 +2317,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -2390,9 +2411,9 @@ dependencies = [ [[package]] name = "polling" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +checksum = "24f040dee2588b4963afb4e420540439d126f73fdacf4a9c486a96d840bac3c9" dependencies = [ "cfg-if 1.0.0", "concurrent-queue", @@ -2425,9 +2446,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if 1.0.0", "cpufeatures", @@ -2529,6 +2550,26 @@ dependencies = [ "thiserror", ] +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 
2.4.2", + "lazy_static", + "num-traits", + "rand 0.8.5", + "rand_chacha 0.3.1", + "rand_xorshift", + "regex-syntax 0.8.2", + "rusty-fork", + "tempfile", + "unarray", +] + [[package]] name = "protobuf" version = "2.28.0" @@ -2544,6 +2585,12 @@ dependencies = [ "cc", ] +[[package]] +name = "quick-error" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" + [[package]] name = "quote" version = "1.0.35" @@ -2630,11 +2677,20 @@ dependencies = [ "rand_core 0.5.1", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core 0.6.4", +] + [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -2678,7 +2734,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -2693,9 +2749,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -2718,7 +2774,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown 0.14.3", + "stacks-common", ] [[package]] @@ -2779,16 +2835,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if 1.0.0", "getrandom 0.2.12", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2921,7 +2978,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.22", ] [[package]] @@ -2971,7 +3028,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-webpki", "sct", ] @@ -2991,7 +3048,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -3001,11 +3058,23 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "same-file" @@ -3028,7 +3097,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -3071,9 +3140,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" [[package]] name = "semver-parser" @@ -3092,9 +3161,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -3111,20 +3180,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -3296,11 +3365,11 @@ dependencies = [ [[package]] name = "slog-term" -version = "2.9.0" +version = "2.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87d29185c55b7b258b4f120eab00f48557d4d9bc814f41713f449d35b0f8977c" +checksum = "b6e022d0b998abfe5c3782c1f03551a596269450ccd677ea51c56f8b214610e8" dependencies = [ - "atty", + "is-terminal", "slog", "term", "thread_local", @@ -3325,12 +3394,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3381,6 +3450,7 @@ dependencies = [ "libc", "nix", "percent-encoding", + "proptest", "rand 0.8.5", "rand_core 0.6.4", "ripemd", @@ -3397,6 +3467,7 @@ dependencies = [ "slog", "slog-json", "slog-term", + "stacks-common", "time 0.2.27", "winapi 0.3.9", "wsts", @@ -3412,7 +3483,6 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown 0.14.3", "http-types", "lazy_static", "libc", @@ -3447,17 
+3517,14 @@ name = "stacks-signer" version = "0.0.1" dependencies = [ "backoff", - "clap 4.5.0", + "clap 4.5.1", "clarity", "hashbrown 0.14.3", "libsigner", "libstackerdb", - "num-traits", - "polynomial", "rand 0.8.5", "rand_core 0.6.4", "reqwest", - "rusqlite", "secp256k1", "serde", "serde_derive", @@ -3486,7 +3553,6 @@ dependencies = [ "criterion", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", "integer-sqrt", "lazy_static", "libc", @@ -3497,6 +3563,7 @@ dependencies = [ "percent-encoding", "pox-locking", "prometheus", + "proptest", "rand 0.8.5", "rand_chacha 0.3.1", "rand_core 0.6.4", @@ -3517,6 +3584,7 @@ dependencies = [ "slog-json", "slog-term", "stacks-common", + "stackslib", "stdext", "stx-genesis", "tikv-jemallocator", @@ -3629,9 +3697,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -3673,9 +3741,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if 1.0.0", "fastrand 2.0.1", @@ -3720,14 +3788,14 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if 1.0.0", "once_cell", @@ -3776,9 +3844,7 @@ checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", "itoa", - "libc", "num-conv", - "num_threads", "powerfmt", "serde", "time-core", @@ -3870,10 +3936,10 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio 0.8.10", + "mio 0.8.11", "num_cpus", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "windows-sys 0.48.0", ] @@ -3942,7 +4008,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.5", + "toml_edit 0.22.6", ] [[package]] @@ -3967,15 +4033,15 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.22.5" +version = "0.22.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99e68c159e8f5ba8a28c4eb7b0c0c190d77bb479047ca713270048145a9ad28a" +checksum = "2c1b5fd4128cc8d3e0cb74d4ed9a9cc7c7284becd4df68f5f940e1ad123606f6" dependencies = [ "indexmap", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.1", + "winnow 0.6.5", ] [[package]] @@ -3987,7 +4053,7 @@ dependencies = [ "home", "once_cell", "regex", - "semver 1.0.21", + "semver 1.0.22", "walkdir", ] @@ -4017,7 +4083,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] @@ -4108,6 +4174,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicase" version = "2.7.0" @@ -4131,9 +4203,9 @@ checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -4224,6 +4296,15 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "waker-fn" version = "1.1.1" @@ -4232,9 +4313,9 @@ checksum = "f3c4517f54858c779bbcbf228f4fca63d121bf85fbecb2dc578cdf4a39395690" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -4294,9 +4375,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -4304,24 +4385,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -4331,9 +4412,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -4341,28 +4422,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = 
"e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -4423,7 +4504,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -4441,7 +4522,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -4461,17 +4542,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -4482,9 +4563,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -4494,9 +4575,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -4506,9 +4587,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = 
"windows_i686_msvc" @@ -4518,9 +4599,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -4530,9 +4611,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -4542,9 +4623,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -4554,9 +4635,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" @@ -4569,9 +4650,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.1" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d90f4e0f530c4c69f62b80d839e9ef3855edc9cba471a160c4d692deed62b401" +checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" dependencies = [ "memchr", ] @@ -4598,9 +4679,9 @@ dependencies = [ [[package]] name = "wsts" -version = "9.0.0" +version = "8.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c80d57a61294350ed91e91eb20a6c34da084ec8f15d039bab79ce3efabbd1a4" +checksum = "467aa8e40ed0277d19922fd0e7357c16552cb900e5138f61a48ac23c4b7878e0" dependencies = [ "aes-gcm 0.10.3", "bs58 0.5.0", @@ -4644,7 +4725,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.52", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index feab983833c..24b9f333f3d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -10,17 +10,19 @@ members = [ "contrib/tools/relay-server", "libsigner", "stacks-signer", - "testnet/stacks-node"] + "testnet/stacks-node" +] # Dependencies we want to keep the same between workspace members -[workspace.dependencies] -ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } +[workspace.dependencies] +ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } hashbrown = "0.14.3" rand_core = "0.6" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" -wsts = { version = "9.0.0", default-features = false } +wsts = { version = "8.1", default-features = false } +proptest = { version = "1.4.0" } # Use a bit more than default optimization for # 
dev builds to speed up test execution @@ -37,9 +39,3 @@ opt-level = 3 debug = true codegen-units = 1 lto = "fat" - -# Release build with less LTO -# Useful for faster builds or low-RAM environments -[profile.release-lite] -inherits = "release" -lto = "thin" diff --git a/Dockerfile.debian b/Dockerfile.debian index 8b6759527ed..4b9a56b8c5a 100644 --- a/Dockerfile.debian +++ b/Dockerfile.debian @@ -1,4 +1,4 @@ -FROM rust:bookworm as build +FROM rust:bullseye as build ARG STACKS_NODE_VERSION="No Version Info" ARG GIT_BRANCH='No Branch Info' @@ -14,8 +14,9 @@ RUN cd testnet/stacks-node && cargo build --features monitoring_prom,slog_json - RUN cp target/release/stacks-node /out -FROM debian:bookworm-slim +FROM debian:bullseye-slim +RUN apt update && apt install -y netcat COPY --from=build /out/ /bin/ CMD ["stacks-node", "mainnet"] diff --git a/README.md b/README.md index 3f91b1a9f21..e61829ff304 100644 --- a/README.md +++ b/README.md @@ -11,8 +11,8 @@ Reference implementation of the [Stacks blockchain](https://github.com/stacks-ne Stacks is a layer-2 blockchain that uses Bitcoin as a base layer for security and enables decentralized apps and predictable smart contracts using the [Clarity language](https://clarity-lang.org/). Stacks implements [Proof of Transfer (PoX)](https://community.stacks.org/pox) mining that anchors to Bitcoin security. Leader election happens at the Bitcoin blockchain and Stacks (STX) miners write new blocks on the separate Stacks blockchain. With PoX there is no need to modify Bitcoin to enable smart contracts and decentralized apps. [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg?style=flat)](https://www.gnu.org/licenses/gpl-3.0) -[![Release](https://img.shields.io/github/v/release/stacks-network/stacks-core?style=flat)](https://github.com/stacks-network/stacks-core/releases/latest) -[![Build Status](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml/badge.svg?branch=master&event=workflow_dispatch&style=flat)](https://github.com/stacks-network/stacks-core/actions/workflows/ci.yml?query=event%3Aworkflow_dispatch+branch%3Amaster) +[![Release](https://img.shields.io/github/v/release/stacks-network/stacks-blockchain?style=flat)](https://github.com/stacks-network/stacks-blockchain/releases/latest) +[![Build Status](https://github.com/stacks-network/stacks-blockchain/actions/workflows/ci.yml/badge.svg?branch=master&event=workflow_dispatch&style=flat)](https://github.com/stacks-network/stacks-blockchain/actions/workflows/ci.yml?query=event%3Aworkflow_dispatch+branch%3Amaster) [![Discord Chat](https://img.shields.io/discord/621759717756370964.svg)](https://stacks.chat) ## Building @@ -22,44 +22,28 @@ Stacks is a layer-2 blockchain that uses Bitcoin as a base layer for security an _For building on Windows, follow the rustup installer instructions at https://rustup.rs/._ ```bash -curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -source $HOME/.cargo/env -rustup component add rustfmt +$ curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh +$ source $HOME/.cargo/env +$ rustup component add rustfmt ``` -- When building the [`master`](https://github.com/stacks-network/stacks-core/tree/master) branch, ensure you are using the latest stable release: +- When building the [`master`](https://github.com/stacks-network/stacks-blockchain/tree/master) branch, ensure you are using the latest stable release: ```bash -rustup update +$ rustup update ``` ### 2. 
Clone the source repository:

 ```bash
-git clone --depth=1 https://github.com/stacks-network/stacks-core.git
-cd stacks-core
+$ git clone --depth=1 https://github.com/stacks-network/stacks-blockchain.git
+$ cd stacks-blockchain
 ```

 ### 3. Build the project

 ```bash
-# Fully optimized release build
-cargo build --release
-# Faster but less optimized build. Necessary if < 16 GB RAM
-cargo build --profile release-lite
-```
-
-_Note on building_: you may set `RUSTFLAGS` to build binaries for your native cpu:
-
-```
-RUSTFLAGS="-Ctarget-cpu=native"
-```
-
-or uncomment these lines in `./cargo/config`:
-
-```
-# [build]
-# rustflags = ["-Ctarget-cpu=native"]
+$ cargo build
 ```

 ## Testing

 **Run the tests:**

 ```bash
-cargo test testnet -- --test-threads=1
+$ cargo test testnet -- --test-threads=1
 ```

 **Run all unit tests in parallel using [nextest](https://nexte.st/):**

 _Warning, this typically takes a few minutes_
-
 ```bash
-cargo nextest run
+$ cargo nextest run
 ```

 ## Run the testnet

 You can observe the state machine in action locally by running:

 ```bash
-cd testnet/stacks-node
-cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml
+$ cd testnet/stacks-node
+$ cargo run --bin stacks-node -- start --config ./conf/testnet-follower-conf.toml
 ```

 _On Windows, many tests will fail if the line endings aren't `LF`. Please ensure that you have git's `core.autocrlf` set to `input` when you clone the repository to avoid any potential issues. This is due to the Clarity language currently being sensitive to line endings._
diff --git a/build-scripts/Dockerfile.linux-glibc-arm64 b/build-scripts/Dockerfile.linux-glibc-arm64
new file mode 100644
index 00000000000..11e38f88048
--- /dev/null
+++ b/build-scripts/Dockerfile.linux-glibc-arm64
@@ -0,0 +1,26 @@
+FROM rust:bullseye as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=aarch64-unknown-linux-gnu
+WORKDIR /src
+
+COPY . .
+
+RUN apt-get update && apt-get install -y git gcc-aarch64-linux-gnu
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && CC=aarch64-linux-gnu-gcc \
+    CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc \
+    CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \
+    cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
diff --git a/build-scripts/Dockerfile.linux-glibc-armv7 b/build-scripts/Dockerfile.linux-glibc-armv7
new file mode 100644
index 00000000000..cc05298dfef
--- /dev/null
+++ b/build-scripts/Dockerfile.linux-glibc-armv7
@@ -0,0 +1,26 @@
+FROM rust:bullseye as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=armv7-unknown-linux-gnueabihf
+WORKDIR /src
+
+COPY . .
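+
+# Cross-compile via the gnueabihf toolchain: `CC`/`CC_armv7_*` point build
+# scripts and C dependencies at the cross gcc, and `CARGO_TARGET_*_LINKER`
+# tells cargo to link with it (same pattern as the arm64 image above).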
+
+RUN apt-get update && apt-get install -y git gcc-arm-linux-gnueabihf
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && CC=arm-linux-gnueabihf-gcc \
+    CC_armv7_unknown_linux_gnueabihf=arm-linux-gnueabihf-gcc \
+    CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER=arm-linux-gnueabihf-gcc \
+    cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
diff --git a/build-scripts/Dockerfile.linux-glibc-x64 b/build-scripts/Dockerfile.linux-glibc-x64
new file mode 100644
index 00000000000..0e2bbdd9bee
--- /dev/null
+++ b/build-scripts/Dockerfile.linux-glibc-x64
@@ -0,0 +1,26 @@
+FROM rust:bullseye as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=x86_64-unknown-linux-gnu
+# Allow us to override the default `--target-cpu` for the given target triplet
+ARG TARGET_CPU
+WORKDIR /src
+
+COPY . .
+
+RUN apt-get update && apt-get install -y git
+
+# Run all the build steps in ramdisk in an attempt to speed things up.
+# `env` is required so the optional RUSTFLAGS assignment (which only exists
+# after expansion) is still applied to the `cargo build` invocation.
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && env ${TARGET_CPU:+RUSTFLAGS="$RUSTFLAGS $TARGET_CPU"} \
+    cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
diff --git a/build-scripts/Dockerfile.linux-musl-arm64 b/build-scripts/Dockerfile.linux-musl-arm64
new file mode 100644
index 00000000000..24a07f018a2
--- /dev/null
+++ b/build-scripts/Dockerfile.linux-musl-arm64
@@ -0,0 +1,22 @@
+FROM messense/rust-musl-cross:aarch64-musl as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=aarch64-unknown-linux-musl
+WORKDIR /src
+
+COPY . .
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
+
diff --git a/build-scripts/Dockerfile.linux-musl-armv7 b/build-scripts/Dockerfile.linux-musl-armv7
new file mode 100644
index 00000000000..2ce5a999120
--- /dev/null
+++ b/build-scripts/Dockerfile.linux-musl-armv7
@@ -0,0 +1,21 @@
+FROM messense/rust-musl-cross:armv7-musleabihf as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=armv7-unknown-linux-musleabihf
+WORKDIR /src
+
+COPY . .
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
diff --git a/build-scripts/Dockerfile.linux-musl-x64 b/build-scripts/Dockerfile.linux-musl-x64
new file mode 100644
index 00000000000..d954708a0a7
--- /dev/null
+++ b/build-scripts/Dockerfile.linux-musl-x64
@@ -0,0 +1,27 @@
+FROM rust:alpine as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=x86_64-unknown-linux-musl
+# Allow us to override the default `--target-cpu` for the given target triplet
+ARG TARGET_CPU
+WORKDIR /src
+
+COPY . .
+
+RUN apk update && apk add git musl-dev
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && env ${TARGET_CPU:+RUSTFLAGS="$RUSTFLAGS $TARGET_CPU"} \
+    cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
+
diff --git a/build-scripts/Dockerfile.macos-arm64 b/build-scripts/Dockerfile.macos-arm64
new file mode 100644
index 00000000000..0fd8a1e4c38
--- /dev/null
+++ b/build-scripts/Dockerfile.macos-arm64
@@ -0,0 +1,30 @@
+FROM rust:bullseye as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG OSXCROSS="https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst"
+ARG TARGET=aarch64-apple-darwin
+WORKDIR /src
+
+COPY . .
+
+RUN apt-get update && apt-get install -y clang zstd
+
+# Retrieve and install osxcross
+RUN wget -nc -O /tmp/osxcross.tar.zst ${OSXCROSS} \
+    && mkdir -p /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && . /opt/osxcross/env-macos-aarch64 \
+    && cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
+
diff --git a/build-scripts/Dockerfile.macos-x64 b/build-scripts/Dockerfile.macos-x64
new file mode 100644
index 00000000000..f61d0574e9f
--- /dev/null
+++ b/build-scripts/Dockerfile.macos-x64
@@ -0,0 +1,32 @@
+FROM rust:bullseye as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG OSXCROSS="https://github.com/hirosystems/docker-osxcross-rust/releases/download/MacOSX12.0.sdk/osxcross-d904031_MacOSX12.0.sdk.tar.zst"
+ARG TARGET=x86_64-apple-darwin
+ARG TARGET_CPU
+WORKDIR /src
+
+COPY . .
+
+RUN apt-get update && apt-get install -y clang zstd
+
+# Retrieve and install osxcross
+RUN wget -nc -O /tmp/osxcross.tar.zst ${OSXCROSS} \
+    && mkdir -p /opt/osxcross && tar -xaf /tmp/osxcross.tar.zst -C /opt/osxcross
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && . /opt/osxcross/env-macos-x86_64 \
+    && env ${TARGET_CPU:+RUSTFLAGS="$RUSTFLAGS $TARGET_CPU"} \
+    cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect /out/blockstack-cli /out/clarity-cli /out/stacks-node /
+
diff --git a/build-scripts/Dockerfile.windows-x64 b/build-scripts/Dockerfile.windows-x64
new file mode 100644
index 00000000000..3265c05b5c4
--- /dev/null
+++ b/build-scripts/Dockerfile.windows-x64
@@ -0,0 +1,27 @@
+FROM rust:bullseye as build
+
+ARG STACKS_NODE_VERSION="No Version Info"
+ARG GIT_BRANCH='No Branch Info'
+ARG GIT_COMMIT='No Commit Info'
+ARG BUILD_DIR=/build
+ARG TARGET=x86_64-pc-windows-gnu
+ARG TARGET_CPU
+WORKDIR /src
+
+COPY . .
+
+RUN apt-get update && apt-get install -y git gcc-mingw-w64-x86-64
+
+# Run all the build steps in ramdisk in an attempt to speed things up
+RUN --mount=type=tmpfs,target=${BUILD_DIR} cp -R /src/. ${BUILD_DIR}/ \
+    && cd ${BUILD_DIR} \
+    && rustup target add ${TARGET} \
+    && env CC_x86_64_pc_windows_gnu=x86_64-w64-mingw32-gcc \
+    CARGO_TARGET_X86_64_PC_WINDOWS_GNU_LINKER=x86_64-w64-mingw32-gcc \
+    ${TARGET_CPU:+RUSTFLAGS="$RUSTFLAGS $TARGET_CPU"} \
+    cargo build --features monitoring_prom,slog_json --release --workspace --target ${TARGET} --bin stacks-node --bin stacks-inspect --bin clarity-cli --bin blockstack-cli \
+    && mkdir -p /out \
+    && cp -R ${BUILD_DIR}/target/${TARGET}/release/. /out
+
+FROM scratch AS export-stage
+COPY --from=build /out/stacks-inspect.exe /out/blockstack-cli.exe /out/clarity-cli.exe /out/stacks-node.exe /
diff --git a/build-scripts/build-dist.sh b/build-scripts/build-dist.sh
new file mode 100755
index 00000000000..8be8f4f8a71
--- /dev/null
+++ b/build-scripts/build-dist.sh
@@ -0,0 +1,45 @@
+#!/bin/bash
+
+set -e
+
+script_path="$(dirname "$0")"
+src_dir="$(dirname "$script_path")"
+cd "$src_dir"
+
+build_platform () {
+    echo "Building $1"
+    rm -rf dist/$1
+    DOCKER_BUILDKIT=1 docker build --progress=plain -o dist/$1 -f ./build-scripts/Dockerfile.$1 .
+}
+
+case $DIST_TARGET_FILTER in
+    (*[![:blank:]]*)
+        case $DIST_TARGET_FILTER in
+            linux-glibc-x64) build_platform linux-glibc-x64 ;;
+            linux-glibc-arm64) build_platform linux-glibc-arm64 ;;
+            linux-glibc-armv7) build_platform linux-glibc-armv7 ;;
+            linux-musl-x64) build_platform linux-musl-x64 ;;
+            linux-musl-arm64) build_platform linux-musl-arm64 ;;
+            linux-musl-armv7) build_platform linux-musl-armv7 ;;
+            windows-x64) build_platform windows-x64 ;;
+            macos-x64) build_platform macos-x64 ;;
+            macos-arm64) build_platform macos-arm64 ;;
+            *)
+                echo "Invalid dist target filter '$DIST_TARGET_FILTER'"
+                exit 1
+                ;;
+        esac
+        ;;
+    (*)
+        echo "Building distributions for all targets."
+        build_platform linux-glibc-x64
+        build_platform linux-glibc-arm64
+        build_platform linux-glibc-armv7
+        build_platform linux-musl-x64
+        build_platform linux-musl-arm64
+        build_platform linux-musl-armv7
+        build_platform windows-x64
+        build_platform macos-x64
+        build_platform macos-arm64
+        ;;
+esac
diff --git a/clarity/Cargo.toml b/clarity/Cargo.toml
index 70cbcec5857..b080726636b 100644
--- a/clarity/Cargo.toml
+++ b/clarity/Cargo.toml
@@ -30,8 +30,7 @@ slog = { version = "2.5.2", features = [ "max_level_trace" ] }
 stacks_common = { package = "stacks-common", path = "../stacks-common" }
 rstest = "0.17.0"
 rstest_reuse = "0.5.0"
-hashbrown = { workspace = true }
-mutants = "0.0.3"
+proptest = { workspace = true, optional = true }

 [dependencies.serde_json]
 version = "1.0"
@@ -47,13 +46,15 @@ features = ["std"]

 [dev-dependencies]
 assert-json-diff = "1.0.0"
-# a nightly rustc regression (35dbef235 2021-03-02) prevents criterion from compiling
-# but it isn't necessary for tests: only benchmarks. therefore, commenting out for now.
-# criterion = "0.3"
+clarity = { path = "./", features = ["testing"] }
+# This ensures that `stacks-common` is built using the `testing` feature
+# when we build this crate in dev/test. Note that the --features flag
+# doesn't need to be provided then.
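+# (For example, a plain `cargo test -p clarity` builds both crates with their
+# "testing" features enabled; no extra --features argument is required.)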
+stacks_common = { package = "stacks-common", path = "../stacks-common", features = ["testing"] }

 [features]
 default = []
 developer-mode = []
 slog_json = ["stacks_common/slog_json"]
-testing = []
-devtools = []
+testing = ["dep:proptest", "stacks_common/testing"]
+
diff --git a/clarity/src/libclarity.rs b/clarity/src/libclarity.rs
index daae7dcfd7d..860a3ab0647 100644
--- a/clarity/src/libclarity.rs
+++ b/clarity/src/libclarity.rs
@@ -41,6 +41,10 @@ pub extern crate rstest_reuse;
 #[macro_use]
 extern crate stacks_common;

+#[cfg(any(test, feature = "testing"))]
+#[macro_use]
+pub mod proptesting;
+
 pub use stacks_common::{
     codec, consts, impl_array_hexstring_fmt, impl_array_newtype, impl_byte_array_message_codec,
     impl_byte_array_serde, types, util,
diff --git a/clarity/src/proptesting/callables.rs b/clarity/src/proptesting/callables.rs
new file mode 100644
index 00000000000..aa7df423a81
--- /dev/null
+++ b/clarity/src/proptesting/callables.rs
@@ -0,0 +1,126 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use proptest::prelude::*;
+use rand::distributions::uniform::SampleRange;
+use serde::de::value;
+
+use super::*;
+use crate::vm::callables::{DefineType, DefinedFunction, FunctionIdentifier};
+use crate::vm::database::{
+    DataMapMetadata, DataVariableMetadata, FungibleTokenMetadata, NonFungibleTokenMetadata,
+};
+use crate::vm::representations::TraitDefinition;
+use crate::vm::types::FunctionSignature;
+
+/// Returns a [`Strategy`] for randomly generating a [`FunctionIdentifier`] instance
+/// representing a user-defined function.
+pub fn function_identifier_user() -> impl Strategy<Value = FunctionIdentifier> {
+    (clarity_name(), clarity_name()).prop_map(|(name, context)| {
+        FunctionIdentifier::new_user_function(&context.to_string(), &name.to_string())
+    })
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`FunctionIdentifier`] instance
+/// representing a native function.
+pub fn function_identifier_native() -> impl Strategy<Value = FunctionIdentifier> {
+    (clarity_name()).prop_map(|name| FunctionIdentifier::new_native_function(&name.to_string()))
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`FunctionIdentifier`]
+/// instance representing a function of any kind, user-defined or native.
+pub fn function_identifier() -> impl Strategy<Value = FunctionIdentifier> {
+    prop_oneof![function_identifier_user(), function_identifier_native()]
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`DefineType`] variant.
+pub fn define_type() -> impl Strategy<Value = DefineType> {
+    prop_oneof![
+        Just(DefineType::Public),
+        Just(DefineType::Private),
+        Just(DefineType::ReadOnly)
+    ]
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`DataVariableMetadata`]
+/// instance.
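+///
+/// A minimal usage sketch (illustrative only; `TestRunner` and `ValueTree`
+/// are proptest's public API):
+///
+/// ```ignore
+/// use proptest::strategy::{Strategy, ValueTree};
+/// use proptest::test_runner::TestRunner;
+///
+/// let mut runner = TestRunner::default();
+/// // Draw one random `DataVariableMetadata` from the strategy.
+/// let meta = data_variable_metadata()
+///     .new_tree(&mut runner)
+///     .unwrap()
+///     .current();
+/// ```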
+pub fn data_variable_metadata() -> impl Strategy<Value = DataVariableMetadata> {
+    type_signature().prop_map(|value_type| DataVariableMetadata { value_type })
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`DataMapMetadata`] instance.
+pub fn data_map_metadata() -> impl Strategy<Value = DataMapMetadata> {
+    (type_signature(), type_signature()).prop_map(|(key_type, value_type)| DataMapMetadata {
+        key_type,
+        value_type,
+    })
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`NonFungibleTokenMetadata`]
+/// instance.
+pub fn nft_metadata() -> impl Strategy<Value = NonFungibleTokenMetadata> {
+    type_signature().prop_map(|key_type| NonFungibleTokenMetadata { key_type })
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`FungibleTokenMetadata`]
+/// instance.
+pub fn ft_metadata() -> impl Strategy<Value = FungibleTokenMetadata> {
+    any::<Option<u128>>().prop_map(|total_supply| FungibleTokenMetadata { total_supply })
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`FunctionSignature`]
+/// instance.
+pub fn function_signature() -> impl Strategy<Value = FunctionSignature> {
+    (
+        // arg_types
+        prop::collection::vec(type_signature(), 0..3),
+        // return_type
+        type_signature(),
+    )
+        .prop_map(|(args, returns)| FunctionSignature { args, returns })
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`DefinedFunction`]
+/// instance.
+pub fn defined_function() -> impl Strategy<Value = DefinedFunction> {
+    (
+        // identifier
+        function_identifier(),
+        // name
+        clarity_name(),
+        // arg_types + arguments, which must have the same length
+        (0usize..3usize).prop_flat_map(|x| {
+            (
+                prop::collection::vec(type_signature(), x..=x),
+                prop::collection::vec(clarity_name(), x..=x),
+            )
+        }),
+        // define_type
+        define_type(),
+        // body
+        symbolic_expression(),
+    )
+        .prop_map(
+            |(identifier, name, args, define_type, body)| DefinedFunction {
+                identifier,
+                name,
+                arg_types: args.0,
+                define_type,
+                arguments: args.1,
+                body,
+            },
+        )
+}
diff --git a/clarity/src/proptesting/contracts.rs b/clarity/src/proptesting/contracts.rs
new file mode 100644
index 00000000000..f00fc74ee0a
--- /dev/null
+++ b/clarity/src/proptesting/contracts.rs
@@ -0,0 +1,99 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
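+
+//! Strategies for generating random [`Contract`]s and [`ContractContext`]s.
+//!
+//! A usage sketch (illustrative, not a test that ships with this module):
+//!
+//! ```ignore
+//! use proptest::prelude::*;
+//! use clarity::proptesting::contract;
+//!
+//! proptest! {
+//!     #[test]
+//!     fn accepts_any_contract(c in contract()) {
+//!         // `c.contract_context` arrives populated with random variables,
+//!         // functions, traits, and token metadata.
+//!     }
+//! }
+//! ```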
+
+use proptest::collection::btree_map;
+use proptest::prelude::*;
+use stacks_common::proptesting::*;
+
+use super::*;
+use crate::types::{StacksHashMap as HashMap, StacksHashSet as HashSet};
+use crate::vm::contracts::Contract;
+use crate::vm::types::PrincipalData;
+use crate::vm::{ClarityVersion, ContractContext, Value};
+
+pub fn contract_context(clarity_version: ClarityVersion) -> impl Strategy<Value = ContractContext> {
+    (
+        // contract_identifier
+        principal_contract().prop_map(|p| match p {
+            Value::Principal(PrincipalData::Contract(qual)) => qual,
+            _ => unreachable!(),
+        }),
+        // variables
+        prop::collection::vec((clarity_name(), PropValue::any().prop_map_into()), 0..8).prop_map(
+            |v| {
+                v.into_iter()
+                    .map(|(k, v)| (k, v))
+                    .collect::<HashMap<_, _>>()
+            },
+        ),
+        // functions
+        stacks_hash_map(clarity_name(), defined_function(), 1..5),
+        // defined_traits
+        stacks_hash_map(
+            clarity_name(),
+            btree_map(clarity_name(), function_signature(), 1..5),
+            1..5,
+        ),
+        // implemented_traits
+        stacks_hash_set(trait_identifier(), 0..3),
+        // persisted_names
+        stacks_hash_set(clarity_name(), 0..5),
+        // meta_data_map
+        stacks_hash_map(clarity_name(), data_map_metadata(), 1..5),
+        // meta_data_var
+        stacks_hash_map(clarity_name(), data_variable_metadata(), 1..5),
+        // meta_nft
+        stacks_hash_map(clarity_name(), nft_metadata(), 1..5),
+        // meta_ft
+        stacks_hash_map(clarity_name(), ft_metadata(), 1..5),
+        // data_size
+        0u64..64,
+    )
+        .prop_map(
+            move |(
+                contract_identifier,
+                variables,
+                functions,
+                defined_traits,
+                implemented_traits,
+                persisted_names,
+                meta_data_map,
+                meta_data_var,
+                meta_nft,
+                meta_ft,
+                data_size,
+            )| {
+                let mut cc = ContractContext::new(contract_identifier, clarity_version);
+                cc.variables = variables;
+                cc.functions = functions;
+                cc.defined_traits = defined_traits;
+                cc.implemented_traits = implemented_traits;
+                cc.persisted_names = persisted_names;
+                cc.meta_data_map = meta_data_map;
+                cc.meta_data_var = meta_data_var;
+                cc.meta_nft = meta_nft;
+                cc.meta_ft = meta_ft;
+                cc.data_size = data_size;
+                cc
+            },
+        )
+}
+
+pub fn contract() -> impl Strategy<Value = Contract> {
+    clarity_version()
+        .prop_flat_map(contract_context)
+        .prop_map(|contract_context| Contract { contract_context })
+}
diff --git a/clarity/src/proptesting/mod.rs b/clarity/src/proptesting/mod.rs
new file mode 100644
index 00000000000..40ea7c37e9c
--- /dev/null
+++ b/clarity/src/proptesting/mod.rs
@@ -0,0 +1,43 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
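+
+//! Proptest strategies for generating random Clarity values, types, and
+//! contracts. A typical consumer looks like this (sketch; `PropValue` is
+//! defined in `values.rs` below):
+//!
+//! ```ignore
+//! use proptest::prelude::*;
+//! use clarity::proptesting::PropValue;
+//!
+//! proptest! {
+//!     #[test]
+//!     fn handles_any_value(val in PropValue::any()) {
+//!         let v: clarity::vm::Value = val.into();
+//!         // exercise serialization, type-checking, etc. on `v`
+//!     }
+//! }
+//! ```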
+
+use proptest::prop_oneof;
+use proptest::strategy::{Just, Strategy, ValueTree};
+use proptest::test_runner::{Config, RngAlgorithm, TestRng, TestRunner};
+use rand::Rng;
+use stacks_common::types::StacksHashMap as HashMap;
+
+pub mod callables;
+pub mod contracts;
+pub mod representations;
+pub mod types;
+pub mod values;
+
+pub use callables::*;
+pub use contracts::*;
+pub use representations::*;
+pub use types::*;
+pub use values::*;
+
+use crate::vm::ClarityVersion;
+
+/// Returns a [`Strategy`] for randomly generating a [`ClarityVersion`] instance.
+pub fn clarity_version() -> impl Strategy<Value = ClarityVersion> {
+    prop_oneof![
+        Just(crate::vm::ClarityVersion::Clarity1),
+        Just(crate::vm::ClarityVersion::Clarity2),
+    ]
+}
diff --git a/clarity/src/proptesting/representations.rs b/clarity/src/proptesting/representations.rs
new file mode 100644
index 00000000000..5640a4c2ebd
--- /dev/null
+++ b/clarity/src/proptesting/representations.rs
@@ -0,0 +1,56 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use proptest::prelude::*;
+
+use super::*;
+use crate::vm::representations::{Span, TraitDefinition};
+use crate::vm::{ClarityName, ContractName, SymbolicExpression, SymbolicExpressionType};
+
+/// Returns a [`Strategy`] for randomly generating a [`ClarityName`].
+pub fn clarity_name() -> impl Strategy<Value = ClarityName> {
+    "[a-z]{40}".prop_map(|s| s.try_into().unwrap())
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`ContractName`].
+pub fn contract_name() -> impl Strategy<Value = ContractName> {
+    "[a-zA-Z]{1,40}".prop_map(|s| s.try_into().unwrap())
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`TraitDefinition`].
+pub fn trait_definition() -> impl Strategy<Value = TraitDefinition> {
+    prop_oneof![
+        trait_identifier().prop_map(TraitDefinition::Defined),
+        trait_identifier().prop_map(TraitDefinition::Imported)
+    ]
+}
+
+/// Returns a [`Strategy`] for randomly generating a [`SymbolicExpression`].
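+///
+/// Leaves are atoms, literal values, fields, and trait references; lists are
+/// then nested on top of them. Drawing one expression looks like this
+/// (sketch, using proptest's `TestRunner`):
+///
+/// ```ignore
+/// use proptest::strategy::{Strategy, ValueTree};
+/// use proptest::test_runner::TestRunner;
+///
+/// let mut runner = TestRunner::default();
+/// let expr = symbolic_expression().new_tree(&mut runner).unwrap().current();
+/// ```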
+pub fn symbolic_expression() -> impl Strategy<Value = SymbolicExpression> {
+    let leaf = prop_oneof![
+        clarity_name().prop_map(|name| SymbolicExpression::atom(name)),
+        PropValue::any().prop_map(|val| SymbolicExpression::atom_value(val.into())),
+        PropValue::any().prop_map(|val| SymbolicExpression::literal_value(val.into())),
+        trait_identifier().prop_map(|name| SymbolicExpression::field(name)),
+        (clarity_name(), trait_definition())
+            .prop_map(|(n, t)| SymbolicExpression::trait_reference(n, t)),
+    ];
+
+    leaf.prop_recursive(3, 64, 5, |inner| {
+        prop::collection::vec(inner, 1..3)
+            .prop_map(|list| SymbolicExpression::list(list.into_boxed_slice()))
+    })
+}
diff --git a/clarity/src/proptesting/types.rs b/clarity/src/proptesting/types.rs
new file mode 100644
index 00000000000..f47c7e4fa84
--- /dev/null
+++ b/clarity/src/proptesting/types.rs
@@ -0,0 +1,94 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use proptest::prelude::*;
+use proptest::string::string_regex;
+
+use super::*;
+use crate::types::{StacksHashMap, StacksHashSet};
+use crate::vm::callables::{DefineType, DefinedFunction, FunctionIdentifier};
+use crate::vm::contracts::Contract;
+use crate::vm::representations::{
+    Span, SymbolicExpression, SymbolicExpressionType, TraitDefinition,
+};
+use crate::vm::types::{
+    ASCIIData, BuffData, CharType, ListData, ListTypeData, OptionalData, PrincipalData,
+    QualifiedContractIdentifier, ResponseData, SequenceData, SequenceSubtype,
+    StandardPrincipalData, StringSubtype, StringUTF8Length, TraitIdentifier, TupleData,
+    TupleTypeSignature, TypeSignature, UTF8Data, Value, MAX_VALUE_SIZE,
+};
+use crate::vm::{ClarityName, ClarityVersion, ContractContext, ContractName};
+
+pub fn standard_principal_data() -> impl Strategy<Value = StandardPrincipalData> {
+    (0u8..32, prop::collection::vec(any::<u8>(), 20))
+        .prop_map(|(v, hash)| StandardPrincipalData(v, hash.try_into().unwrap()))
+}
+
+pub fn qualified_contract_identifier() -> impl Strategy<Value = QualifiedContractIdentifier> {
+    (standard_principal_data(), contract_name())
+        .prop_map(|(issuer, name)| QualifiedContractIdentifier { issuer, name })
+}
+
+pub fn trait_identifier() -> impl Strategy<Value = TraitIdentifier> {
+    (clarity_name(), qualified_contract_identifier()).prop_map(|(name, contract_identifier)| {
+        TraitIdentifier {
+            name,
+            contract_identifier,
+        }
+    })
+}
+
+pub fn type_signature() -> impl Strategy<Value = TypeSignature> {
+    let leaf = prop_oneof![
+        Just(TypeSignature::IntType),
+        Just(TypeSignature::UIntType),
+        Just(TypeSignature::BoolType),
+        (0u32..128).prop_map(|s| TypeSignature::SequenceType(SequenceSubtype::BufferType(
+            s.try_into().unwrap()
+        ))),
+        (0u32..128).prop_map(|s| TypeSignature::SequenceType(SequenceSubtype::StringType(
+            StringSubtype::ASCII(s.try_into().unwrap())
+        ))),
+        Just(TypeSignature::PrincipalType),
+        (0u32..32).prop_map(|s| TypeSignature::SequenceType(SequenceSubtype::StringType(
+            StringSubtype::UTF8(s.try_into().unwrap())
+        )))
+    ];
+
+    // Compose the leaves into nested optional/response/tuple/list types,
+    // bounded to 3 levels of nesting.
+    leaf.prop_recursive(3, 32, 5, |inner| prop_oneof![
+        // optional type: 10% NoType + 90% any other type
+        prop_oneof![
+            1 => Just(TypeSignature::NoType),
+            9 => inner.clone(),
+        ]
+        .prop_map(|t| TypeSignature::new_option(t).unwrap()),
+        // response type: 20% (any, NoType) + 20% (NoType, any) + 60% (any, any)
+        prop_oneof![
+            1 => inner.clone().prop_map(|ok_ty| TypeSignature::new_response(ok_ty, TypeSignature::NoType).unwrap()),
+            1 => inner.clone().prop_map(|err_ty| TypeSignature::new_response(TypeSignature::NoType, err_ty).unwrap()),
+            3 => (inner.clone(), inner.clone()).prop_map(|(ok_ty, err_ty)| TypeSignature::new_response(ok_ty, err_ty).unwrap()),
+        ],
+        // tuple type
+        prop::collection::btree_map(
+            r#"[a-zA-Z]{1,16}"#.prop_map(|name| name.try_into().unwrap()),
+            inner.clone(),
+            1..8
+        )
+        .prop_map(|btree| TypeSignature::TupleType(btree.try_into().unwrap())),
+        // list type
+        (1u32..8, inner.clone()).prop_map(|(s, ty)| (ListTypeData::new_list(ty, s).unwrap()).into()),
+    ])
+}
diff --git a/clarity/src/proptesting/values.rs b/clarity/src/proptesting/values.rs
new file mode 100644
index 00000000000..4e5543614a8
--- /dev/null
+++ b/clarity/src/proptesting/values.rs
@@ -0,0 +1,273 @@
+// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
+// Copyright (C) 2020 Stacks Open Internet Foundation
+//
+// This program is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// This program is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+use proptest::prelude::*;
+
+use super::*;
+use crate::vm::types::{
+    BuffData, CharType, ListData, ListTypeData, OptionalData, PrincipalData,
+    QualifiedContractIdentifier, ResponseData, SequenceData, SequenceSubtype,
+    StandardPrincipalData, StringSubtype, TupleData, TupleTypeSignature, TypeSignature, UTF8Data,
+};
+use crate::vm::{ContractName, Value};
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of
+/// the specified [`TypeSignature`].
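+///
+/// Combined with [`type_signature`], this yields "any well-typed value"
+/// (sketch, assuming the proptest prelude is in scope; this is exactly how
+/// `PropValue::any` below is built):
+///
+/// ```ignore
+/// let any_typed_value = type_signature().prop_flat_map(value);
+/// ```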
+pub fn value(ty: TypeSignature) -> impl Strategy<Value = Value> {
+    match ty {
+        TypeSignature::NoType => unreachable!(),
+        TypeSignature::IntType => int().boxed(),
+        TypeSignature::UIntType => uint().boxed(),
+        TypeSignature::BoolType => bool().boxed(),
+        TypeSignature::OptionalType(ty) => optional(*ty).boxed(),
+        TypeSignature::ResponseType(ok_err) => response(ok_err.0, ok_err.1).boxed(),
+        TypeSignature::SequenceType(SequenceSubtype::BufferType(size)) => {
+            buffer(size.into()).boxed()
+        }
+        TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::ASCII(size))) => {
+            string_ascii(size.into()).boxed()
+        }
+        TypeSignature::SequenceType(SequenceSubtype::StringType(StringSubtype::UTF8(size))) => {
+            string_utf8(size.into()).boxed()
+        }
+        TypeSignature::SequenceType(SequenceSubtype::ListType(list_type_data)) => {
+            list(list_type_data).boxed()
+        }
+        TypeSignature::TupleType(tuple_ty) => tuple(tuple_ty).boxed(),
+        TypeSignature::PrincipalType => {
+            prop_oneof![principal_standard(), principal_contract()].boxed()
+        }
+        // TODO
+        TypeSignature::ListUnionType(_) => todo!(),
+        TypeSignature::CallableType(_) => todo!(),
+        TypeSignature::TraitReferenceType(_) => todo!(),
+    }
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Int`].
+pub fn int() -> impl Strategy<Value = Value> {
+    any::<i128>().prop_map(Value::Int)
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::UInt`].
+pub fn uint() -> impl Strategy<Value = Value> {
+    any::<u128>().prop_map(Value::UInt)
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Bool`].
+pub fn bool() -> impl Strategy<Value = Value> {
+    any::<bool>().prop_map(Value::Bool)
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Sequence`] with an inner ASCII string ([`CharType::ASCII`]).
+pub fn string_ascii(size: u32) -> impl Strategy<Value = Value> {
+    let size = size as usize;
+    prop::collection::vec(0x20u8..0x7e, size..=size).prop_map(|bytes| {
+        Value::Sequence(SequenceData::String(crate::vm::types::CharType::ASCII(
+            crate::vm::types::ASCIIData { data: bytes },
+        )))
+    })
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Sequence`] with an inner type of [`UTF8Data`].
+pub fn string_utf8(size: u32) -> impl Strategy<Value = Value> {
+    prop::collection::vec(any::<char>(), size as usize).prop_map(|chars| {
+        let mut data = Vec::with_capacity(chars.len());
+        for c in chars {
+            let mut encoded_char = vec![0; c.len_utf8()];
+            c.encode_utf8(encoded_char.as_mut());
+            data.push(encoded_char);
+        }
+        Value::Sequence(SequenceData::String(CharType::UTF8(UTF8Data { data })))
+    })
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Sequence`] with an inner type of [`BuffData`].
+pub fn buffer(size: u32) -> impl Strategy<Value = Value> {
+    let size = size as usize;
+    prop::collection::vec(any::<u8>(), size..=size)
+        .prop_map(|bytes| Value::Sequence(SequenceData::Buffer(BuffData { data: bytes })))
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Optional`], with the inner type being the specified [`TypeSignature`].
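+///
+/// E.g. `optional(TypeSignature::UIntType)` yields both `(some u…)`-style
+/// values and `none`. A property-test sketch:
+///
+/// ```ignore
+/// proptest! {
+///     #[test]
+///     fn optional_uint_is_optional(v in optional(TypeSignature::UIntType)) {
+///         prop_assert!(matches!(v, Value::Optional(_)));
+///     }
+/// }
+/// ```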
+pub fn optional(inner_ty: TypeSignature) -> impl Strategy<Value = Value> {
+    match inner_ty {
+        TypeSignature::NoType => Just(Value::none()).boxed(),
+        _ => prop::option::of(value(inner_ty))
+            .prop_map(|v| {
+                Value::Optional(OptionalData {
+                    data: v.map(Box::new),
+                })
+            })
+            .boxed(),
+    }
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Response`], with the ok/err types being the specified [`TypeSignature`]s.
+pub fn response(ok_ty: TypeSignature, err_ty: TypeSignature) -> impl Strategy<Value = Value> {
+    match (ok_ty, err_ty) {
+        (TypeSignature::NoType, err_ty) => value(err_ty)
+            .prop_map(|err| {
+                Value::Response(ResponseData {
+                    committed: false,
+                    data: Box::new(err),
+                })
+            })
+            .boxed(),
+        (ok_ty, TypeSignature::NoType) => value(ok_ty)
+            .prop_map(|ok| {
+                Value::Response(ResponseData {
+                    committed: true,
+                    data: Box::new(ok),
+                })
+            })
+            .boxed(),
+        (ok_ty, err_ty) => prop::result::maybe_err(value(ok_ty), value(err_ty))
+            .prop_map(|res| {
+                Value::Response(ResponseData {
+                    committed: res.is_ok(),
+                    data: res.map_or_else(Box::new, Box::new),
+                })
+            })
+            .boxed(),
+    }
+}
+
+/// Returns a [`Strategy`] for generating a randomized [`Value`] instance of variant
+/// [`Value::Sequence`] with the inner type being a list ([`SequenceData`]) of
+/// the specified [`ListTypeData`].
+pub fn list(list_type_data: ListTypeData) -> impl Strategy<Value = Value> {
+    prop::collection::vec(
+        value(list_type_data.get_list_item_type().clone()),
+        0..=list_type_data.get_max_len() as usize,
+    )
+    .prop_map(move |v| {
+        Value::Sequence(SequenceData::List(ListData {
+            data: v,
+            type_signature: list_type_data.clone(),
+        }))
+    })
+}
+
+pub fn tuple(tuple_ty: TupleTypeSignature) -> impl Strategy<Value = Value> {
+    let fields: Vec<_> = tuple_ty.get_type_map().keys().cloned().collect();
+    let strategies: Vec<_> = tuple_ty
+        .get_type_map()
+        .values()
+        .cloned()
+        .map(value)
+        .collect();
+    strategies.prop_map(move |vec_values| {
+        TupleData {
+            type_signature: tuple_ty.clone(),
+            data_map: fields.clone().into_iter().zip(vec_values).collect(),
+        }
+        .into()
+    })
+}
+
+pub fn principal_standard() -> impl Strategy<Value = Value> {
+    (0u8..32, prop::collection::vec(any::<u8>(), 20))
+        .prop_map(|(v, hash)| {
+            Value::Principal(PrincipalData::Standard(StandardPrincipalData(
+                v,
+                hash.try_into().unwrap(),
+            )))
+        })
+        .no_shrink()
+}
+
+pub fn principal_contract() -> impl Strategy<Value = Value> {
+    (principal_standard(), "[a-zA-Z]{1,40}").prop_map(|(issuer_value, name)| {
+        let Value::Principal(PrincipalData::Standard(issuer)) = issuer_value else {
+            unreachable!()
+        };
+        let name = ContractName::from(&*name);
+        Value::Principal(PrincipalData::Contract(QualifiedContractIdentifier {
+            issuer,
+            name,
+        }))
+    })
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct PropValue(Value);
+
+impl From<Value> for PropValue {
+    fn from(value: Value) -> Self {
+        PropValue(value)
+    }
+}
+
+impl From<PropValue> for Value {
+    fn from(value: PropValue) -> Self {
+        value.0
+    }
+}
+
+impl PropValue {
+    pub fn any() -> impl Strategy<Value = Self> {
+        type_signature().prop_flat_map(value).prop_map_into()
+    }
+
+    pub fn from_type(ty: TypeSignature) -> impl Strategy<Value = Self> {
+        value(ty).prop_map_into()
+    }
+
+    pub fn many_from_type(ty: TypeSignature, count: usize) -> impl Strategy<Value = Vec<Self>> {
+        prop::collection::vec(Self::from_type(ty.clone()), count)
+    }
+
+    pub fn any_sequence(size: usize) -> impl Strategy<Value = Self> {
+        let any_list = type_signature()
+            .prop_ind_flat_map2(move |ty| prop::collection::vec(value(ty), size))
+            .prop_map(move |(ty, vec)| {
+                Value::Sequence(SequenceData::List(ListData {
+                    data: vec,
+                    type_signature: ListTypeData::new_list(ty, size as u32).unwrap(),
+                }))
+            });
+        prop_oneof![
+            // 10% chance for a buffer
+            1 => buffer(size as u32),
+            // 10% chance for a string-ascii
+            1 => string_ascii(size as u32),
+            // 10% chance for a string-utf8
+            1 => string_utf8(size as u32),
+            // 70% chance for a list
+            7 => any_list
+        ]
+        .prop_map_into()
+    }
+}
+
+impl TryFrom<Vec<PropValue>> for PropValue {
+    type Error = crate::vm::errors::Error;
+
+    fn try_from(values: Vec<PropValue>) -> Result<Self, Self::Error> {
+        let values = values.into_iter().map(Value::from).collect();
+        Value::cons_list_unsanitized(values).map(PropValue::from)
+    }
+}
diff --git a/clarity/src/vm/analysis/arithmetic_checker/mod.rs b/clarity/src/vm/analysis/arithmetic_checker/mod.rs
index 5595905a484..622a6500eb1 100644
--- a/clarity/src/vm/analysis/arithmetic_checker/mod.rs
+++ b/clarity/src/vm/analysis/arithmetic_checker/mod.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-use hashbrown::HashMap;
+use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};

 pub use super::errors::{
     check_argument_count, check_arguments_at_least, CheckError, CheckErrors, CheckResult,
diff --git a/clarity/src/vm/analysis/contract_interface_builder/mod.rs b/clarity/src/vm/analysis/contract_interface_builder/mod.rs
index 6d91f33b1c6..63cf1199331 100644
--- a/clarity/src/vm/analysis/contract_interface_builder/mod.rs
+++ b/clarity/src/vm/analysis/contract_interface_builder/mod.rs
@@ -171,16 +171,14 @@ impl ContractInterfaceAtomType {
     pub fn vec_from_tuple_type(
         tuple_type: &TupleTypeSignature,
     ) -> Vec<ContractInterfaceTupleEntryType> {
-        let mut out: Vec<_> = tuple_type
+        tuple_type
             .get_type_map()
             .iter()
             .map(|(name, sig)| ContractInterfaceTupleEntryType {
                 name: name.to_string(),
                 type_f: Self::from_type_signature(sig),
             })
-            .collect();
-        out.sort_unstable_by(|ty1, ty2| ty1.name.cmp(&ty2.name));
-        out
+            .collect()
     }

     pub fn from_type_signature(sig: &TypeSignature) -> ContractInterfaceAtomType {
diff --git a/clarity/src/vm/analysis/mod.rs b/clarity/src/vm/analysis/mod.rs
index 4da10f88bf9..5825305a338 100644
--- a/clarity/src/vm/analysis/mod.rs
+++ b/clarity/src/vm/analysis/mod.rs
@@ -71,7 +71,6 @@ pub fn mem_type_check(
         cost_tracker,
         epoch,
         version,
-        true,
     ) {
         Ok(x) => {
             // return the first type result of the type checker
             let first_type = x
                 .type_map
                 .as_ref()
                 .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))?
-                .get_type_expected(
+                .get_type(
                     x.expressions
                         .last()
                         .ok_or_else(|| CheckErrors::Expects("Should be non-empty".into()))?,
                 )
@@ -112,7 +111,6 @@ pub fn type_check(
         LimitedCostTracker::new_free(),
         *epoch,
         *version,
-        true,
     )
     .map_err(|(e, _cost_tracker)| e)
 }
@@ -125,7 +123,6 @@ pub fn run_analysis(
     cost_tracker: LimitedCostTracker,
     epoch: StacksEpochId,
     version: ClarityVersion,
-    build_type_map: bool,
 ) -> Result<ContractAnalysis, (CheckError, LimitedCostTracker)> {
     let mut contract_analysis = ContractAnalysis::new(
         contract_identifier.clone(),
@@ -138,7 +135,7 @@ pub fn run_analysis(
     ReadOnlyChecker::run_pass(&epoch, &mut contract_analysis, db)?;
     match epoch {
         StacksEpochId::Epoch20 | StacksEpochId::Epoch2_05 => {
-            TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db, build_type_map)
+            TypeChecker2_05::run_pass(&epoch, &mut contract_analysis, db)
         }
         StacksEpochId::Epoch21
         | StacksEpochId::Epoch22
@@ -146,7 +143,7 @@
         | StacksEpochId::Epoch24
         | StacksEpochId::Epoch25
        | StacksEpochId::Epoch30 => {
-            TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db, build_type_map)
+            TypeChecker2_1::run_pass(&epoch, &mut contract_analysis, db)
         }
         StacksEpochId::Epoch10 => {
             return Err(CheckErrors::Expects(
diff --git a/clarity/src/vm/analysis/read_only_checker/mod.rs b/clarity/src/vm/analysis/read_only_checker/mod.rs
index b02923c1a1a..9c3b602511a 100644
--- a/clarity/src/vm/analysis/read_only_checker/mod.rs
+++ b/clarity/src/vm/analysis/read_only_checker/mod.rs
@@ -14,8 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-use hashbrown::HashMap;
-use stacks_common::types::StacksEpochId;
+use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};

 pub use super::errors::{
     check_argument_count, check_arguments_at_least, CheckError, CheckErrors, CheckResult,
diff --git a/clarity/src/vm/analysis/trait_checker/mod.rs b/clarity/src/vm/analysis/trait_checker/mod.rs
index 868c1d378e0..4941dcb779b 100644
--- a/clarity/src/vm/analysis/trait_checker/mod.rs
+++ b/clarity/src/vm/analysis/trait_checker/mod.rs
@@ -14,8 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.

-use hashbrown::HashMap;
-use stacks_common::types::StacksEpochId;
+use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};

 use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult};
 use crate::vm::analysis::types::{AnalysisPass, ContractAnalysis};
diff --git a/clarity/src/vm/analysis/type_checker/contexts.rs b/clarity/src/vm/analysis/type_checker/contexts.rs
index 936cc47bc49..5fcc31677d1 100644
--- a/clarity/src/vm/analysis/type_checker/contexts.rs
+++ b/clarity/src/vm/analysis/type_checker/contexts.rs
@@ -14,32 +14,16 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <https://www.gnu.org/licenses/>.
-use std::collections::HashSet; - -use hashbrown::HashMap; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap}; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::{TraitIdentifier, TypeSignature}; use crate::vm::{ClarityName, ClarityVersion, SymbolicExpression, MAX_CONTEXT_DEPTH}; -#[derive(Debug, Clone, PartialEq)] +#[derive(Debug, PartialEq, Eq, Clone)] pub struct TypeMap { - map: TypeMapDataType, -} - -#[derive(Debug, Clone, PartialEq)] -/// This enum allows the type checker to operate -/// with two different kinds of type maps. The Set -/// version is more efficient, and only triggers an error -/// if an AST node is visited more than once. The Map -/// version is used when the actual type of each AST node -/// is needed by a subsequent reader. This is only used by -/// tests and docs generation. -enum TypeMapDataType { - Map(HashMap<u64, TypeSignature>), - Set(HashSet<u64>), + map: HashMap<u64, TypeSignature>, } pub struct TypingContext<'a> { @@ -51,14 +35,17 @@ pub struct TypingContext<'a> { pub depth: u16, } +impl Default for TypeMap { + fn default() -> Self { + Self::new() + } +} + impl TypeMap { - pub fn new(build_map: bool) -> TypeMap { - let map = if build_map { - TypeMapDataType::Map(HashMap::new()) - } else { - TypeMapDataType::Set(HashSet::new()) - }; - TypeMap { map } + pub fn new() -> TypeMap { + TypeMap { + map: HashMap::new(), + } } pub fn set_type( @@ -66,29 +53,15 @@ impl TypeMap { expr: &SymbolicExpression, type_sig: TypeSignature, ) -> CheckResult<()> { - match self.map { - TypeMapDataType::Map(ref mut map) => { - if map.insert(expr.id, type_sig).is_some() { - Err(CheckError::new(CheckErrors::TypeAlreadyAnnotatedFailure)) - } else { - Ok(()) - } - } - TypeMapDataType::Set(ref mut map) => { - if !map.insert(expr.id) { - Err(CheckError::new(CheckErrors::TypeAlreadyAnnotatedFailure)) - } else { - Ok(()) - } - } + if self.map.insert(expr.id, type_sig).is_some() { + Err(CheckError::new(CheckErrors::TypeAlreadyAnnotatedFailure)) + } else { + Ok(()) } } - pub fn get_type_expected(&self, expr: &SymbolicExpression) -> Option<&TypeSignature> { - match self.map { - TypeMapDataType::Map(ref map) => map.get(&expr.id), - TypeMapDataType::Set(_) => None, - } + pub fn get_type(&self, expr: &SymbolicExpression) -> Option<&TypeSignature> { + self.map.get(&expr.id) } } diff --git a/clarity/src/vm/analysis/type_checker/v2_05/contexts.rs b/clarity/src/vm/analysis/type_checker/v2_05/contexts.rs index 2a11f6839f7..c4198879d76 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/contexts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/contexts.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; -use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; use crate::vm::analysis::types::ContractAnalysis; diff --git a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs index d66cad5d4ee..30699f46df5 100644 --- a/clarity/src/vm/analysis/type_checker/v2_05/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_05/mod.rs @@ -20,8 +20,7 @@ pub mod natives; use std::collections::BTreeMap; -use hashbrown::HashMap; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use self::contexts::ContractContext; pub use
self::natives::{SimpleNativeFunction, TypedNativeFunction}; @@ -111,15 +110,14 @@ impl CostTracker for TypeChecker<'_, '_> { } } -impl TypeChecker<'_, '_> { - pub fn run_pass( +impl AnalysisPass for TypeChecker<'_, '_> { + fn run_pass( _epoch: &StacksEpochId, contract_analysis: &mut ContractAnalysis, analysis_db: &mut AnalysisDatabase, - build_type_map: bool, ) -> CheckResult<()> { let cost_track = contract_analysis.take_contract_cost_tracker(); - let mut command = TypeChecker::new(analysis_db, cost_track, build_type_map); + let mut command = TypeChecker::new(analysis_db, cost_track); // run the analysis, and replace the cost tracker whether or not the // analysis succeeded. match command.run(contract_analysis) { @@ -344,14 +342,13 @@ impl<'a, 'b> TypeChecker<'a, 'b> { fn new( db: &'a mut AnalysisDatabase<'b>, cost_track: LimitedCostTracker, - build_type_map: bool, ) -> TypeChecker<'a, 'b> { Self { db, cost_track, contract_context: ContractContext::new(), function_return_tracker: None, - type_map: TypeMap::new(build_type_map), + type_map: TypeMap::new(), } } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs b/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs index 8cbed1a416d..b61700c58d3 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/contexts.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; -use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; use crate::vm::analysis::types::ContractAnalysis; diff --git a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs index b61d3bb6e28..333a3d567ed 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/mod.rs @@ -19,8 +19,7 @@ pub mod natives; use std::collections::BTreeMap; -use hashbrown::HashMap; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use self::contexts::ContractContext; pub use self::natives::{SimpleNativeFunction, TypedNativeFunction}; @@ -75,7 +74,6 @@ Is illegally typed in our language. */ pub struct TypeChecker<'a, 'b> { - epoch: StacksEpochId, pub type_map: TypeMap, contract_context: ContractContext, function_return_tracker: Option>, @@ -116,21 +114,18 @@ impl CostTracker for TypeChecker<'_, '_> { } } -impl TypeChecker<'_, '_> { - pub fn run_pass( - epoch: &StacksEpochId, +impl AnalysisPass for TypeChecker<'_, '_> { + fn run_pass( + _epoch: &StacksEpochId, contract_analysis: &mut ContractAnalysis, analysis_db: &mut AnalysisDatabase, - build_type_map: bool, ) -> CheckResult<()> { let cost_track = contract_analysis.take_contract_cost_tracker(); let mut command = TypeChecker::new( - epoch, analysis_db, cost_track, &contract_analysis.contract_identifier, &contract_analysis.clarity_version, - build_type_map, ); // run the analysis, and replace the cost tracker whether or not the // analysis succeeded. 
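
Review note on the `TypeMap` change above: with `TypeMapDataType` gone, every analysis run now materializes the full node-id to type table, and `set_type` still treats a second annotation of the same node as a checker bug. A minimal sketch of the simplified shape, assuming AST node ids are `u64` and substituting `String` for the real `TypeSignature`:

```rust
use std::collections::HashMap;

// Minimal sketch of the simplified TypeMap; `String` stands in for the
// real TypeSignature and node ids are assumed to be u64.
#[derive(Default)]
struct TypeMap {
    map: HashMap<u64, String>,
}

impl TypeMap {
    // Annotating the same node twice indicates a checker bug, so it is
    // surfaced as an error (TypeAlreadyAnnotatedFailure in the real code).
    fn set_type(&mut self, node_id: u64, ty: String) -> Result<(), String> {
        if self.map.insert(node_id, ty).is_some() {
            Err("TypeAlreadyAnnotatedFailure".into())
        } else {
            Ok(())
        }
    }

    // With the Set-backed mode gone, a lookup always returns the recorded
    // type for a visited node instead of sometimes returning None.
    fn get_type(&self, node_id: u64) -> Option<&String> {
        self.map.get(&node_id)
    }
}

fn main() {
    let mut types = TypeMap::default();
    types.set_type(1, "uint".into()).unwrap();
    assert_eq!(types.get_type(1).map(String::as_str), Some("uint"));
    assert!(types.set_type(1, "int".into()).is_err()); // double annotation
}
```
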
@@ -758,7 +753,7 @@ fn clarity2_inner_type_check_type( TypeSignature::CallableType(CallableSubtype::Trait(_)), ) => { // Verify that all types in the union implement this trait - for subtype in types { + for subtype in types.iter() { clarity2_inner_type_check_type( db, contract_context, @@ -880,20 +875,17 @@ pub fn no_type() -> TypeSignature { impl<'a, 'b> TypeChecker<'a, 'b> { fn new( - epoch: &StacksEpochId, db: &'a mut AnalysisDatabase<'b>, cost_track: LimitedCostTracker, contract_identifier: &QualifiedContractIdentifier, clarity_version: &ClarityVersion, - build_type_map: bool, ) -> TypeChecker<'a, 'b> { Self { - epoch: epoch.clone(), db, cost_track, contract_context: ContractContext::new(contract_identifier.clone(), *clarity_version), function_return_tracker: None, - type_map: TypeMap::new(build_type_map), + type_map: TypeMap::new(), clarity_version: *clarity_version, } } @@ -1083,20 +1075,9 @@ impl<'a, 'b> TypeChecker<'a, 'b> { } let mut function_context = context.extend()?; - let mut tracked_mem = 0u64; for (arg_name, arg_type) in args.iter() { self.contract_context.check_name_used(arg_name)?; - if self.epoch.analysis_memory() { - let added_memory = u64::from(arg_name.len()) - .checked_add(arg_type.type_size()?.into()) - .ok_or_else(|| CostErrors::CostOverflow)?; - self.add_memory(added_memory)?; - tracked_mem = tracked_mem - .checked_add(added_memory) - .ok_or_else(|| CostErrors::CostOverflow)?; - } - match arg_type { TypeSignature::CallableType(CallableSubtype::Trait(trait_id)) => { function_context.add_trait_reference(arg_name, trait_id); @@ -1113,9 +1094,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { let return_result = self.type_check(body, &function_context); - drop(function_context); - self.drop_memory(tracked_mem)?; - match return_result { Err(e) => { self.function_return_tracker = None; @@ -1456,10 +1434,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, v_type.type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(v_name.len().into())?; - self.add_memory(v_type.type_size()?.into())?; - } self.contract_context.add_variable_type(v_name, v_type)?; } DefineFunctionsParsed::PrivateFunction { signature, body } => { @@ -1471,10 +1445,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, f_type.total_type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(f_name.len().into())?; - self.add_memory(f_type.total_type_size()?)?; - } self.contract_context .add_private_function_type(f_name, FunctionType::Fixed(f_type))?; } @@ -1486,10 +1456,7 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, f_type.total_type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(f_name.len().into())?; - self.add_memory(f_type.total_type_size()?)?; - } + if f_type.returns.is_response_type() { self.contract_context .add_public_function_type(f_name, FunctionType::Fixed(f_type))?; @@ -1508,10 +1475,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, f_type.total_type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(f_name.len().into())?; - self.add_memory(f_type.total_type_size()?)?; - } self.contract_context .add_read_only_function_type(f_name, FunctionType::Fixed(f_type))?; } @@ -1525,11 +1488,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { let total_type_size = u64::from(map_type.0.type_size()?) 
.cost_overflow_add(u64::from(map_type.1.type_size()?))?; runtime_cost(ClarityCostFunction::AnalysisBindName, self, total_type_size)?; - if self.epoch.analysis_memory() { - self.add_memory(f_name.len().into())?; - self.add_memory(map_type.0.type_size()?.into())?; - self.add_memory(map_type.1.type_size()?.into())?; - } self.contract_context.add_map_type(f_name, map_type)?; } DefineFunctionsParsed::PersistedVariable { @@ -1544,10 +1502,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, v_type.type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(v_name.len().into())?; - self.add_memory(v_type.type_size()?.into())?; - } self.contract_context .add_persisted_variable_type(v_name, v_type)?; } @@ -1558,10 +1512,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, TypeSignature::UIntType.type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(token_name.len().into())?; - self.add_memory(TypeSignature::UIntType.type_size()?.into())?; - } self.contract_context.add_ft(token_name)?; } DefineFunctionsParsed::UnboundedFungibleToken { name } => { @@ -1571,10 +1521,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, TypeSignature::UIntType.type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(token_name.len().into())?; - self.add_memory(TypeSignature::UIntType.type_size()?.into())?; - } self.contract_context.add_ft(token_name)?; } DefineFunctionsParsed::NonFungibleToken { name, nft_type } => { @@ -1585,10 +1531,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, token_type.type_size()?, )?; - if self.epoch.analysis_memory() { - self.add_memory(token_name.len().into())?; - self.add_memory(token_type.type_size()?.into())?; - } self.contract_context.add_nft(token_name, token_type)?; } DefineFunctionsParsed::Trait { name, functions } => { @@ -1599,10 +1541,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { self, trait_type_size(&trait_signature)?, )?; - if self.epoch.analysis_memory() { - self.add_memory(trait_name.len().into())?; - self.add_memory(trait_type_size(&trait_signature)?)?; - } self.contract_context .add_defined_trait(trait_name, trait_signature)?; } @@ -1624,10 +1562,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { type_size, )?; runtime_cost(ClarityCostFunction::AnalysisBindName, self, type_size)?; - if self.epoch.analysis_memory() { - self.add_memory(trait_identifier.name.len().into())?; - self.add_memory(type_size)?; - } self.contract_context.add_used_trait( name.clone(), trait_identifier.clone(), @@ -1642,9 +1576,6 @@ impl<'a, 'b> TypeChecker<'a, 'b> { } } DefineFunctionsParsed::ImplTrait { trait_identifier } => { - if self.epoch.analysis_memory() { - self.add_memory(trait_identifier.name.len().into())?; - } self.contract_context .add_implemented_trait(trait_identifier.clone())?; } diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs index c5aefb65eda..9fdc8c704cb 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/mod.rs @@ -23,8 +23,7 @@ use super::{ use crate::vm::analysis::errors::{CheckError, CheckErrors, CheckResult}; use crate::vm::costs::cost_functions::ClarityCostFunction; use crate::vm::costs::{ - analysis_typecheck_cost, cost_functions, runtime_cost, CostErrors, CostOverflowingMath, - CostTracker, + analysis_typecheck_cost, cost_functions, runtime_cost, CostOverflowingMath, }; use crate::vm::errors::{Error as InterpError, RuntimeErrorType}; use crate::vm::functions::{handle_binding_list, NativeFunctions}; @@ -235,7 +234,6 @@ fn 
check_special_let( runtime_cost(ClarityCostFunction::AnalysisCheckLet, checker, args.len())?; - let mut added_memory = 0u64; handle_binding_list(binding_list, |var_name, var_sexp| { checker.contract_context.check_name_used(var_name)?; if out_context.lookup_variable_type(var_name).is_some() { @@ -251,24 +249,11 @@ fn check_special_let( checker, typed_result.type_size()?, )?; - if checker.epoch.analysis_memory() { - let memory_use = u64::from(var_name.len()) - .checked_add(u64::from(typed_result.type_size()?)) - .ok_or_else(|| CostErrors::CostOverflow)?; - added_memory = added_memory - .checked_add(memory_use) - .ok_or_else(|| CostErrors::CostOverflow)?; - checker.add_memory(memory_use)?; - } out_context.add_variable_type(var_name.clone(), typed_result, checker.clarity_version); Ok(()) })?; - let res = checker.type_check_consecutive_statements(&args[1..args.len()], &out_context); - if checker.epoch.analysis_memory() { - checker.drop_memory(added_memory)?; - } - res + checker.type_check_consecutive_statements(&args[1..args.len()], &out_context) } fn check_special_fetch_var( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs index 6a097a8cd6b..207308745f3 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/natives/options.rs @@ -22,9 +22,7 @@ use super::{ }; use crate::vm::analysis::type_checker::contexts::TypingContext; use crate::vm::costs::cost_functions::ClarityCostFunction; -use crate::vm::costs::{ - analysis_typecheck_cost, cost_functions, runtime_cost, CostErrors, CostTracker, -}; +use crate::vm::costs::{analysis_typecheck_cost, cost_functions, runtime_cost}; use crate::vm::representations::{ClarityName, SymbolicExpression}; use crate::vm::types::signatures::CallableSubtype; use crate::vm::types::TypeSignature; @@ -288,13 +286,7 @@ fn eval_with_new_binding( checker, bind_type.type_size()?, )?; - let mut memory_use = 0; - if checker.epoch.analysis_memory() { - memory_use = u64::from(bind_name.len()) - .checked_add(u64::from(bind_type.type_size()?)) - .ok_or_else(|| CostErrors::CostOverflow)?; - checker.add_memory(memory_use)?; - } + checker.contract_context.check_name_used(&bind_name)?; if inner_context.lookup_variable_type(&bind_name).is_some() { @@ -303,11 +295,7 @@ fn eval_with_new_binding( inner_context.add_variable_type(bind_name, bind_type, checker.clarity_version); - let result = checker.type_check(body, &inner_context); - if checker.epoch.analysis_memory() { - checker.drop_memory(memory_use)?; - } - result + checker.type_check(body, &inner_context) } fn check_special_match_opt( diff --git a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs index 99942ba42c2..2f023dcf4fe 100644 --- a/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs +++ b/clarity/src/vm/analysis/type_checker/v2_1/tests/contracts.rs @@ -81,7 +81,6 @@ pub fn type_check_version( LimitedCostTracker::new_free(), epoch, version, - false, ) .map_err(|(e, _)| e) } diff --git a/clarity/src/vm/analysis/types.rs b/clarity/src/vm/analysis/types.rs index 60a93f9c79e..75691699cd9 100644 --- a/clarity/src/vm/analysis/types.rs +++ b/clarity/src/vm/analysis/types.rs @@ -16,8 +16,7 @@ use std::collections::{BTreeMap, BTreeSet}; -use hashbrown::HashMap; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use 
crate::vm::analysis::analysis_db::AnalysisDatabase; use crate::vm::analysis::contract_interface_builder::ContractInterface; diff --git a/clarity/src/vm/ast/definition_sorter/mod.rs b/clarity/src/vm/ast/definition_sorter/mod.rs index eee66253104..ba99999aac3 100644 --- a/clarity/src/vm/ast/definition_sorter/mod.rs +++ b/clarity/src/vm/ast/definition_sorter/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::ast::errors::{ParseError, ParseErrors, ParseResult}; use crate::vm::ast::types::{BuildASTPass, ContractAST}; diff --git a/clarity/src/vm/ast/mod.rs b/clarity/src/vm/ast/mod.rs index 1cff959695a..a95d32047e1 100644 --- a/clarity/src/vm/ast/mod.rs +++ b/clarity/src/vm/ast/mod.rs @@ -319,8 +319,7 @@ pub fn build_ast( #[cfg(test)] mod test { - use hashbrown::HashMap; - use stacks_common::types::StacksEpochId; + use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::ast::errors::ParseErrors; use crate::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; diff --git a/clarity/src/vm/ast/parser/v1.rs b/clarity/src/vm/ast/parser/v1.rs index 75c5ea2df94..4cdea6e2782 100644 --- a/clarity/src/vm/ast/parser/v1.rs +++ b/clarity/src/vm/ast/parser/v1.rs @@ -525,7 +525,7 @@ pub fn parse_lexed(input: Vec<(LexItem, u32, u32)>) -> ParseResult { - let checked_list: ParseResult> = list + let checked_list: ParseResult> = list .into_iter() .map(|i| match i { ParseStackItem::Expression(e) => Ok(e), @@ -601,7 +601,8 @@ pub fn parse_lexed(input: Vec<(LexItem, u32, u32)>) -> ParseResult unreachable!("More than four modulos of four."), }?; } - let mut pre_expr = PreSymbolicExpression::tuple(checked_list); + let mut pre_expr = + PreSymbolicExpression::tuple(checked_list.into_boxed_slice()); pre_expr.set_span(start_line, start_column, line_pos, column_pos); handle_expression(&mut parse_stack, &mut output_list, pre_expr); } @@ -771,7 +772,7 @@ mod test { start_column: u32, end_line: u32, end_column: u32, - x: Vec, + x: Box<[PreSymbolicExpression]>, ) -> PreSymbolicExpression { let mut e = PreSymbolicExpression::list(x); e.set_span(start_line, start_column, end_line, end_column); @@ -783,7 +784,7 @@ mod test { start_column: u32, end_line: u32, end_column: u32, - x: Vec, + x: Box<[PreSymbolicExpression]>, ) -> PreSymbolicExpression { let mut e = PreSymbolicExpression::tuple(x); e.set_span(start_line, start_column, end_line, end_column); @@ -807,42 +808,42 @@ mod test { 3, 6, 11, - vec![ + Box::new([ make_atom("let", 1, 4, 1, 6), make_list( 1, 8, 1, 20, - vec![ + Box::new([ make_list( 1, 9, 1, 13, - vec![ + Box::new([ make_atom("x", 1, 10, 1, 10), make_atom_value(Value::Int(1), 1, 12, 1, 12), - ], + ]), ), make_list( 1, 15, 1, 19, - vec![ + Box::new([ make_atom("y", 1, 16, 1, 16), make_atom_value(Value::Int(2), 1, 18, 1, 18), - ], + ]), ), - ], + ]), ), make_list( 2, 5, 6, 10, - vec![ + Box::new([ make_atom("+", 2, 6, 2, 6), make_atom("x", 2, 8, 2, 8), make_list( @@ -850,41 +851,41 @@ mod test { 9, 5, 16, - vec![ + Box::new([ make_atom("let", 4, 10, 4, 12), make_list( 4, 14, 4, 20, - vec![make_list( + Box::new([make_list( 4, 15, 4, 19, - vec![ + Box::new([ make_atom("x", 4, 16, 4, 16), make_atom_value(Value::Int(3), 4, 18, 4, 18), - ], - )], + ]), + )]), ), make_list( 5, 9, 5, 15, - vec![ + Box::new([ make_atom("+", 5, 10, 5, 10), make_atom("x", 5, 
12, 5, 12), make_atom("y", 5, 14, 5, 14), - ], + ]), ), - ], + ]), ), make_atom("x", 6, 9, 6, 9), - ], + ]), ), - ], + ]), ), make_atom("x", 6, 13, 6, 13), make_atom("y", 6, 15, 6, 15), @@ -906,11 +907,11 @@ mod test { 9, 2, 17, - vec![ + Box::new([ make_atom("-", 2, 10, 2, 10), make_atom_value(Value::Int(12), 2, 12, 2, 13), make_atom_value(Value::Int(34), 2, 15, 2, 16), - ], + ]), ), ]; @@ -930,10 +931,10 @@ mod test { 1, 1, 11, - vec![ + Box::new([ make_atom("id", 1, 2, 1, 3), make_atom_value(Value::Int(1337), 1, 6, 1, 9), - ], + ]), )]; let parsed = ast::parser::v1::parse(input); assert_eq!(Ok(program), parsed, "Should match expected tuple literal"); diff --git a/clarity/src/vm/ast/parser/v2/mod.rs b/clarity/src/vm/ast/parser/v2/mod.rs index a7ba4eb3c89..addbba1c59b 100644 --- a/clarity/src/vm/ast/parser/v2/mod.rs +++ b/clarity/src/vm/ast/parser/v2/mod.rs @@ -236,7 +236,7 @@ impl<'a> Parser<'a> { span.end_line = token.span.end_line; span.end_column = token.span.end_column; let out_nodes: Vec<_> = std::mem::take(nodes); - let mut e = PreSymbolicExpression::list(out_nodes); + let mut e = PreSymbolicExpression::list(out_nodes.into_boxed_slice()); e.copy_span(span); Ok(Some(e)) } @@ -253,7 +253,7 @@ impl<'a> Parser<'a> { span.end_line = token.span.end_line; span.end_column = token.span.end_column; let out_nodes: Vec<_> = std::mem::take(nodes); - let mut e = PreSymbolicExpression::list(out_nodes); + let mut e = PreSymbolicExpression::list(out_nodes.into_boxed_slice()); e.copy_span(span); Ok(Some(e)) } @@ -301,7 +301,8 @@ impl<'a> Parser<'a> { open_tuple.span.clone(), )?; let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = + PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); let span_before_eof = &self.tokens[self.tokens.len() - 2].span; open_tuple.span.end_line = span_before_eof.end_line; open_tuple.span.end_column = span_before_eof.end_column; @@ -340,7 +341,7 @@ impl<'a> Parser<'a> { placeholder.copy_span(&token.span); open_tuple.nodes.push(placeholder); // Placeholder value let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); let span_before_eof = &self.tokens[self.tokens.len() - 2].span; open_tuple.span.end_line = span_before_eof.end_line; open_tuple.span.end_column = span_before_eof.end_column; @@ -385,7 +386,8 @@ impl<'a> Parser<'a> { placeholder.copy_span(&eof_span); open_tuple.nodes.push(placeholder); // Placeholder value let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = + PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); open_tuple.span.end_line = open_tuple.diagnostic_token.span.end_line; open_tuple.span.end_column = @@ -420,7 +422,7 @@ impl<'a> Parser<'a> { open_tuple.span.end_column = token.span.end_column; self.next_token(); let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); e.copy_span(&open_tuple.span); return Ok(Some(e)); } @@ -438,7 +440,7 @@ impl<'a> Parser<'a> { open_tuple.span.end_column = token.span.end_column; self.next_token(); let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); 
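
A note on the recurring `into_boxed_slice()` conversions in these parser and AST hunks: `PreSymbolicExpression::list` and `::tuple` now take `Box<[PreSymbolicExpression]>` rather than `Vec`, which discards the spare capacity a parser-built `Vec` typically carries and shrinks the field from three words to two. A small self-contained sketch of the effect, using `u32` in place of the real node type:

```rust
fn main() {
    // A parser-built Vec usually carries spare capacity.
    let mut nodes: Vec<u32> = Vec::with_capacity(16);
    nodes.extend([1, 2, 3]);
    assert!(nodes.capacity() >= 16);

    // into_boxed_slice() shrinks to exact length and drops the capacity
    // field, so Box<[u32]> is a two-word handle to frozen contents.
    let frozen: Box<[u32]> = nodes.into_boxed_slice();
    assert_eq!(frozen.len(), 3);

    use std::mem::size_of;
    assert!(size_of::<Box<[u32]>>() < size_of::<Vec<u32>>());
}
```
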
e.copy_span(&open_tuple.span); return Ok(Some(e)); } @@ -477,7 +479,7 @@ impl<'a> Parser<'a> { open_tuple.span.end_column = token.span.end_column; self.next_token(); let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); e.copy_span(&open_tuple.span); return Ok(SetupTupleResult::Closed(e)); } @@ -494,7 +496,7 @@ impl<'a> Parser<'a> { open_tuple.span.end_column = token.span.end_column; self.next_token(); let out_nodes: Vec<_> = open_tuple.nodes.drain(..).collect(); - let mut e = PreSymbolicExpression::tuple(out_nodes); + let mut e = PreSymbolicExpression::tuple(out_nodes.into_boxed_slice()); e.copy_span(&open_tuple.span); return Ok(SetupTupleResult::Closed(e)); } diff --git a/clarity/src/vm/ast/sugar_expander/mod.rs b/clarity/src/vm/ast/sugar_expander/mod.rs index 0f28093932b..7e19758ed45 100644 --- a/clarity/src/vm/ast/sugar_expander/mod.rs +++ b/clarity/src/vm/ast/sugar_expander/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::ast::errors::{ParseError, ParseErrors, ParseResult}; use crate::vm::ast::types::{BuildASTPass, ContractAST, PreExpressionsDrain}; @@ -77,14 +77,14 @@ impl SugarExpander { PreSymbolicExpressionType::List(pre_exprs) => { let drain = PreExpressionsDrain::new(pre_exprs.to_vec().drain(..), None); let expression = self.transform(drain, contract_ast)?; - SymbolicExpression::list(expression) + SymbolicExpression::list(expression.into_boxed_slice()) } PreSymbolicExpressionType::Tuple(pre_exprs) => { let drain = PreExpressionsDrain::new(pre_exprs.to_vec().drain(..), None); let expression = self.transform(drain, contract_ast)?; let mut pairs = expression .chunks(2) - .map(|pair| pair.to_vec()) + .map(|pair| pair.to_vec().into_boxed_slice()) .map(SymbolicExpression::list) .collect::>(); pairs.insert( @@ -96,7 +96,7 @@ impl SugarExpander { .map_err(|_| ParseErrors::InterpreterFailure)?, ), ); - SymbolicExpression::list(pairs) + SymbolicExpression::list(pairs.into_boxed_slice()) } PreSymbolicExpressionType::SugaredContractIdentifier(contract_name) => { let contract_identifier = @@ -205,7 +205,7 @@ mod test { start_column: u32, end_line: u32, end_column: u32, - x: Vec, + x: Box<[PreSymbolicExpression]>, ) -> PreSymbolicExpression { let mut e = PreSymbolicExpression::list(x); e.set_span(start_line, start_column, end_line, end_column); @@ -217,7 +217,7 @@ mod test { start_column: u32, end_line: u32, end_column: u32, - x: Vec, + x: Box<[PreSymbolicExpression]>, ) -> PreSymbolicExpression { let mut e = PreSymbolicExpression::tuple(x); e.set_span(start_line, start_column, end_line, end_column); @@ -277,7 +277,7 @@ mod test { start_column: u32, end_line: u32, end_column: u32, - x: Vec, + x: Box<[SymbolicExpression]>, ) -> SymbolicExpression { let mut e = SymbolicExpression::list(x); e.set_span(start_line, start_column, end_line, end_column); @@ -305,42 +305,42 @@ mod test { 3, 6, 11, - vec![ + Box::new([ make_pre_atom("let", 1, 4, 1, 6), make_pre_list( 1, 8, 1, 20, - vec![ + Box::new([ make_pre_list( 1, 9, 1, 13, - vec![ + Box::new([ make_pre_atom("x", 1, 10, 1, 10), make_pre_atom_value(Value::Int(1), 1, 12, 1, 12), - ], + ]), ), make_pre_list( 1, 15, 1, 19, - vec![ + Box::new([ make_pre_atom("y", 1, 16, 1, 16), 
make_pre_atom_value(Value::Int(2), 1, 18, 1, 18), - ], + ]), ), - ], + ]), ), make_pre_list( 2, 5, 6, 10, - vec![ + Box::new([ make_pre_atom("+", 2, 6, 2, 6), make_pre_atom("x", 2, 8, 2, 8), make_pre_list( @@ -348,41 +348,41 @@ mod test { 9, 5, 16, - vec![ + Box::new([ make_pre_atom("let", 4, 10, 4, 12), make_pre_list( 4, 14, 4, 20, - vec![make_pre_list( + Box::new([make_pre_list( 4, 15, 4, 19, - vec![ + Box::new([ make_pre_atom("x", 4, 16, 4, 16), make_pre_atom_value(Value::Int(3), 4, 18, 4, 18), - ], - )], + ]), + )]), ), make_pre_list( 5, 9, 5, 15, - vec![ + Box::new([ make_pre_atom("+", 5, 10, 5, 10), make_pre_atom("x", 5, 12, 5, 12), make_pre_atom("y", 5, 14, 5, 14), - ], + ]), ), - ], + ]), ), make_pre_atom("x", 6, 9, 6, 9), - ], + ]), ), - ], + ]), ), make_pre_atom("x", 6, 13, 6, 13), make_pre_atom("y", 6, 15, 6, 15), @@ -395,42 +395,42 @@ mod test { 3, 6, 11, - vec![ + Box::new([ make_atom("let", 1, 4, 1, 6), make_list( 1, 8, 1, 20, - vec![ + Box::new([ make_list( 1, 9, 1, 13, - vec![ + Box::new([ make_atom("x", 1, 10, 1, 10), make_literal_value(Value::Int(1), 1, 12, 1, 12), - ], + ]), ), make_list( 1, 15, 1, 19, - vec![ + Box::new([ make_atom("y", 1, 16, 1, 16), make_literal_value(Value::Int(2), 1, 18, 1, 18), - ], + ]), ), - ], + ]), ), make_list( 2, 5, 6, 10, - vec![ + Box::new([ make_atom("+", 2, 6, 2, 6), make_atom("x", 2, 8, 2, 8), make_list( @@ -438,41 +438,41 @@ mod test { 9, 5, 16, - vec![ + Box::new([ make_atom("let", 4, 10, 4, 12), make_list( 4, 14, 4, 20, - vec![make_list( + Box::new([make_list( 4, 15, 4, 19, - vec![ + Box::new([ make_atom("x", 4, 16, 4, 16), make_literal_value(Value::Int(3), 4, 18, 4, 18), - ], - )], + ]), + )]), ), make_list( 5, 9, 5, 15, - vec![ + Box::new([ make_atom("+", 5, 10, 5, 10), make_atom("x", 5, 12, 5, 12), make_atom("y", 5, 14, 5, 14), - ], + ]), ), - ], + ]), ), make_atom("x", 6, 9, 6, 9), - ], + ]), ), - ], + ]), ), make_atom("x", 6, 13, 6, 13), make_atom("y", 6, 15, 6, 15), @@ -498,29 +498,29 @@ mod test { 1, 1, 9, - vec![ + Box::new([ make_pre_atom("id", 1, 2, 1, 3), make_pre_atom_value(Value::Int(1337), 1, 5, 1, 8), - ], + ]), )]; let ast = vec![make_list( 1, 1, 1, 9, - vec![ + Box::new([ make_atom("tuple", 0, 0, 0, 0), make_list( 0, 0, 0, 0, - vec![ + Box::new([ make_atom("id", 1, 2, 1, 3), make_literal_value(Value::Int(1337), 1, 5, 1, 8), - ], + ]), ), - ], + ]), )]; let contract_id = QualifiedContractIdentifier::parse( "S1G2081040G2081040G2081040G208105NK8PE5.contract-a", @@ -848,7 +848,7 @@ mod test { // ) let pre_foo = make_pre_atom("foo", 2, 4, 2, 6); let pre_comment = make_pre_comment("this is a comment".to_string(), 3, 4, 3, 20); - let pre_ast = vec![make_pre_list(1, 1, 4, 1, vec![pre_foo, pre_comment])]; + let pre_ast = vec![make_pre_list(1, 1, 4, 1, Box::new([pre_foo, pre_comment]))]; let mut foo = make_atom("foo", 2, 4, 2, 6); foo.post_comments = vec![( "this is a comment".to_string(), @@ -859,7 +859,7 @@ mod test { end_column: 20, }, )]; - let list = make_list(1, 1, 4, 1, vec![foo]); + let list = make_list(1, 1, 4, 1, Box::new([foo])); let ast = vec![list]; let contract_id = QualifiedContractIdentifier::parse( diff --git a/clarity/src/vm/ast/traits_resolver/mod.rs b/clarity/src/vm/ast/traits_resolver/mod.rs index 657bc744cef..0b44d5e9128 100644 --- a/clarity/src/vm/ast/traits_resolver/mod.rs +++ b/clarity/src/vm/ast/traits_resolver/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::analysis::AnalysisDatabase; use crate::vm::ast::errors::{ParseError, ParseErrors, ParseResult}; @@ -45,13 +45,12 @@ impl TraitsResolver { TraitsResolver {} } - #[cfg_attr(test, mutants::skip)] pub fn run(&mut self, contract_ast: &mut ContractAST) -> ParseResult<()> { let mut referenced_traits = HashMap::new(); for exp in contract_ast.pre_expressions.iter() { // Top-level comment nodes have been filtered from `args` by `try_parse_pre_expr`. - let Some((define_type, args)) = self.try_parse_pre_expr(&exp) else { + let Some((define_type, args)) = self.try_parse_pre_expr(exp) else { continue; }; @@ -72,7 +71,7 @@ impl TraitsResolver { // Traverse and probe for generics nested in the trait definition self.probe_for_generics( - trait_definition.iter(), + trait_definition.iter().collect(), &mut referenced_traits, true, )?; @@ -145,7 +144,7 @@ impl TraitsResolver { | DefineFunctions::PrivateFunction | DefineFunctions::ReadOnlyFunction => { // Traverse and probe for generics in functions type definitions - self.probe_for_generics(args.into_iter(), &mut referenced_traits, true)?; + self.probe_for_generics(args, &mut referenced_traits, true)?; } DefineFunctions::Constant | DefineFunctions::Map @@ -153,21 +152,14 @@ impl TraitsResolver { | DefineFunctions::FungibleToken | DefineFunctions::NonFungibleToken => { if !args.is_empty() { - self.probe_for_generics( - args[1..].to_vec().into_iter(), - &mut referenced_traits, - false, - )?; + self.probe_for_generics(args[1..].to_vec(), &mut referenced_traits, false)?; } } }; } - for (trait_reference, expr) in referenced_traits { - if !contract_ast - .referenced_traits - .contains_key(&trait_reference) - { + for (trait_reference, expr) in referenced_traits.iter() { + if !contract_ast.referenced_traits.contains_key(trait_reference) { let mut err = ParseError::new(ParseErrors::TraitReferenceUnknown( trait_reference.to_string(), )); @@ -185,25 +177,31 @@ impl TraitsResolver { ) -> Option<(DefineFunctions, Vec<&'a PreSymbolicExpression>)> { let expressions = expression.match_list()?; // Filter comment nodes out of the list of expressions. 
- let mut filtered_expressions = expressions + let filtered_expressions: Vec<&PreSymbolicExpression> = expressions .iter() - .filter(|expr| expr.match_comment().is_none()); - let function_name = filtered_expressions.next()?.match_atom()?; + .filter(|expr| expr.match_comment().is_none()) + .collect(); + let (function_name, args) = filtered_expressions.split_first()?; + let function_name = function_name.match_atom()?; let define_type = DefineFunctions::lookup_by_name(function_name)?; - Some((define_type, filtered_expressions.collect())) + Some((define_type, args.to_vec())) } #[allow(clippy::only_used_in_recursion)] - fn probe_for_generics<'a>( + fn probe_for_generics( &mut self, - exprs: impl Iterator<Item = &'a PreSymbolicExpression>, + exprs: Vec<&PreSymbolicExpression>, referenced_traits: &mut HashMap<ClarityName, PreSymbolicExpression>, should_reference: bool, ) -> ParseResult<()> { - for expression in exprs { + for &expression in exprs.iter() { match &expression.pre_expr { List(list) => { - self.probe_for_generics(list.iter(), referenced_traits, should_reference)?; + self.probe_for_generics( + list.iter().collect(), + referenced_traits, + should_reference, + )?; } TraitReference(trait_name) => { if should_reference { @@ -213,7 +211,11 @@ } } Tuple(atoms) => { - self.probe_for_generics(atoms.iter(), referenced_traits, should_reference)?; + self.probe_for_generics( + atoms.iter().collect(), + referenced_traits, + should_reference, + )?; } _ => { /* no-op */ } } diff --git a/clarity/src/vm/ast/types.rs b/clarity/src/vm/ast/types.rs index aedd31eae35..2c3679fe331 100644 --- a/clarity/src/vm/ast/types.rs +++ b/clarity/src/vm/ast/types.rs @@ -16,7 +16,7 @@ use std::vec::Drain; -use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::vm::ast::errors::ParseResult; use crate::vm::representations::{PreSymbolicExpression, SymbolicExpression, TraitDefinition}; diff --git a/clarity/src/vm/callables.rs b/clarity/src/vm/callables.rs index 9cd991ec971..a99d0d3700e 100644 --- a/clarity/src/vm/callables.rs +++ b/clarity/src/vm/callables.rs @@ -62,14 +62,14 @@ pub enum DefineType { Private, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct DefinedFunction { - identifier: FunctionIdentifier, - name: ClarityName, - arg_types: Vec<TypeSignature>, + pub identifier: FunctionIdentifier, + pub name: ClarityName, + pub arg_types: Vec<TypeSignature>, pub define_type: DefineType, - arguments: Vec<ClarityName>, - body: SymbolicExpression, + pub arguments: Vec<ClarityName>, + pub body: SymbolicExpression, } /// This enum handles the actual invocation of the method @@ -340,8 +340,8 @@ impl DefinedFunction { pub fn apply(&self, args: &[Value], env: &mut Environment) -> Result<Value> { match self.define_type { DefineType::Private => self.execute_apply(args, env), - DefineType::Public => env.execute_function_as_transaction(self, args, None, false), - DefineType::ReadOnly => env.execute_function_as_transaction(self, args, None, false), + DefineType::Public => env.execute_function_as_transaction(self, args, None), + DefineType::ReadOnly => env.execute_function_as_transaction(self, args, None), } } @@ -391,14 +391,14 @@ impl CallableType { } impl FunctionIdentifier { - fn new_native_function(name: &str) -> FunctionIdentifier { + pub fn new_native_function(name: &str) -> FunctionIdentifier { let identifier = format!("_native_:{}", name); FunctionIdentifier { identifier: identifier, } } - fn new_user_function(name: &str, context: &str) -> FunctionIdentifier { + pub fn new_user_function(name:
&str, context: &str) -> FunctionIdentifier { let identifier = format!("{}:{}", context, name); FunctionIdentifier { identifier: identifier, diff --git a/clarity/src/vm/clarity.rs b/clarity/src/vm/clarity.rs index 11145ab11a8..ff7cc427b6a 100644 --- a/clarity/src/vm/clarity.rs +++ b/clarity/src/vm/clarity.rs @@ -210,7 +210,6 @@ pub trait TransactionConnection: ClarityConnection { cost_track, epoch_id, clarity_version, - false, ); match result { diff --git a/clarity/src/vm/contexts.rs b/clarity/src/vm/contexts.rs index 305c1219883..15dce233562 100644 --- a/clarity/src/vm/contexts.rs +++ b/clarity/src/vm/contexts.rs @@ -18,12 +18,11 @@ use std::collections::{BTreeMap, BTreeSet}; use std::fmt; use std::mem::replace; -use hashbrown::{HashMap, HashSet}; use serde::Serialize; use serde_json::json; use stacks_common::consts::CHAIN_ID_TESTNET; use stacks_common::types::chainstate::StacksBlockId; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use super::EvalHook; use crate::vm::ast::{ASTRules, ContractAST}; @@ -205,7 +204,7 @@ pub struct GlobalContext<'a, 'hooks> { pub eval_hooks: Option>, } -#[derive(Serialize, Deserialize, Clone)] +#[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct ContractContext { pub contract_identifier: QualifiedContractIdentifier, pub variables: HashMap, @@ -1139,7 +1138,8 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { return Err(CheckErrors::CircularReference(vec![func_identifier.to_string()]).into()) } self.call_stack.insert(&func_identifier, true); - let res = self.execute_function_as_transaction(&func, &args, Some(&contract.contract_context), allow_private); + + let res = self.execute_function_as_transaction(&func, &args, Some(&contract.contract_context)); self.call_stack.remove(&func_identifier, true)?; match res { @@ -1167,7 +1167,6 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { function: &DefinedFunction, args: &[Value], next_contract_context: Option<&ContractContext>, - allow_private: bool, ) -> Result { let make_read_only = function.is_read_only(); @@ -1196,7 +1195,7 @@ impl<'a, 'b, 'hooks> Environment<'a, 'b, 'hooks> { self.global_context.roll_back()?; result } else { - self.global_context.handle_tx_result(result, allow_private) + self.global_context.handle_tx_result(result) } } @@ -1726,14 +1725,7 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { self.database.roll_back() } - // the allow_private parameter allows private functions calls to return any Clarity type - // and not just Response. It only has effect is the devtools feature is enabled. eg: - // clarity = { version = "*", features = ["devtools"] } - pub fn handle_tx_result( - &mut self, - result: Result, - allow_private: bool, - ) -> Result { + pub fn handle_tx_result(&mut self, result: Result) -> Result { if let Ok(result) = result { if let Value::Response(data) = result { if data.committed { @@ -1742,9 +1734,6 @@ impl<'a, 'hooks> GlobalContext<'a, 'hooks> { self.roll_back()?; } Ok(Value::Response(data)) - } else if allow_private && cfg!(feature = "devtools") { - self.commit()?; - Ok(result) } else { Err( CheckErrors::PublicFunctionMustReturnResponse(TypeSignature::type_of(&result)?) 
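
With the `devtools` escape hatch removed in the contexts.rs hunk above, `handle_tx_result` is back to its strict contract: a committed `(ok ...)` response commits the write set, an uncommitted response rolls it back, and any non-response value from a public or read-only call is a `PublicFunctionMustReturnResponse` check error. A rough stand-alone sketch of that control flow, with booleans standing in for the real database transaction machinery and a trimmed `Value` enum:

```rust
// Hedged sketch of the restored handle_tx_result behavior; not the real
// GlobalContext, just the commit/rollback/error decision it makes.
enum Value {
    Response { committed: bool },
    Int(i128),
}

struct GlobalContext {
    committed: bool,
    rolled_back: bool,
}

impl GlobalContext {
    fn handle_tx_result(&mut self, result: Value) -> Result<Value, String> {
        match result {
            // Only a committed (ok ...) response persists the write set;
            // an (err ...) response rolls it back but still returns Ok.
            Value::Response { committed } => {
                if committed {
                    self.committed = true;
                } else {
                    self.rolled_back = true;
                }
                Ok(Value::Response { committed })
            }
            // Without the devtools escape hatch, a non-response value from
            // a public or read-only call is always a check error.
            Value::Int(_) => Err("PublicFunctionMustReturnResponse".into()),
        }
    }
}

fn main() {
    let mut ctx = GlobalContext { committed: false, rolled_back: false };
    assert!(ctx.handle_tx_result(Value::Int(1)).is_err());
    ctx.handle_tx_result(Value::Response { committed: true }).unwrap();
    assert!(ctx.committed);
}
```
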
diff --git a/clarity/src/vm/contracts.rs b/clarity/src/vm/contracts.rs index 1982665aee2..b636325cc44 100644 --- a/clarity/src/vm/contracts.rs +++ b/clarity/src/vm/contracts.rs @@ -25,7 +25,7 @@ use crate::vm::types::{PrincipalData, QualifiedContractIdentifier}; use crate::vm::version::ClarityVersion; use crate::vm::{apply, eval_all, Value}; -#[derive(Serialize, Deserialize)] +#[derive(Debug, Serialize, Deserialize, PartialEq, Clone)] pub struct Contract { pub contract_context: ContractContext, } diff --git a/clarity/src/vm/costs/mod.rs b/clarity/src/vm/costs/mod.rs index 744b6056911..c84b555e0e8 100644 --- a/clarity/src/vm/costs/mod.rs +++ b/clarity/src/vm/costs/mod.rs @@ -17,11 +17,10 @@ use std::collections::BTreeMap; use std::{cmp, fmt}; -use hashbrown::HashMap; use lazy_static::lazy_static; use rusqlite::types::{FromSql, FromSqlResult, ToSql, ToSqlOutput, ValueRef}; use serde::{Deserialize, Serialize}; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use crate::boot_util::boot_code_id; use crate::vm::ast::ContractAST; @@ -207,8 +206,14 @@ impl From for SerializedCostStateSummary { cost_function_references, } = other; SerializedCostStateSummary { - contract_call_circuits: contract_call_circuits.into_iter().collect(), - cost_function_references: cost_function_references.into_iter().collect(), + contract_call_circuits: contract_call_circuits + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + cost_function_references: cost_function_references + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), } } } @@ -786,9 +791,6 @@ impl TrackerData { /// `apply_updates` - tells this function to look for any changes in the cost voting contract /// which would need to be applied. if `false`, just load the last computed cost state in this /// fork. 
- /// TODO: #4587 add test for Err cases - /// Or keep the skip and remove the comment - #[cfg_attr(test, mutants::skip)] fn load_costs(&mut self, clarity_db: &mut ClarityDatabase, apply_updates: bool) -> Result<()> { clarity_db.begin(); let epoch_id = clarity_db @@ -994,7 +996,7 @@ fn compute_cost( ))); } - let function_invocation = [SymbolicExpression::list(program)]; + let function_invocation = [SymbolicExpression::list(program.into_boxed_slice())]; let eval_result = eval_all( &function_invocation, diff --git a/clarity/src/vm/coverage.rs b/clarity/src/vm/coverage.rs index be8a647e9c9..9034cd183c2 100644 --- a/clarity/src/vm/coverage.rs +++ b/clarity/src/vm/coverage.rs @@ -2,8 +2,8 @@ use std::collections::BTreeMap; use std::fs::File; use std::io::Write; -use hashbrown::{HashMap, HashSet}; use serde_json::Value as JsonValue; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use super::functions::define::DefineFunctionsParsed; use super::EvalHook; diff --git a/clarity/src/vm/database/clarity_db.rs b/clarity/src/vm/database/clarity_db.rs index b395f88c6db..4388e88e58d 100644 --- a/clarity/src/vm/database/clarity_db.rs +++ b/clarity/src/vm/database/clarity_db.rs @@ -472,26 +472,22 @@ impl<'a> ClarityDatabase<'a> { self.store.set_block_hash(bhh, query_pending_data) } - pub fn put_data<T: ClaritySerializable>(&mut self, key: &str, value: &T) -> Result<()> { - self.store.put_data(&key, &value.serialize()) + pub fn put<T: ClaritySerializable>(&mut self, key: &str, value: &T) -> Result<()> { + self.store.put(&key, &value.serialize()) } /// Like `put()`, but returns the serialized byte size of the stored value - pub fn put_data_with_size<T: ClaritySerializable>( - &mut self, - key: &str, - value: &T, - ) -> Result<u64> { + pub fn put_with_size<T: ClaritySerializable>(&mut self, key: &str, value: &T) -> Result<u64> { let serialized = value.serialize(); - self.store.put_data(&key, &serialized)?; + self.store.put(&key, &serialized)?; Ok(byte_len_of_serialization(&serialized)) } - pub fn get_data<T>(&mut self, key: &str) -> Result<Option<T>> + pub fn get<T>(&mut self, key: &str) -> Result<Option<T>> where T: ClarityDeserializable<T>, { - self.store.get_data::<T>(key) + self.store.get::<T>(key) } pub fn put_value(&mut self, key: &str, value: Value, epoch: &StacksEpochId) -> Result<()> { @@ -528,7 +524,7 @@ let size = serialized.len() as u64; let hex_serialized = to_hex(serialized.as_slice()); - self.store.put_data(&key, &hex_serialized)?; + self.store.put(&key, &hex_serialized)?; Ok(pre_sanitized_size.unwrap_or(size)) } @@ -544,11 +540,11 @@ .map_err(|e| InterpreterError::DBError(e.to_string()).into()) } - pub fn get_data_with_proof<T>(&mut self, key: &str) -> Result<Option<(T, Vec<u8>)>> + pub fn get_with_proof<T>(&mut self, key: &str) -> Result<Option<(T, Vec<u8>)>> where T: ClarityDeserializable<T>, { - self.store.get_data_with_proof(key) + self.store.get_with_proof(key) } pub fn make_key_for_trip( @@ -791,7 +787,7 @@ /// The instantiation of subsequent epochs may bump up the epoch version in the clarity DB if /// Clarity is updated in that epoch. pub fn get_clarity_epoch_version(&mut self) -> Result<StacksEpochId> { - let out = match self.get_data(Self::clarity_state_epoch_key())? { + let out = match self.get(Self::clarity_state_epoch_key())?
{ Some(x) => u32::try_into(x).map_err(|_| { InterpreterError::Expect("Bad Clarity epoch version in stored Clarity state".into()) })?, @@ -802,7 +798,7 @@ impl<'a> ClarityDatabase<'a> { /// Should be called _after_ all of the epoch's initialization has been invoked pub fn set_clarity_epoch_version(&mut self, epoch: StacksEpochId) -> Result<()> { - self.put_data(Self::clarity_state_epoch_key(), &(epoch as u32)) + self.put(Self::clarity_state_epoch_key(), &(epoch as u32)) } /// Returns the _current_ total liquid ustx @@ -1135,12 +1131,12 @@ impl<'a> ClarityDatabase<'a> { pub fn get_stx_btc_ops_processed(&mut self) -> Result { Ok(self - .get_data("vm_pox::stx_btc_ops::processed_blocks")? + .get("vm_pox::stx_btc_ops::processed_blocks")? .unwrap_or(0)) } pub fn set_stx_btc_ops_processed(&mut self, processed: u64) -> Result<()> { - self.put_data("vm_pox::stx_btc_ops::processed_blocks", &processed) + self.put("vm_pox::stx_btc_ops::processed_blocks", &processed) } } @@ -1162,7 +1158,7 @@ impl<'a> ClarityDatabase<'a> { ) -> Result<()> { let key = ClarityDatabase::make_microblock_pubkey_height_key(pubkey_hash); let value = format!("{}", &height); - self.put_data(&key, &value) + self.put(&key, &value) } pub fn get_cc_special_cases_handler(&self) -> Option { @@ -1199,7 +1195,7 @@ impl<'a> ClarityDatabase<'a> { })?; let value_str = to_hex(&value_bytes); - self.put_data(&key, &value_str) + self.put(&key, &value_str) } pub fn get_microblock_pubkey_hash_height( @@ -1207,7 +1203,7 @@ impl<'a> ClarityDatabase<'a> { pubkey_hash: &Hash160, ) -> Result> { let key = ClarityDatabase::make_microblock_pubkey_height_key(pubkey_hash); - self.get_data(&key)? + self.get(&key)? .map(|height_str: String| { height_str.parse::().map_err(|_| { InterpreterError::Expect( @@ -1225,7 +1221,7 @@ impl<'a> ClarityDatabase<'a> { height: u32, ) -> Result> { let key = ClarityDatabase::make_microblock_poison_key(height); - self.get_data(&key)? + self.get(&key)? 
.map(|reporter_hex_str: String| { let reporter_value = Value::try_deserialize_hex_untyped(&reporter_hex_str) .map_err(|_| { @@ -1780,7 +1776,7 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - self.put_data(&supply_key, &(0_u128))?; + self.put(&supply_key, &(0_u128))?; Ok(data) } @@ -1834,7 +1830,7 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - let current_supply: u128 = self.get_data(&key)?.ok_or_else(|| { + let current_supply: u128 = self.get(&key)?.ok_or_else(|| { InterpreterError::Expect("ERROR: Clarity VM failed to track token supply.".into()) })?; @@ -1848,7 +1844,7 @@ impl<'a> ClarityDatabase<'a> { } } - self.put_data(&key, &new_supply) + self.put(&key, &new_supply) } pub fn checked_decrease_token_supply( @@ -1862,7 +1858,7 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - let current_supply: u128 = self.get_data(&key)?.ok_or_else(|| { + let current_supply: u128 = self.get(&key)?.ok_or_else(|| { InterpreterError::Expect("ERROR: Clarity VM failed to track token supply.".into()) })?; @@ -1872,7 +1868,7 @@ impl<'a> ClarityDatabase<'a> { let new_supply = current_supply - amount; - self.put_data(&key, &new_supply) + self.put(&key, &new_supply) } pub fn get_ft_balance( @@ -1893,7 +1889,7 @@ impl<'a> ClarityDatabase<'a> { &principal.serialize(), ); - let result = self.get_data(&key)?; + let result = self.get(&key)?; match result { None => Ok(0), Some(balance) => Ok(balance), @@ -1913,7 +1909,7 @@ impl<'a> ClarityDatabase<'a> { token_name, &principal.serialize(), ); - self.put_data(&key, &balance) + self.put(&key, &balance) } pub fn get_ft_supply( @@ -1926,7 +1922,7 @@ impl<'a> ClarityDatabase<'a> { StoreType::CirculatingSupply, token_name, ); - let supply = self.get_data(&key)?.ok_or_else(|| { + let supply = self.get(&key)?.ok_or_else(|| { InterpreterError::Expect("ERROR: Clarity VM failed to track token supply.".into()) })?; Ok(supply) @@ -2102,7 +2098,7 @@ impl<'a> ClarityDatabase<'a> { pub fn get_account_stx_balance(&mut self, principal: &PrincipalData) -> Result { let key = ClarityDatabase::make_key_for_account_balance(principal); debug!("Fetching account balance"; "principal" => %principal.to_string()); - let result = self.get_data(&key)?; + let result = self.get(&key)?; Ok(match result { None => STXBalance::zero(), Some(balance) => balance, @@ -2111,7 +2107,7 @@ impl<'a> ClarityDatabase<'a> { pub fn get_account_nonce(&mut self, principal: &PrincipalData) -> Result { let key = ClarityDatabase::make_key_for_account_nonce(principal); - let result = self.get_data(&key)?; + let result = self.get(&key)?; Ok(match result { None => 0, Some(nonce) => nonce, @@ -2120,7 +2116,7 @@ impl<'a> ClarityDatabase<'a> { pub fn set_account_nonce(&mut self, principal: &PrincipalData, nonce: u64) -> Result<()> { let key = ClarityDatabase::make_key_for_account_nonce(principal); - self.put_data(&key, &nonce) + self.put(&key, &nonce) } } diff --git a/clarity/src/vm/database/clarity_store.rs b/clarity/src/vm/database/clarity_store.rs index afe2c550ba6..f093c5a3c85 100644 --- a/clarity/src/vm/database/clarity_store.rs +++ b/clarity/src/vm/database/clarity_store.rs @@ -58,14 +58,14 @@ pub type SpecialCaseHandler = &'static dyn Fn( // attempt to continue processing in the event of an unexpected storage error. 
pub trait ClarityBackingStore { /// put K-V data into the committed datastore - fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()>; + fn put_all(&mut self, items: Vec<(String, String)>) -> Result<()>; /// fetch K-V out of the committed datastore - fn get_data(&mut self, key: &str) -> Result>; + fn get(&mut self, key: &str) -> Result>; /// fetch K-V out of the committed datastore, along with the byte representation /// of the Merkle proof for that key-value pair - fn get_data_with_proof(&mut self, key: &str) -> Result)>>; + fn get_with_proof(&mut self, key: &str) -> Result)>>; fn has_entry(&mut self, key: &str) -> Result { - Ok(self.get_data(key)?.is_some()) + Ok(self.get(key)?.is_some()) } /// change the current MARF context to service reads from a different chain_tip @@ -109,7 +109,7 @@ pub trait ClarityBackingStore { ) -> Result<(StacksBlockId, Sha512Trunc256Sum)> { let key = make_contract_hash_key(contract); let contract_commitment = self - .get_data(&key)? + .get(&key)? .map(|x| ContractCommitment::deserialize(&x)) .ok_or_else(|| CheckErrors::NoSuchContract(contract.to_string()))?; let ContractCommitment { @@ -232,11 +232,11 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't set block hash") } - fn get_data(&mut self, _key: &str) -> Result> { + fn get(&mut self, _key: &str) -> Result> { panic!("NullBackingStore can't retrieve data") } - fn get_data_with_proof(&mut self, _key: &str) -> Result)>> { + fn get_with_proof(&mut self, _key: &str) -> Result)>> { panic!("NullBackingStore can't retrieve data") } @@ -260,7 +260,7 @@ impl ClarityBackingStore for NullBackingStore { panic!("NullBackingStore can't get current block height") } - fn put_all_data(&mut self, mut _items: Vec<(String, String)>) -> Result<()> { + fn put_all(&mut self, mut _items: Vec<(String, String)>) -> Result<()> { panic!("NullBackingStore cannot put") } } @@ -301,11 +301,11 @@ impl ClarityBackingStore for MemoryBackingStore { Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) } - fn get_data(&mut self, key: &str) -> Result> { + fn get(&mut self, key: &str) -> Result> { SqliteConnection::get(self.get_side_store(), key) } - fn get_data_with_proof(&mut self, key: &str) -> Result)>> { + fn get_with_proof(&mut self, key: &str) -> Result)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } @@ -337,7 +337,7 @@ impl ClarityBackingStore for MemoryBackingStore { None } - fn put_all_data(&mut self, items: Vec<(String, String)>) -> Result<()> { + fn put_all(&mut self, items: Vec<(String, String)>) -> Result<()> { for (key, value) in items.into_iter() { SqliteConnection::put(self.get_side_store(), &key, &value)?; } diff --git a/clarity/src/vm/database/key_value_wrapper.rs b/clarity/src/vm/database/key_value_wrapper.rs index 69eb74b39ed..b66d8e91aef 100644 --- a/clarity/src/vm/database/key_value_wrapper.rs +++ b/clarity/src/vm/database/key_value_wrapper.rs @@ -16,9 +16,8 @@ use std::hash::Hash; -use hashbrown::HashMap; use stacks_common::types::chainstate::StacksBlockId; -use stacks_common::types::StacksEpochId; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::util::hash::Sha512Trunc256Sum; use super::clarity_store::SpecialCaseHandler; @@ -284,7 +283,7 @@ impl<'a> RollbackWrapper<'a> { let all_edits = rollback_check_pre_bottom_commit(last_item.edits, &mut self.lookup_map)?; if all_edits.len() > 0 { - self.store.put_all_data(all_edits).map_err(|e| { + 
self.store.put_all(all_edits).map_err(|e| { InterpreterError::Expect(format!( "ERROR: Failed to commit data to sql store: {e:?}" )) @@ -308,7 +307,7 @@ impl<'a> RollbackWrapper<'a> { } } -fn inner_put_data( +fn inner_put( lookup_map: &mut HashMap>, edits: &mut Vec<(T, RollbackValueCheck)>, key: T, @@ -322,14 +321,14 @@ fn inner_put_data( } impl<'a> RollbackWrapper<'a> { - pub fn put_data(&mut self, key: &str, value: &str) -> InterpreterResult<()> { + pub fn put(&mut self, key: &str, value: &str) -> InterpreterResult<()> { let current = self.stack.last_mut().ok_or_else(|| { InterpreterError::Expect( "ERROR: Clarity VM attempted PUT on non-nested context.".into(), ) })?; - Ok(inner_put_data( + Ok(inner_put( &mut self.lookup_map, &mut current.edits, key.to_string(), @@ -359,17 +358,17 @@ impl<'a> RollbackWrapper<'a> { /// this function will only return commitment proofs for values _already_ materialized /// in the underlying store. otherwise it returns None. - pub fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> + pub fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> where T: ClarityDeserializable, { self.store - .get_data_with_proof(key)? + .get_with_proof(key)? .map(|(value, proof)| Ok((T::deserialize(&value)?, proof))) .transpose() } - pub fn get_data(&mut self, key: &str) -> InterpreterResult> + pub fn get(&mut self, key: &str) -> InterpreterResult> where T: ClarityDeserializable, { @@ -386,10 +385,7 @@ impl<'a> RollbackWrapper<'a> { } } // otherwise, lookup from store - self.store - .get_data(key)? - .map(|x| T::deserialize(&x)) - .transpose() + self.store.get(key)?.map(|x| T::deserialize(&x)).transpose() } pub fn deserialize_value( @@ -426,7 +422,7 @@ impl<'a> RollbackWrapper<'a> { return Ok(Some(Self::deserialize_value(x, expected, epoch)?)); } } - let stored_data = self.store.get_data(key).map_err(|_| { + let stored_data = self.store.get(key).map_err(|_| { SerializationError::DeserializationError("ERROR: Clarity backing store failure".into()) })?; match stored_data { @@ -452,7 +448,7 @@ impl<'a> RollbackWrapper<'a> { ) -> InterpreterResult<()> { let key = make_contract_hash_key(contract); let value = self.store.make_contract_commitment(content_hash); - self.put_data(&key, &value) + self.put(&key, &value) } pub fn insert_metadata( @@ -469,7 +465,7 @@ impl<'a> RollbackWrapper<'a> { let metadata_key = (contract.clone(), key.to_string()); - Ok(inner_put_data( + Ok(inner_put( &mut self.metadata_lookup_map, &mut current.metadata_edits, metadata_key, diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs index 1092992982a..f0297ef18e7 100644 --- a/clarity/src/vm/database/mod.rs +++ b/clarity/src/vm/database/mod.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
diff --git a/clarity/src/vm/database/mod.rs b/clarity/src/vm/database/mod.rs
index 1092992982a..f0297ef18e7 100644
--- a/clarity/src/vm/database/mod.rs
+++ b/clarity/src/vm/database/mod.rs
@@ -14,7 +14,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

-use hashbrown::HashMap;
+use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet};

 pub use self::clarity_db::{
     BurnStateDB, ClarityDatabase, HeadersDB, StoreType, NULL_BURN_STATE_DB, NULL_HEADER_DB,
@@ -33,3 +33,5 @@ pub mod clarity_store;
 mod key_value_wrapper;
 mod sqlite;
 mod structures;
+#[cfg(test)]
+mod tests;
diff --git a/clarity/src/vm/database/structures.rs b/clarity/src/vm/database/structures.rs
index e4fab929bd0..937eda2bdc8 100644
--- a/clarity/src/vm/database/structures.rs
+++ b/clarity/src/vm/database/structures.rs
@@ -375,7 +375,7 @@ impl<'db, 'conn> STXBalanceSnapshot<'db, 'conn> {
     pub fn save(self) -> Result<()> {
         let key = ClarityDatabase::make_key_for_account_balance(&self.principal);
-        self.db_ref.put_data(&key, &self.balance)
+        self.db_ref.put(&key, &self.balance)
     }

     pub fn transfer_to(mut self, recipient: &PrincipalData, amount: u128) -> Result<()> {
@@ -386,7 +386,7 @@
         let recipient_key = ClarityDatabase::make_key_for_account_balance(recipient);
         let mut recipient_balance = self
             .db_ref
-            .get_data(&recipient_key)?
+            .get(&recipient_key)?
             .unwrap_or(STXBalance::zero());

         recipient_balance
             .ok_or(Error::Runtime(RuntimeErrorType::ArithmeticOverflow, None))?;

@@ -394,7 +394,7 @@
         self.debit(amount)?;
-        self.db_ref.put_data(&recipient_key, &recipient_balance)?;
+        self.db_ref.put(&recipient_key, &recipient_balance)?;
         self.save()?;
         Ok(())
     }
diff --git a/clarity/src/vm/database/tests/clarity_db_tests.proptest-regressions b/clarity/src/vm/database/tests/clarity_db_tests.proptest-regressions
new file mode 100644
index 00000000000..d33e1207fa7
--- /dev/null
+++ b/clarity/src/vm/database/tests/clarity_db_tests.proptest-regressions
@@ -0,0 +1,7 @@
+# Seeds for failure cases proptest has generated in the past. It is
+# automatically read and these particular cases re-run before any
+# novel cases are generated.
+#
+# It is recommended to check this file in to source control so that
+# everyone who runs the test benefits from these saved cases.
+cc 44a2de005734c41ec4acdb6ee75d149396bd7eef4da14a5394136bf2709c57bc # shrinks to contract = Contract { contract_context: ContractContext { contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(SFZ9QYBVSPEMR0N5TNE04KKADNKK5CC4WGPVJ9E3), name: ContractName("a") }, variables: StacksHashMap({}), functions: StacksHashMap({ClarityName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"): DefinedFunction { identifier: FunctionIdentifier { identifier: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa:aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" }, name: ClarityName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), arg_types: [], define_type: Public, arguments: [ClarityName("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")], body: SymbolicExpression { expr: Field(TraitIdentifier { name: ClarityName("aaaqxfqzdtenocsexxufoapwudixroehnpfrwejw"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(SXVR4HA3ZP608RM0VW842ME3MBGPPGME3AXR05PG), name: ContractName("CIzVzDUEJpztkQgAnXpbQ") } }), id: 8322152634755796411 } }}), defined_traits: StacksHashMap({ClarityName("yqjoixyngkmxxymzotyuprvihevanobhauphmaea"): {ClarityName("fwnbpjiolicgdlvidojzwsxnukukgnorxftwxxib"): FunctionSignature { args: [OptionalType(BoolType), ResponseType((IntType, NoType))], returns: OptionalType(SequenceType(StringType(UTF8(StringUTF8Length(14))))) }}}), implemented_traits: StacksHashSet({TraitIdentifier { name: ClarityName("amuosfpuavlsrfxrjgpiovauqrrbztmrdeyvowae"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(SP1J7XCE1PBAP3T5RXQB198714S6VB5NCWC15XWWH), name: ContractName("ohTSbunZmLnHHLKmOXW") } }, TraitIdentifier { name: ClarityName("btsssgqmytkeuvrcevlvfjubrqgidbaiyfhbpeqf"), contract_identifier: QualifiedContractIdentifier { issuer: StandardPrincipalData(S02599A868C3Z6JZSTED37HHKV3XTT4QVS80JEYMR), name: ContractName("MgMNMSrWqgrXYaDBPHkFWVZ") } }}), persisted_names: StacksHashSet({ClarityName("vwmatxcbwcqikstlcpobbbgrwkztotfvtqbvrwws"), ClarityName("bnvgwubsasnhjsgxqyqaigaxgnxvmttrlzphzhgu"), ClarityName("edjpvbitaeszopxsyfeecylkhwiudexmklwbnzrt")}), meta_data_map: StacksHashMap({ClarityName("rccoqoirtsdyqkctrbsvoakrhbzufgbhsborwbxz"): DataMapMetadata { key_type: TupleType(TupleTypeSignature { "IzFLwirBWTTeQxCW": (buff 114), "KpRwliLw": uint, "SpPPzWMrDrYq": (buff 62), "UGwUCEU": (buff 61), "kstVrzo": (string-utf8 31), "yFtTFUqvIRm": (string-utf8 17),}), value_type: ResponseType((PrincipalType, SequenceType(StringType(UTF8(StringUTF8Length(22)))))) }, ClarityName("hzimmazvkedqordbdaywazyskqdhdzzqusdnedwo"): DataMapMetadata { key_type: ResponseType((UIntType, BoolType)), value_type: TupleType(TupleTypeSignature { "DXboPISrAIudN": int, "PRhPHewsyHFff": (response (string-utf8 20) (string-utf8 15)), "lIO": (string-ascii 127), "mvQRanZgRpcGcct": uint,}) }, ClarityName("txwizbtmdrypbggfmnuqergiszydhxnlorlsbtwa"): DataMapMetadata { key_type: OptionalType(SequenceType(StringType(ASCII(BufferLength(79))))), value_type: ResponseType((NoType, SequenceType(StringType(ASCII(BufferLength(123)))))) }}), meta_data_var: StacksHashMap({ClarityName("kevhnvabdsujgvsxnixkcnjxlsaqwmwcdhjrrwfi"): DataVariableMetadata { value_type: SequenceType(ListType(ListTypeData { max_len: 12, entry_type: SequenceType(BufferType(BufferLength(100))) })) }, ClarityName("kifjvjytlvijyxnnhdbpmdppttpbugjtvpcjspvv"): DataVariableMetadata { value_type: OptionalType(SequenceType(StringType(ASCII(BufferLength(88))))) }}), meta_nft: StacksHashMap({ClarityName("flnkzucidxkpmohmioosxupwhvaugcczrexdxyky"): 
NonFungibleTokenMetadata { key_type: OptionalType(UIntType) }, ClarityName("zualwtsklswtrycjhvzajfhjikeayznwxavurltn"): NonFungibleTokenMetadata { key_type: UIntType }}), meta_ft: StacksHashMap({ClarityName("raaodzfstgmvfmhmzpbkmolwmuwlymlbalsntqme"): FungibleTokenMetadata { total_supply: None }}), data_size: 20, clarity_version: Clarity1 } }
diff --git a/clarity/src/vm/database/tests/clarity_db_tests.rs b/clarity/src/vm/database/tests/clarity_db_tests.rs
new file mode 100644
index 00000000000..bc3c833c08b
--- /dev/null
+++ b/clarity/src/vm/database/tests/clarity_db_tests.rs
@@ -0,0 +1,186 @@
+use proptest::prelude::*;
+use rusqlite::NO_PARAMS;
+use stacks_common::proptesting::sha_512_trunc_256_sum;
+use stacks_common::util::hash::Sha512Trunc256Sum;
+
+use crate::proptesting::*;
+use crate::vm::contracts::Contract;
+use crate::vm::database::clarity_store::ContractCommitment;
+use crate::vm::database::{
+    ClarityBackingStore, ClarityDatabase, ClaritySerializable, MemoryBackingStore,
+    NULL_BURN_STATE_DB, NULL_HEADER_DB,
+};
+use crate::vm::Value;
+
+proptest! {
+    #[test]
+    fn insert_contract(contract in contract()) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        db.begin();
+
+        let contract_id = contract.contract_context.contract_identifier.clone();
+
+        db.insert_contract(&contract_id, contract)
+            .expect("failed to insert contract into backing store");
+
+        let exists = sql_metadata_table_key_count(&mut store, &contract_id.to_string()) > 0;
+        assert!(!exists);
+    }
+
+    #[test]
+    fn get_contract(contract in contract()) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        db.begin();
+
+        let contract_id = contract.contract_context.contract_identifier.clone();
+
+        db.insert_contract(&contract_id, contract.clone())
+            .expect("failed to insert contract into backing store");
+
+        let retrieved_contract = db
+            .get_contract(&contract_id)
+            .expect("failed to retrieve contract from backing store");
+
+        assert_eq!(contract, retrieved_contract);
+    }
+
+    #[test]
+    fn insert_contract_without_begin_should_fail(contract in contract()) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        let contract_id = contract.contract_context.contract_identifier.clone();
+
+        db.insert_contract(&contract_id, contract)
+            .expect_err("inserting contract without a begin should fail");
+    }
+
+    #[test]
+    fn insert_contract_with_commit_should_exist_in_backing_store(contract in contract()) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        db.begin();
+
+        let contract_id = contract.contract_context.contract_identifier.clone();
+
+        db.insert_contract(&contract_id, contract.clone())
+            .expect("failed to insert contract into backing store");
+
+        db.commit().expect("failed to commit to backing store");
+
+        let contract_key = format!(
+            "clr-meta::{}::vm-metadata::9::contract",
+            contract_id.to_string()
+        );
+
+        let count = sql_metadata_table_key_count(&mut store, &contract_key);
+
+        assert_eq!(1, count);
+    }
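// Note on the key asserted just above: metadata rows in the side store use the
// layout "clr-meta::{contract id}::vm-metadata::{store type}::{key}"; the `9`
// is presumably the discriminant of `StoreType::Contract` (inferred from this
// test rather than verified against clarity_db.rs here).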
+
+    #[test]
+    fn put_data_no_commit(
+        key in any::<String>(),
+        block_height in any::<u32>(),
+        hash in sha_512_trunc_256_sum()
+    ) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        db.begin();
+
+        db.put(
+            &key,
+            &ContractCommitment {
+                block_height,
+                hash,
+            },
+        )
+        .expect("failed to put data");
+
+        let count = sql_data_table_key_count(&mut store, &key.to_string());
+        assert_eq!(0, count);
+    }
+
+    #[test]
+    fn put_data_with_commit_should_exist_in_backing_store(
+        key in any::<String>(),
+        block_height in any::<u32>(),
+        hash in sha_512_trunc_256_sum()
+    ) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        db.begin();
+
+        db.put(
+            &key,
+            &ContractCommitment {
+                block_height,
+                hash,
+            },
+        )
+        .expect("failed to put data");
+
+        db.commit().expect("failed to commit to backing store");
+
+        let count = sql_data_table_key_count(&mut store, &key.to_string());
+        assert_eq!(1, count);
+    }
+
+    #[test]
+    fn put_data_without_begin_fails(
+        key in any::<String>(),
+        block_height in any::<u32>(),
+        hash in sha_512_trunc_256_sum()
+    ) {
+        let mut store = MemoryBackingStore::new();
+        let mut db = ClarityDatabase::new(&mut store, &NULL_HEADER_DB, &NULL_BURN_STATE_DB);
+
+        db.put(
+            &key,
+            &ContractCommitment {
+                block_height,
+                hash,
+            },
+        )
+        .expect_err("expected not-nested error");
+    }
+}
+
+/// Returns the number of rows in the metadata table for the provided key.
+fn sql_metadata_table_key_count<S: ClarityBackingStore>(store: &mut S, key: &str) -> u32 {
+    let sqlite = store.get_side_store();
+    let count = sqlite
+        .query_row(
+            "SELECT COUNT(*) FROM metadata_table WHERE key = ?1;",
+            &[key],
+            |row| {
+                let i: u32 = row.get(0)?;
+                Ok(i)
+            },
+        )
+        .expect("failed to verify results in sqlite");
+    count
+}
+
+/// Returns the number of rows in the `data_table` with the given key.
+fn sql_data_table_key_count<S: ClarityBackingStore>(store: &mut S, key: &str) -> u32 {
+    let sqlite = store.get_side_store();
+    let count = sqlite
+        .query_row(
+            "SELECT COUNT(*) FROM data_table WHERE key = ?1;",
+            &[key],
+            |row| {
+                let i: u32 = row.get(0)?;
+                Ok(i)
+            },
+        )
+        .expect("failed to verify results in sqlite");
+    count
+}
diff --git a/clarity/src/vm/database/tests/mod.rs b/clarity/src/vm/database/tests/mod.rs
new file mode 100644
index 00000000000..7c49f7d37e3
--- /dev/null
+++ b/clarity/src/vm/database/tests/mod.rs
@@ -0,0 +1 @@
+mod clarity_db_tests;
diff --git a/clarity/src/vm/docs/contracts.rs b/clarity/src/vm/docs/contracts.rs
index 7426be7966f..37530189580 100644
--- a/clarity/src/vm/docs/contracts.rs
+++ b/clarity/src/vm/docs/contracts.rs
@@ -1,8 +1,7 @@
 use std::collections::BTreeMap;

-use hashbrown::{HashMap, HashSet};
 use stacks_common::consts::CHAIN_ID_TESTNET;
-use stacks_common::types::StacksEpochId;
+use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};

 use crate::vm::analysis::{mem_type_check, ContractAnalysis};
 use crate::vm::ast::{build_ast_with_rules, ASTRules};
diff --git a/clarity/src/vm/docs/mod.rs b/clarity/src/vm/docs/mod.rs
index e0b78403b93..205fdc6170d 100644
--- a/clarity/src/vm/docs/mod.rs
+++ b/clarity/src/vm/docs/mod.rs
@@ -2917,7 +2917,7 @@ mod test {
                 .type_map
                 .as_ref()
                 .unwrap()
-                .get_type_expected(&analysis.expressions.last().unwrap())
+                .get_type(&analysis.expressions.last().unwrap())
                 .cloned(),
         );
     }
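The import swaps above (and in the hunks that follow) are intended as drop-in replacements: only the `use` line moves, while the `HashMap`/`HashSet` names stay the same at every call site. A minimal sketch, assuming `StacksHashMap`/`StacksHashSet` mirror the hashbrown API they replace (which the unchanged call sites throughout this diff imply):

    use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet};

    fn demo() {
        // Call sites keep compiling unchanged because only the import changed.
        let set: HashSet<u32> = HashSet::from_iter([1, 2, 3]);
        assert!(set.contains(&2));
        let map: HashMap<&str, u32> = HashMap::from_iter([("height", 1)]);
        assert_eq!(map.get("height"), Some(&1));
    }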
diff --git a/clarity/src/vm/mod.rs b/clarity/src/vm/mod.rs
index bca5223828f..b5a8fe1cc8f 100644
--- a/clarity/src/vm/mod.rs
+++ b/clarity/src/vm/mod.rs
@@ -586,9 +586,8 @@ pub fn execute_v2(program: &str) -> Result<Option<Value>> {

 #[cfg(test)]
 mod test {
-    use hashbrown::HashMap;
     use stacks_common::consts::CHAIN_ID_TESTNET;
-    use stacks_common::types::StacksEpochId;
+    use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};

     use super::ClarityVersion;
     use crate::vm::callables::{DefineType, DefinedFunction};
@@ -609,16 +608,16 @@ mod test {
         // (define a 59)
         // (do_work a)
         //
-        let content = [SymbolicExpression::list(vec![
+        let content = [SymbolicExpression::list(Box::new([
             SymbolicExpression::atom("do_work".into()),
             SymbolicExpression::atom("a".into()),
-        ])];
+        ]))];

-        let func_body = SymbolicExpression::list(vec![
+        let func_body = SymbolicExpression::list(Box::new([
             SymbolicExpression::atom("+".into()),
             SymbolicExpression::atom_value(Value::Int(5)),
             SymbolicExpression::atom("x".into()),
-        ]);
+        ]));

         let func_args = vec![("x".into(), TypeSignature::IntType)];
         let user_function = DefinedFunction::new(
diff --git a/clarity/src/vm/representations.rs b/clarity/src/vm/representations.rs
index c80e3c74672..15e674eb13c 100644
--- a/clarity/src/vm/representations.rs
+++ b/clarity/src/vm/representations.rs
@@ -170,8 +170,8 @@ impl StacksMessageCodec for ContractName {
 pub enum PreSymbolicExpressionType {
     AtomValue(Value),
     Atom(ClarityName),
-    List(Vec<PreSymbolicExpression>),
-    Tuple(Vec<PreSymbolicExpression>),
+    List(Box<[PreSymbolicExpression]>),
+    Tuple(Box<[PreSymbolicExpression]>),
     SugaredContractIdentifier(ContractName),
     SugaredFieldIdentifier(ContractName, ClarityName),
     FieldIdentifier(TraitIdentifier),
@@ -323,14 +323,14 @@ impl PreSymbolicExpression {
         }
     }

-    pub fn list(val: Vec<PreSymbolicExpression>) -> PreSymbolicExpression {
+    pub fn list(val: Box<[PreSymbolicExpression]>) -> PreSymbolicExpression {
         PreSymbolicExpression {
             pre_expr: PreSymbolicExpressionType::List(val),
             ..PreSymbolicExpression::cons()
         }
     }

-    pub fn tuple(val: Vec<PreSymbolicExpression>) -> PreSymbolicExpression {
+    pub fn tuple(val: Box<[PreSymbolicExpression]>) -> PreSymbolicExpression {
         PreSymbolicExpression {
             pre_expr: PreSymbolicExpressionType::Tuple(val),
             ..PreSymbolicExpression::cons()
@@ -412,7 +412,7 @@
 pub enum SymbolicExpressionType {
     AtomValue(Value),
     Atom(ClarityName),
-    List(Vec<SymbolicExpression>),
+    List(Box<[SymbolicExpression]>),
     LiteralValue(Value),
     Field(TraitIdentifier),
     TraitReference(ClarityName, TraitDefinition),
@@ -544,7 +544,7 @@ impl SymbolicExpression {
         }
     }

-    pub fn list(val: Vec<SymbolicExpression>) -> SymbolicExpression {
+    pub fn list(val: Box<[SymbolicExpression]>) -> SymbolicExpression {
         SymbolicExpression {
             expr: SymbolicExpressionType::List(val),
             ..SymbolicExpression::cons()
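The `Vec` to `Box<[..]>` change above shows up at every constructor call site; the new shape, taken from the test updates in this diff (a plausible motivation, though not stated here, is dropping `Vec`'s spare-capacity word and making the payload immutable):

    use clarity::vm::representations::SymbolicExpression;

    // Before: SymbolicExpression::list(vec![..])
    // After: the elements are handed over as an immutable boxed slice.
    let expr = SymbolicExpression::list(Box::new([
        SymbolicExpression::atom("do_work".into()),
        SymbolicExpression::atom("a".into()),
    ]));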
diff --git a/clarity/src/vm/tests/principals.rs b/clarity/src/vm/tests/principals.rs
index 78fcf176594..625e1a162fb 100644
--- a/clarity/src/vm/tests/principals.rs
+++ b/clarity/src/vm/tests/principals.rs
@@ -1,5 +1,4 @@
-use hashbrown::HashMap;
-use stacks_common::types::StacksEpochId;
+use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};
 use stacks_common::util::hash::hex_bytes;

 use crate::vm::ast::ASTRules;
diff --git a/clarity/src/vm/types/mod.rs b/clarity/src/vm/types/mod.rs
index 46734dcc517..abcb43a6ca8 100644
--- a/clarity/src/vm/types/mod.rs
+++ b/clarity/src/vm/types/mod.rs
@@ -23,7 +23,6 @@
 use std::collections::btree_map::Entry;
 use std::collections::BTreeMap;
 use std::{char, cmp, fmt, str};

-use hashbrown::hash_map::OccupiedEntry;
 use regex::Regex;
 use stacks_common::address::c32;
 use stacks_common::types::chainstate::StacksAddress;
@@ -108,11 +107,6 @@ impl QualifiedContractIdentifier {
         }
     }

-    /// Was this contract issued by the null issuer address? (i.e., is it a "boot contract")
-    pub fn is_boot(&self) -> bool {
-        self.issuer.1 == [0; 20]
-    }
-
     pub fn parse(literal: &str) -> Result<QualifiedContractIdentifier> {
         let split: Vec<_> = literal.splitn(2, '.').collect();
         if split.len() != 2 {
@@ -1529,10 +1523,6 @@ impl TupleData {
         self.data_map.is_empty()
     }

-    ///TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case:
-    /// Ok((Default::default()))
-    /// Or keep the skip and remove the comment
-    #[cfg_attr(test, mutants::skip)]
     pub fn from_data(data: Vec<(ClarityName, Value)>) -> Result<TupleData> {
         let mut type_map = BTreeMap::new();
         let mut data_map = BTreeMap::new();
@@ -1549,10 +1539,6 @@
         Self::new(TupleTypeSignature::try_from(type_map)?, data_map)
     }

-    ///TODO: #4587 create default for TupleData, then check if the mutation tests are caught for the case:
-    /// Ok((Default::default()))
-    /// Or keep the skip and remove the comment
-    #[cfg_attr(test, mutants::skip)]
     pub fn from_data_typed(
         epoch: &StacksEpochId,
         data: Vec<(ClarityName, Value)>,
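As a usage sketch of `from_data` (kept above, minus its mutants annotations): it builds both the type map and the data map from the value pairs, then validates the result via `TupleTypeSignature::try_from`. The accessor name and import paths are assumed from the `clarity` crate:

    use clarity::vm::types::{TupleData, Value};

    let tuple = TupleData::from_data(vec![
        ("a".into(), Value::Int(5)),
        ("b".into(), Value::Bool(true)),
    ])
    .expect("building a tuple from well-formed pairs should succeed");
    // Fields are retrievable by name once the tuple is constructed.
    let v = tuple.get("a").expect("field `a` should exist");
    assert_eq!(v, &Value::Int(5));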
diff --git a/clarity/src/vm/types/serialization.rs b/clarity/src/vm/types/serialization.rs
index 7dcda788a8f..a51ffdc01cc 100644
--- a/clarity/src/vm/types/serialization.rs
+++ b/clarity/src/vm/types/serialization.rs
@@ -17,11 +17,10 @@
 use std::io::{Read, Write};
 use std::{cmp, error, fmt, str};

-use hashbrown::HashMap;
 use lazy_static::lazy_static;
 use serde_json::Value as JSONValue;
 use stacks_common::codec::{Error as codec_error, StacksMessageCodec};
-use stacks_common::types::StacksEpochId;
+use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet};
 use stacks_common::util::hash::{hex_bytes, to_hex};
 use stacks_common::util::retry::BoundReader;
@@ -559,7 +558,7 @@ impl Value {
             }
         };

-        if bytes_read > expect_size as u64 {
+        if expect_size as u64 > bytes_read {
             // this can happen due to sanitization, so its no longer indicative of a *problem* with the node.
             debug!(
                 "Deserialized more bytes than expected size during deserialization. Expected size = {}, bytes read = {}, type = {}",
diff --git a/clarity/src/vm/types/signatures.rs b/clarity/src/vm/types/signatures.rs
index c9971f97aeb..05c8e722dc3 100644
--- a/clarity/src/vm/types/signatures.rs
+++ b/clarity/src/vm/types/signatures.rs
@@ -15,15 +15,15 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.

 use std::collections::btree_map::Entry;
-use std::collections::{hash_map, BTreeMap, HashMap};
+use std::collections::BTreeMap;
 use std::hash::{Hash, Hasher};
 use std::{cmp, fmt};

-// TypeSignatures
-use hashbrown::HashSet;
 use lazy_static::lazy_static;
 use stacks_common::address::c32;
 use stacks_common::types::StacksEpochId;
+// TypeSignatures
+use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet};
 use stacks_common::util::hash;

 use crate::vm::costs::{cost_functions, runtime_cost, CostOverflowingMath};
@@ -76,14 +76,14 @@ impl AssetIdentifier {
 #[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct TupleTypeSignature {
-    type_map: HashMap<ClarityName, TypeSignature>,
+    pub type_map: BTreeMap<ClarityName, TypeSignature>,
 }

 #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
-pub struct BufferLength(u32);
+pub struct BufferLength(pub u32);

 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
-pub struct StringUTF8Length(u32);
+pub struct StringUTF8Length(pub u32);

 // INVARIANTS enforced by the Type Signatures.
 // 1. A TypeSignature constructor will always fail rather than construct a
@@ -221,8 +221,8 @@ pub const UTF8_40: TypeSignature = SequenceType(SequenceSubtype::StringType(Stri

 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct ListTypeData {
-    max_len: u32,
-    entry_type: Box<TypeSignature>,
+    pub max_len: u32,
+    pub entry_type: Box<TypeSignature>,
 }

 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -787,7 +787,7 @@ impl TypeSignature {
                 inner_type.1.canonicalize_v2_1(),
             ))),
             TupleType(ref tuple_sig) => {
-                let mut canonicalized_fields = HashMap::new();
+                let mut canonicalized_fields = BTreeMap::new();
                 for (field_name, field_type) in tuple_sig.get_type_map() {
                     canonicalized_fields.insert(field_name.clone(), field_type.canonicalize_v2_1());
                 }
@@ -808,7 +808,7 @@
             ListUnionType(types) => {
                 let mut is_trait = None;
                 let mut is_principal = true;
-                for partial in types {
+                for partial in types.iter() {
                     match partial {
                         CallableSubtype::Principal(_) => {
                             if is_trait.is_some() {
@@ -851,9 +851,9 @@ impl TryFrom<Vec<(ClarityName, TypeSignature)>> for TupleTypeSignature {
             return Err(CheckErrors::EmptyTuplesNotAllowed);
         }

-        let mut type_map = HashMap::new();
+        let mut type_map = BTreeMap::new();
         for (name, type_info) in type_data.into_iter() {
-            if let hash_map::Entry::Vacant(e) = type_map.entry(name.clone()) {
+            if let Entry::Vacant(e) = type_map.entry(name.clone()) {
                 e.insert(type_info);
             } else {
                 return Err(CheckErrors::NameAlreadyUsed(name.into()));
@@ -866,30 +866,6 @@
 impl TryFrom<BTreeMap<ClarityName, TypeSignature>> for TupleTypeSignature {
     type Error = CheckErrors;
     fn try_from(type_map: BTreeMap<ClarityName, TypeSignature>) -> Result<TupleTypeSignature> {
-        if type_map.is_empty() {
-            return Err(CheckErrors::EmptyTuplesNotAllowed);
-        }
-        for child_sig in type_map.values() {
-            if (1 + child_sig.depth()) > MAX_TYPE_DEPTH {
-                return Err(CheckErrors::TypeSignatureTooDeep);
-            }
-        }
-        let type_map = type_map.into_iter().collect();
-        let result = TupleTypeSignature { type_map };
-        let would_be_size = result
-            .inner_size()?
-            .ok_or_else(|| CheckErrors::ValueTooLarge)?;
-        if would_be_size > MAX_VALUE_SIZE {
-            Err(CheckErrors::ValueTooLarge)
-        } else {
-            Ok(result)
-        }
-    }
-}
-
-impl TryFrom<HashMap<ClarityName, TypeSignature>> for TupleTypeSignature {
-    type Error = CheckErrors;
-    fn try_from(type_map: HashMap<ClarityName, TypeSignature>) -> Result<TupleTypeSignature> {
         if type_map.is_empty() {
             return Err(CheckErrors::EmptyTuplesNotAllowed);
         }
@@ -925,7 +901,7 @@ impl TupleTypeSignature {
         self.type_map.get(field)
     }

-    pub fn get_type_map(&self) -> &HashMap<ClarityName, TypeSignature> {
+    pub fn get_type_map(&self) -> &BTreeMap<ClarityName, TypeSignature> {
         &self.type_map
     }
@@ -961,7 +937,7 @@ impl TupleTypeSignature {
     }

     pub fn shallow_merge(&mut self, update: &mut TupleTypeSignature) {
-        self.type_map.extend(update.type_map.drain());
+        self.type_map.append(&mut update.type_map);
     }
 }
@@ -1354,7 +1330,7 @@ impl TypeSignature {
             if x == y {
                 Ok(a.clone())
             } else {
-                Ok(ListUnionType(HashSet::from([x.clone(), y.clone()])))
+                Ok(ListUnionType(HashSet::from_iter([x.clone(), y.clone()])))
             }
         }
         (ListUnionType(l), CallableType(c)) | (CallableType(c), ListUnionType(l)) => {
@@ -1366,7 +1342,7 @@
             | (CallableType(CallableSubtype::Principal(_)), PrincipalType) => Ok(PrincipalType),
             (PrincipalType, ListUnionType(l)) | (ListUnionType(l), PrincipalType) => {
                 let mut all_principals = true;
-                for ty in l {
+                for ty in l.iter() {
                     match ty {
                         CallableSubtype::Trait(_) => {
                             all_principals = false;
@@ -1969,9 +1945,7 @@ pub fn parse_name_type_pairs(
 impl fmt::Display for TupleTypeSignature {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         write!(f, "(tuple")?;
-        let mut type_strs: Vec<_> = self.type_map.iter().collect();
-        type_strs.sort_unstable_by_key(|x| x.0);
-        for (field_name, field_type) in type_strs {
+        for (field_name, field_type) in self.type_map.iter() {
             write!(f, " ({} {})", &**field_name, field_type)?;
         }
         write!(f, ")")
@@ -2157,7 +2131,7 @@ mod test {
             contract_identifier: QualifiedContractIdentifier::transient(),
         }),
     ];
-    let list_union = ListUnionType(callables.clone().into());
+    let list_union = ListUnionType(callables.to_vec().into_iter().collect());
     let callables2 = [
         CallableSubtype::Principal(QualifiedContractIdentifier::local("bar").unwrap()),
         CallableSubtype::Trait(TraitIdentifier {
@@ -2165,7 +2139,7 @@
             contract_identifier: QualifiedContractIdentifier::transient(),
         }),
     ];
-    let list_union2 = ListUnionType(callables2.clone().into());
+    let list_union2 = ListUnionType(callables2.to_vec().into_iter().collect());
     let list_union_merged = ListUnionType(HashSet::from_iter(
         [callables, callables2].concat().iter().cloned(),
     ));
     let callable_principals = [
         CallableSubtype::Principal(QualifiedContractIdentifier::local("foo").unwrap()),
         CallableSubtype::Principal(QualifiedContractIdentifier::local("bar").unwrap()),
     ];
-    let list_union_principals = ListUnionType(callable_principals.into());
+    let list_union_principals =
+        ListUnionType(callable_principals.to_vec().into_iter().collect());

     let notype_pairs = [
         // NoType with X should result in X
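One consequence worth illustrating: with the tuple type map now backed by `BTreeMap`, iteration is key-ordered, which is why the `Display` impl above can drop its explicit sort. A minimal sketch of the guarantee, using std types only:

    use std::collections::BTreeMap;

    let mut fields = BTreeMap::new();
    fields.insert("b", "uint");
    fields.insert("a", "int");
    // BTreeMap iterates in sorted key order, so the rendered form is
    // deterministic without the sort_unstable_by_key pass that the
    // HashMap-backed version needed.
    let rendered: Vec<_> = fields.iter().map(|(k, v)| format!("({k} {v})")).collect();
    assert_eq!(rendered, ["(a int)", "(b uint)"]);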
diff --git a/contrib/core-contract-tests/deployments/default.simnet-plan.yaml b/contrib/core-contract-tests/deployments/default.simnet-plan.yaml deleted file mode 100644 index 573a58fe51b..00000000000 --- a/contrib/core-contract-tests/deployments/default.simnet-plan.yaml +++ /dev/null @@ -1,87 +0,0 @@ --- -id: 0 -name: "Simulated deployment, used as a default for `clarinet console`, `clarinet test` and `clarinet check`" -network: simnet -genesis: - wallets: - - name: deployer - address: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - balance: "100000000000000" - - name: wallet_1 - address: ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5 - balance: "100000000000000" - - name: wallet_2 - address: ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG - balance: "100000000000000" - - name: wallet_3 - address: ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC - balance: "100000000000000" - - name: wallet_4 - address: ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND - balance: "100000000000000" - - name: wallet_5 - address: ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB - balance: "100000000000000" - - name: wallet_6 - address: ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0 - balance: "100000000000000" - - name: wallet_7 - address: ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ - balance: "100000000000000" - - name: wallet_8 - address: ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP - balance: "100000000000000" - - name: wallet_9 - address: STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6 - balance: "100000000000000" - contracts: - - costs - - pox - - pox-2 - - pox-3 - - pox-4 - - lockup - - costs-2 - - costs-3 - - cost-voting - - bns -plan: - batches: - - id: 0 - transactions: - - emulated-contract-publish: - contract-name: bns - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "../../stackslib/src/chainstate/stacks/boot/bns.clar" - clarity-version: 2 - - emulated-contract-publish: - contract-name: bns_test - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "./tests/bns_test.clar" - clarity-version: 2 - - emulated-contract-publish: - contract-name: pox-4 - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "../../stackslib/src/chainstate/stacks/boot/pox-4.clar" - clarity-version: 2 - - emulated-contract-publish: - contract-name: pox-mainnet - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "../../stackslib/src/chainstate/stacks/boot/pox-mainnet.clar" - clarity-version: 2 - - emulated-contract-publish: - contract-name: pox_4_test - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "./tests/pox_4_test.clar" - clarity-version: 2 - - emulated-contract-publish: - contract-name: signers - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "../../stackslib/src/chainstate/stacks/boot/signers.clar" - clarity-version: 2 - - emulated-contract-publish: - contract-name: signers-voting - emulated-sender: ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM - path: "../../stackslib/src/chainstate/stacks/boot/signers-voting.clar" - clarity-version: 2 - epoch: "2.4" diff --git a/contrib/core-contract-tests/package-lock.json b/contrib/core-contract-tests/package-lock.json index 50ded82d383..cb7cba8a422 100644 --- a/contrib/core-contract-tests/package-lock.json +++ b/contrib/core-contract-tests/package-lock.json @@ -9,15 +9,15 @@ "version": "1.0.0", "license": "ISC", "dependencies": { - "@hirosystems/clarinet-sdk": "^2.4.1", + "@hirosystems/clarinet-sdk": "^1.1.0", "@stacks/clarunit": "0.0.1", "@stacks/transactions": "^6.12.0", "chokidar-cli": "^3.0.0", "fast-check": "^3.15.1", - "typescript": "^5.4.2", - "vite": "^5.1.6", - "vitest": "^1.3.1", - "vitest-environment-clarinet": "^2.0.0" + "typescript": "^5.2.2", + "vite": "^4.4.9", + "vitest": "^0.34.4", + "vitest-environment-clarinet": "^1.0.0" } }, "node_modules/@aashutoshrathi/word-wrap": { "node": ">=0.10.0" } }, - "node_modules/@esbuild/aix-ppc64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.19.12.tgz", - "integrity": "sha512-bmoCYyWdEL3wDQIVbcyzRyeKLgk2WtWLTWz1ZIAZF/EGbNOwSA6ew3PftJ1PqMiOOGu0OyFMzG53L0zqIpPeNA==", - "cpu": [ - 
"ppc64" - ], - "optional": true, - "os": [ - "aix" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.19.12.tgz", - "integrity": "sha512-qg/Lj1mu3CdQlDEEiWrlC4eaPZ1KztwGJ9B6J+/6G+/4ewxJg7gqj8eVYWvao1bXrqGiW2rsBZFSX3q2lcW05w==", - "cpu": [ - "arm" - ], - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.19.12.tgz", - "integrity": "sha512-P0UVNGIienjZv3f5zq0DP3Nt2IE/3plFzuaS96vihvD0Hd6H/q4WXUGpCxD/E8YrSXfNyRPbpTq+T8ZQioSuPA==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/android-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.19.12.tgz", - "integrity": "sha512-3k7ZoUW6Q6YqhdhIaq/WZ7HwBpnFBlW905Fa4s4qWJyiNOgT1dOqDiVAQFwBH7gBRZr17gLrlFCRzF6jFh7Kew==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, "node_modules/@esbuild/darwin-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", - "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", + "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", "cpu": [ "arm64" ], @@ -103,276 +43,6 @@ "node": ">=12" } }, - "node_modules/@esbuild/darwin-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.19.12.tgz", - "integrity": "sha512-hKoVkKzFiToTgn+41qGhsUJXFlIjxI/jSYeZf3ugemDYZldIXIxhvwN6erJGlX4t5h417iFuheZ7l+YVn05N3A==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.12.tgz", - "integrity": "sha512-4aRvFIXmwAcDBw9AueDQ2YnGmz5L6obe5kmPT8Vd+/+x/JMVKCgdcRwH6APrbpNXsPz+K653Qg8HB/oXvXVukA==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/freebsd-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.19.12.tgz", - "integrity": "sha512-EYoXZ4d8xtBoVN7CEwWY2IN4ho76xjYXqSXMNccFSx2lgqOG/1TBPW0yPx1bJZk94qu3tX0fycJeeQsKovA8gg==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.19.12.tgz", - "integrity": "sha512-J5jPms//KhSNv+LO1S1TX1UWp1ucM6N6XuL6ITdKWElCu8wXP72l9MM0zDTzzeikVyqFE6U8YAV9/tFyj0ti+w==", - "cpu": [ - "arm" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.19.12.tgz", - "integrity": 
"sha512-EoTjyYyLuVPfdPLsGVVVC8a0p1BFFvtpQDB/YLEhaXyf/5bczaGeN15QkR+O4S5LeJ92Tqotve7i1jn35qwvdA==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ia32": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.19.12.tgz", - "integrity": "sha512-Thsa42rrP1+UIGaWz47uydHSBOgTUnwBwNq59khgIwktK6x60Hivfbux9iNR0eHCHzOLjLMLfUMLCypBkZXMHA==", - "cpu": [ - "ia32" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-loong64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.19.12.tgz", - "integrity": "sha512-LiXdXA0s3IqRRjm6rV6XaWATScKAXjI4R4LoDlvO7+yQqFdlr1Bax62sRwkVvRIrwXxvtYEHHI4dm50jAXkuAA==", - "cpu": [ - "loong64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-mips64el": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.19.12.tgz", - "integrity": "sha512-fEnAuj5VGTanfJ07ff0gOA6IPsvrVHLVb6Lyd1g2/ed67oU1eFzL0r9WL7ZzscD+/N6i3dWumGE1Un4f7Amf+w==", - "cpu": [ - "mips64el" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-ppc64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.19.12.tgz", - "integrity": "sha512-nYJA2/QPimDQOh1rKWedNOe3Gfc8PabU7HT3iXWtNUbRzXS9+vgB0Fjaqr//XNbd82mCxHzik2qotuI89cfixg==", - "cpu": [ - "ppc64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-riscv64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.19.12.tgz", - "integrity": "sha512-2MueBrlPQCw5dVJJpQdUYgeqIzDQgw3QtiAHUC4RBz9FXPrskyyU3VI1hw7C0BSKB9OduwSJ79FTCqtGMWqJHg==", - "cpu": [ - "riscv64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-s390x": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.19.12.tgz", - "integrity": "sha512-+Pil1Nv3Umes4m3AZKqA2anfhJiVmNCYkPchwFJNEJN5QxmTs1uzyy4TvmDrCRNT2ApwSari7ZIgrPeUx4UZDg==", - "cpu": [ - "s390x" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/linux-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.19.12.tgz", - "integrity": "sha512-B71g1QpxfwBvNrfyJdVDexenDIt1CiDN1TIXLbhOw0KhJzE78KIFGX6OJ9MrtC0oOqMWf+0xop4qEU8JrJTwCg==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/netbsd-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.19.12.tgz", - "integrity": "sha512-3ltjQ7n1owJgFbuC61Oj++XhtzmymoCihNFgT84UAmJnxJfm4sYCiSLTXZtE00VWYpPMYc+ZQmB6xbSdVh0JWA==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "netbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/openbsd-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.19.12.tgz", - "integrity": "sha512-RbrfTB9SWsr0kWmb9srfF+L933uMDdu9BIzdA7os2t0TXhCRjrQyCeOt6wVxr79CKD4c+p+YhCj31HBkYcXebw==", - 
"cpu": [ - "x64" - ], - "optional": true, - "os": [ - "openbsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/sunos-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.19.12.tgz", - "integrity": "sha512-HKjJwRrW8uWtCQnQOz9qcU3mUZhTUQvi56Q8DPTLLB+DawoiQdjsYq+j+D3s9I8VFtDr+F9CjgXKKC4ss89IeA==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "sunos" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-arm64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.19.12.tgz", - "integrity": "sha512-URgtR1dJnmGvX864pn1B2YUYNzjmXkuJOIqG2HdU62MVS4EHpU2946OZoTMnRUHklGtJdJZ33QfzdjGACXhn1A==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-ia32": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.19.12.tgz", - "integrity": "sha512-+ZOE6pUkMOJfmxmBZElNOx72NKpIa/HFOMGzu8fqzQJ5kgf6aTGrcJaFsNiVMH4JKpMipyK+7k0n2UXN7a8YKQ==", - "cpu": [ - "ia32" - ], - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@esbuild/win32-x64": { - "version": "0.19.12", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.19.12.tgz", - "integrity": "sha512-T1QyPSDCyMXaO3pzBkF96E8xMkiRYbUEZADd29SyPGabqxMViNoii+NcK7eWJAEoU6RZyEm5lVSIjTmcdoB9HA==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" - } - }, "node_modules/@eslint-community/eslint-utils": { "version": "4.4.0", "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", @@ -426,15 +96,12 @@ } }, "node_modules/@hirosystems/clarinet-sdk": { - "version": "2.4.1", - "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-2.4.1.tgz", - "integrity": "sha512-gFwU9Ljgmbe9/mLcco3yB9AIY02WT/Nk+BlSAAvFz0kh36FZWwwIqvAs/10LEU1vYwxs6oFh33dTLzNuKUbHdw==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-1.2.0.tgz", + "integrity": "sha512-O0Gyh3pwwOVJTbLlxHG6vSB/KXr+U/nZzd2kpubQO4Qqxjn5/vo8l8J+/fwKOxhzM4QOa42M1sCaVZSB/PkTFg==", "dependencies": { - "@hirosystems/clarinet-sdk-wasm": "^2.4.0", - "@stacks/encryption": "^6.12.0", - "@stacks/network": "^6.11.3", - "@stacks/stacking": "^6.11.4-pr.36558cf.0", - "@stacks/transactions": "^6.12.0", + "@hirosystems/clarinet-sdk-wasm": "^1.2.0", + "@stacks/transactions": "^6.9.0", "kolorist": "^1.8.0", "prompts": "^2.4.2", "vitest": "^1.0.4", @@ -448,21 +115,341 @@ } }, "node_modules/@hirosystems/clarinet-sdk-wasm": { - "version": "2.4.0", - "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-2.4.0.tgz", - "integrity": "sha512-qApXWsnWRtQcj5BsqoKd+AsEtDURA5CJQcRxgCAVjyRSjkbGJXxNgrW9oRnIkfIIKJ6D5mV7JGrr8CQ8BSJ/tg==" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk-wasm/-/clarinet-sdk-wasm-1.2.0.tgz", + "integrity": "sha512-TnJ243lEgIqHSIeMdEHi1hJceFBJ5mWfjfXv86GKaoyVOS6yX1vGL2a6ZuVO9FfWPNxsiSvaQV/FndVuansAVQ==" }, - "node_modules/@humanwhocodes/config-array": { - "version": "0.11.14", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", - "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", - "dependencies": { - 
"@humanwhocodes/object-schema": "^2.0.2", - "debug": "^4.3.1", - "minimatch": "^3.0.5" - }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/@esbuild/darwin-arm64": { + "version": "0.19.10", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.10.tgz", + "integrity": "sha512-YSRRs2zOpwypck+6GL3wGXx2gNP7DXzetmo5pHXLrY/VIMsS59yKfjPizQ4lLt5vEI80M41gjm2BxrGZ5U+VMA==", + "cpu": [ + "arm64" + ], + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": ">=10.10.0" + "node": ">=12" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/@vitest/expect": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.1.0.tgz", + "integrity": "sha512-9IE2WWkcJo2BR9eqtY5MIo3TPmS50Pnwpm66A6neb2hvk/QSLfPXBz2qdiwUOQkwyFuuXEUj5380CbwfzW4+/w==", + "dependencies": { + "@vitest/spy": "1.1.0", + "@vitest/utils": "1.1.0", + "chai": "^4.3.10" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/@vitest/runner": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.1.0.tgz", + "integrity": "sha512-zdNLJ00pm5z/uhbWF6aeIJCGMSyTyWImy3Fcp9piRGvueERFlQFbUwCpzVce79OLm2UHk9iwaMSOaU9jVHgNVw==", + "dependencies": { + "@vitest/utils": "1.1.0", + "p-limit": "^5.0.0", + "pathe": "^1.1.1" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/@vitest/snapshot": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.1.0.tgz", + "integrity": "sha512-5O/wyZg09V5qmNmAlUgCBqflvn2ylgsWJRRuPrnHEfDNT6tQpQ8O1isNGgo+VxofISHqz961SG3iVvt3SPK/QQ==", + "dependencies": { + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/@vitest/spy": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.1.0.tgz", + "integrity": "sha512-sNOVSU/GE+7+P76qYo+VXdXhXffzWZcYIPQfmkiRxaNCSPiLANvQx5Mx6ZURJ/ndtEkUJEpvKLXqAYTKEY+lTg==", + "dependencies": { + "tinyspy": "^2.2.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/@vitest/utils": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.1.0.tgz", + "integrity": "sha512-z+s510fKmYz4Y41XhNs3vcuFTFhcij2YF7F8VQfMEYAAUfqQh0Zfg7+w9xdgFGhPf3tX3TicAe+8BDITk6ampQ==", + "dependencies": { + "diff-sequences": "^29.6.3", + "loupe": "^2.3.7", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/esbuild": { + "version": "0.19.10", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.10.tgz", + "integrity": "sha512-S1Y27QGt/snkNYrRcswgRFqZjaTG5a5xM3EQo97uNBnH505pdzSNe/HLBq1v0RO7iK/ngdbhJB6mDAp0OK+iUA==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.10", + "@esbuild/android-arm": "0.19.10", + "@esbuild/android-arm64": "0.19.10", + "@esbuild/android-x64": "0.19.10", + "@esbuild/darwin-arm64": "0.19.10", + "@esbuild/darwin-x64": "0.19.10", + "@esbuild/freebsd-arm64": "0.19.10", + "@esbuild/freebsd-x64": "0.19.10", + "@esbuild/linux-arm": "0.19.10", + "@esbuild/linux-arm64": "0.19.10", + 
"@esbuild/linux-ia32": "0.19.10", + "@esbuild/linux-loong64": "0.19.10", + "@esbuild/linux-mips64el": "0.19.10", + "@esbuild/linux-ppc64": "0.19.10", + "@esbuild/linux-riscv64": "0.19.10", + "@esbuild/linux-s390x": "0.19.10", + "@esbuild/linux-x64": "0.19.10", + "@esbuild/netbsd-x64": "0.19.10", + "@esbuild/openbsd-x64": "0.19.10", + "@esbuild/sunos-x64": "0.19.10", + "@esbuild/win32-arm64": "0.19.10", + "@esbuild/win32-ia32": "0.19.10", + "@esbuild/win32-x64": "0.19.10" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/local-pkg": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", + "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", + "dependencies": { + "mlly": "^1.4.2", + "pkg-types": "^1.0.3" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/p-limit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", + "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/rollup": { + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.9.1.tgz", + "integrity": "sha512-pgPO9DWzLoW/vIhlSoDByCzcpX92bKEorbgXuZrqxByte3JFk2xSW2JEeAcyLc9Ru9pqcNNW+Ob7ntsk2oT/Xw==", + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.9.1", + "@rollup/rollup-android-arm64": "4.9.1", + "@rollup/rollup-darwin-arm64": "4.9.1", + "@rollup/rollup-darwin-x64": "4.9.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.9.1", + "@rollup/rollup-linux-arm64-gnu": "4.9.1", + "@rollup/rollup-linux-arm64-musl": "4.9.1", + "@rollup/rollup-linux-riscv64-gnu": "4.9.1", + "@rollup/rollup-linux-x64-gnu": "4.9.1", + "@rollup/rollup-linux-x64-musl": "4.9.1", + "@rollup/rollup-win32-arm64-msvc": "4.9.1", + "@rollup/rollup-win32-ia32-msvc": "4.9.1", + "@rollup/rollup-win32-x64-msvc": "4.9.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/tinypool": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.1.tgz", + "integrity": "sha512-zBTCK0cCgRROxvs9c0CGK838sPkeokNGdQVUUwHAbynHFlmyJYj825f/oRs528HaIJ97lo0pLIlDUzwN+IorWg==", + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/vite": { + "version": "5.0.10", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.0.10.tgz", + "integrity": "sha512-2P8J7WWgmc355HUMlFrwofacvr98DAjoE52BfdbwQtyLH06XKwaL/FMnmKM2crF0iX4MpmMKoDlNCB1ok7zHCw==", + "dependencies": { + "esbuild": "^0.19.3", + "postcss": "^8.4.32", + "rollup": "^4.2.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + 
"optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/vite-node": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.1.0.tgz", + "integrity": "sha512-jV48DDUxGLEBdHCQvxL1mEh7+naVy+nhUUUaPAZLd3FJgXuxQiewHcfeZebbJ6onDqNGkP4r3MhQ342PRlG81Q==", + "dependencies": { + "cac": "^6.7.14", + "debug": "^4.3.4", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "vite": "^5.0.0" + }, + "bin": { + "vite-node": "vite-node.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + } + }, + "node_modules/@hirosystems/clarinet-sdk/node_modules/vitest": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.1.0.tgz", + "integrity": "sha512-oDFiCrw7dd3Jf06HoMtSRARivvyjHJaTxikFxuqJjO76U436PqlVw1uLn7a8OSPrhSfMGVaRakKpA2lePdw79A==", + "dependencies": { + "@vitest/expect": "1.1.0", + "@vitest/runner": "1.1.0", + "@vitest/snapshot": "1.1.0", + "@vitest/spy": "1.1.0", + "@vitest/utils": "1.1.0", + "acorn-walk": "^8.3.0", + "cac": "^6.7.14", + "chai": "^4.3.10", + "debug": "^4.3.4", + "execa": "^8.0.1", + "local-pkg": "^0.5.0", + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "std-env": "^3.5.0", + "strip-literal": "^1.3.0", + "tinybench": "^2.5.1", + "tinypool": "^0.8.1", + "vite": "^5.0.0", + "vite-node": "1.1.0", + "why-is-node-running": "^2.2.2" + }, + "bin": { + "vitest": "vitest.mjs" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "^1.0.0", + "@vitest/ui": "^1.0.0", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" } }, "node_modules/@humanwhocodes/module-importer": { @@ -552,34 +539,10 @@ "node": ">= 8" } }, - "node_modules/@rollup/rollup-android-arm-eabi": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.13.0.tgz", - "integrity": "sha512-5ZYPOuaAqEH/W3gYsRkxQATBW3Ii1MfaT4EQstTnLKViLi2gLSQmlmtTpGucNP3sXEpOiI5tdGhjdE111ekyEg==", - "cpu": [ - "arm" - ], - "optional": true, - "os": [ - "android" - ] - }, - "node_modules/@rollup/rollup-android-arm64": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.13.0.tgz", - "integrity": "sha512-BSbaCmn8ZadK3UAQdlauSvtaJjhlDEjS5hEVVIN3A4bbl3X+otyf/kOJV08bYiRxfejP3DXFzO2jz3G20107+Q==", - "cpu": [ - "arm64" - ], - "optional": 
true, - "os": [ - "android" - ] - }, "node_modules/@rollup/rollup-darwin-arm64": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.13.0.tgz", - "integrity": "sha512-Ovf2evVaP6sW5Ut0GHyUSOqA6tVKfrTHddtmxGQc1CTQa1Cw3/KMCDEEICZBbyppcwnhMwcDce9ZRxdWRpVd6g==", + "version": "4.9.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.9.1.tgz", + "integrity": "sha512-LtYcLNM+bhsaKAIGwVkh5IOWhaZhjTfNOkGzGqdHvhiCUVuJDalvDxEdSnhFzAn+g23wgsycmZk1vbnaibZwwA==", "cpu": [ "arm64" ], @@ -588,153 +551,10 @@ "darwin" ] }, - "node_modules/@rollup/rollup-darwin-x64": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.13.0.tgz", - "integrity": "sha512-U+Jcxm89UTK592vZ2J9st9ajRv/hrwHdnvyuJpa5A2ngGSVHypigidkQJP+YiGL6JODiUeMzkqQzbCG3At81Gg==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "darwin" - ] - }, - "node_modules/@rollup/rollup-linux-arm-gnueabihf": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.13.0.tgz", - "integrity": "sha512-8wZidaUJUTIR5T4vRS22VkSMOVooG0F4N+JSwQXWSRiC6yfEsFMLTYRFHvby5mFFuExHa/yAp9juSphQQJAijQ==", - "cpu": [ - "arm" - ], - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-gnu": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.13.0.tgz", - "integrity": "sha512-Iu0Kno1vrD7zHQDxOmvweqLkAzjxEVqNhUIXBsZ8hu8Oak7/5VTPrxOEZXYC1nmrBVJp0ZcL2E7lSuuOVaE3+w==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-arm64-musl": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.13.0.tgz", - "integrity": "sha512-C31QrW47llgVyrRjIwiOwsHFcaIwmkKi3PCroQY5aVq4H0A5v/vVVAtFsI1nfBngtoRpeREvZOkIhmRwUKkAdw==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-riscv64-gnu": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.13.0.tgz", - "integrity": "sha512-Oq90dtMHvthFOPMl7pt7KmxzX7E71AfyIhh+cPhLY9oko97Zf2C9tt/XJD4RgxhaGeAraAXDtqxvKE1y/j35lA==", - "cpu": [ - "riscv64" - ], - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-gnu": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.13.0.tgz", - "integrity": "sha512-yUD/8wMffnTKuiIsl6xU+4IA8UNhQ/f1sAnQebmE/lyQ8abjsVyDkyRkWop0kdMhKMprpNIhPmYlCxgHrPoXoA==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-linux-x64-musl": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.13.0.tgz", - "integrity": "sha512-9RyNqoFNdF0vu/qqX63fKotBh43fJQeYC98hCaf89DYQpv+xu0D8QFSOS0biA7cGuqJFOc1bJ+m2rhhsKcw1hw==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "linux" - ] - }, - "node_modules/@rollup/rollup-win32-arm64-msvc": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.13.0.tgz", - "integrity": "sha512-46ue8ymtm/5PUU6pCvjlic0z82qWkxv54GTJZgHrQUuZnVH+tvvSP0LsozIDsCBFO4VjJ13N68wqrKSeScUKdA==", - "cpu": [ - "arm64" 
- ], - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-ia32-msvc": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.13.0.tgz", - "integrity": "sha512-P5/MqLdLSlqxbeuJ3YDeX37srC8mCflSyTrUsgbU1c/U9j6l2g2GiIdYaGD9QjdMQPMSgYm7hgg0551wHyIluw==", - "cpu": [ - "ia32" - ], - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@rollup/rollup-win32-x64-msvc": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.13.0.tgz", - "integrity": "sha512-UKXUQNbO3DOhzLRwHSpa0HnhhCgNODvfoPWv2FCXme8N/ANFfhIPMGuOT+QuKd16+B5yxZ0HdpNlqPvTMS1qfw==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "win32" - ] - }, - "node_modules/@scure/base": { - "version": "1.1.5", - "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.5.tgz", - "integrity": "sha512-Brj9FiG2W1MRQSTB212YVPRrcbjkv48FoZi/u4l/zds/ieRrqsh7aUf6CLwkAq61oKXr/ZlTzlY66gLIj3TFTQ==", - "funding": { - "url": "https://paulmillr.com/funding/" - } - }, - "node_modules/@scure/bip39": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@scure/bip39/-/bip39-1.1.0.tgz", - "integrity": "sha512-pwrPOS16VeTKg98dYXQyIjJEcWfz7/1YJIwxUEPFfQPtc86Ym/1sVgQ2RLoD43AazMk2l/unK4ITySSpW2+82w==", - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ], - "dependencies": { - "@noble/hashes": "~1.1.1", - "@scure/base": "~1.1.0" - } - }, - "node_modules/@sinclair/typebox": { - "version": "0.27.8", - "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", - "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" + "node_modules/@sinclair/typebox": { + "version": "0.27.8", + "resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz", + "integrity": "sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==" }, "node_modules/@stacks/clarunit": { "version": "0.0.1", @@ -755,55 +575,10 @@ "clarunit": "src/cli.ts" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/android-arm": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.18.20.tgz", - "integrity": "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==", - "cpu": [ - "arm" - ], - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/android-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.18.20.tgz", - "integrity": "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/android-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.18.20.tgz", - "integrity": "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "android" - ], - "engines": { - "node": ">=12" - } - }, "node_modules/@stacks/clarunit/node_modules/@esbuild/darwin-arm64": { - "version": "0.18.20", - "resolved": 
"https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.18.20.tgz", - "integrity": "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==", + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.19.12.tgz", + "integrity": "sha512-B6IeSgZgtEzGC42jsI+YYu9Z3HKRxp8ZT3cqhvliEHovq8HSX2YX8lNocDn79gCKJXOSaEot9MVYky7AKjCs8g==", "cpu": [ "arm64" ], @@ -815,369 +590,360 @@ "node": ">=12" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/darwin-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.18.20.tgz", - "integrity": "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "darwin" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/freebsd-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.18.20.tgz", - "integrity": "sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/freebsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.18.20.tgz", - "integrity": "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "freebsd" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-arm": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.18.20.tgz", - "integrity": "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==", - "cpu": [ - "arm" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.18.20.tgz", - "integrity": "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==", + "node_modules/@stacks/clarunit/node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.12.0.tgz", + "integrity": "sha512-X64tZd8dRE/QTrBIEs63kaOBG0b5GVEd3ccoLtyf6IdXtHdh8h+I56C2yC3PtC9Ucnv0CpNFJLqKFVgCYe0lOQ==", "cpu": [ "arm64" ], "optional": true, "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-ia32": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.18.20.tgz", - "integrity": "sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==", - "cpu": [ - "ia32" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-loong64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.18.20.tgz", - "integrity": 
"sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==", - "cpu": [ - "loong64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } + "darwin" + ] }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-mips64el": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.18.20.tgz", - "integrity": "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==", - "cpu": [ - "mips64el" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" + "node_modules/@stacks/clarunit/node_modules/@vitest/expect": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.3.1.tgz", + "integrity": "sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==", + "dependencies": { + "@vitest/spy": "1.3.1", + "@vitest/utils": "1.3.1", + "chai": "^4.3.10" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-ppc64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.18.20.tgz", - "integrity": "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==", - "cpu": [ - "ppc64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" + "node_modules/@stacks/clarunit/node_modules/@vitest/runner": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.3.1.tgz", + "integrity": "sha512-5FzF9c3jG/z5bgCnjr8j9LNq/9OxV2uEBAITOXfoe3rdZJTdO7jzThth7FXv/6b+kdY65tpRQB7WaKhNZwX+Kg==", + "dependencies": { + "@vitest/utils": "1.3.1", + "p-limit": "^5.0.0", + "pathe": "^1.1.1" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-riscv64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.18.20.tgz", - "integrity": "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==", - "cpu": [ - "riscv64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" + "node_modules/@stacks/clarunit/node_modules/@vitest/snapshot": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.3.1.tgz", + "integrity": "sha512-EF++BZbt6RZmOlE3SuTPu/NfwBF6q4ABS37HHXzs2LUVPBLx2QoY/K0fKpRChSo8eLiuxcbCVfqKgx/dplCDuQ==", + "dependencies": { + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-s390x": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.18.20.tgz", - "integrity": "sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==", - "cpu": [ - "s390x" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" + "node_modules/@stacks/clarunit/node_modules/@vitest/spy": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.3.1.tgz", + "integrity": "sha512-xAcW+S099ylC9VLU7eZfdT9myV67Nor9w9zhf0mGCYJSO+zM2839tOeROTdikOi/8Qeusffvxb/MyBSOja1Uig==", + "dependencies": { + "tinyspy": "^2.2.0" + }, + "funding": { + "url": 
"https://opencollective.com/vitest" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/linux-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.18.20.tgz", - "integrity": "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" + "node_modules/@stacks/clarunit/node_modules/@vitest/utils": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.3.1.tgz", + "integrity": "sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ==", + "dependencies": { + "diff-sequences": "^29.6.3", + "estree-walker": "^3.0.3", + "loupe": "^2.3.7", + "pretty-format": "^29.7.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/netbsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.18.20.tgz", - "integrity": "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "netbsd" - ], + "node_modules/@stacks/clarunit/node_modules/esbuild": { + "version": "0.19.12", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", + "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "hasInstallScript": true, + "bin": { + "esbuild": "bin/esbuild" + }, "engines": { "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.19.12", + "@esbuild/android-arm": "0.19.12", + "@esbuild/android-arm64": "0.19.12", + "@esbuild/android-x64": "0.19.12", + "@esbuild/darwin-arm64": "0.19.12", + "@esbuild/darwin-x64": "0.19.12", + "@esbuild/freebsd-arm64": "0.19.12", + "@esbuild/freebsd-x64": "0.19.12", + "@esbuild/linux-arm": "0.19.12", + "@esbuild/linux-arm64": "0.19.12", + "@esbuild/linux-ia32": "0.19.12", + "@esbuild/linux-loong64": "0.19.12", + "@esbuild/linux-mips64el": "0.19.12", + "@esbuild/linux-ppc64": "0.19.12", + "@esbuild/linux-riscv64": "0.19.12", + "@esbuild/linux-s390x": "0.19.12", + "@esbuild/linux-x64": "0.19.12", + "@esbuild/netbsd-x64": "0.19.12", + "@esbuild/openbsd-x64": "0.19.12", + "@esbuild/sunos-x64": "0.19.12", + "@esbuild/win32-arm64": "0.19.12", + "@esbuild/win32-ia32": "0.19.12", + "@esbuild/win32-x64": "0.19.12" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/openbsd-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.18.20.tgz", - "integrity": "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "openbsd" - ], + "node_modules/@stacks/clarunit/node_modules/local-pkg": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", + "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", + "dependencies": { + "mlly": "^1.4.2", + "pkg-types": "^1.0.3" + }, "engines": { - "node": ">=12" + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/sunos-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.18.20.tgz", - 
"integrity": "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "sunos" - ], + "node_modules/@stacks/clarunit/node_modules/p-limit": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", + "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", + "dependencies": { + "yocto-queue": "^1.0.0" + }, "engines": { - "node": ">=12" + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/win32-arm64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.18.20.tgz", - "integrity": "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==", - "cpu": [ - "arm64" - ], - "optional": true, - "os": [ - "win32" - ], + "node_modules/@stacks/clarunit/node_modules/rollup": { + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.12.0.tgz", + "integrity": "sha512-wz66wn4t1OHIJw3+XU7mJJQV/2NAfw5OAk6G6Hoo3zcvz/XOfQ52Vgi+AN4Uxoxi0KBBwk2g8zPrTDA4btSB/Q==", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, "engines": { - "node": ">=12" + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.12.0", + "@rollup/rollup-android-arm64": "4.12.0", + "@rollup/rollup-darwin-arm64": "4.12.0", + "@rollup/rollup-darwin-x64": "4.12.0", + "@rollup/rollup-linux-arm-gnueabihf": "4.12.0", + "@rollup/rollup-linux-arm64-gnu": "4.12.0", + "@rollup/rollup-linux-arm64-musl": "4.12.0", + "@rollup/rollup-linux-riscv64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-gnu": "4.12.0", + "@rollup/rollup-linux-x64-musl": "4.12.0", + "@rollup/rollup-win32-arm64-msvc": "4.12.0", + "@rollup/rollup-win32-ia32-msvc": "4.12.0", + "@rollup/rollup-win32-x64-msvc": "4.12.0", + "fsevents": "~2.3.2" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/win32-ia32": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.18.20.tgz", - "integrity": "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==", - "cpu": [ - "ia32" - ], - "optional": true, - "os": [ - "win32" - ], - "engines": { - "node": ">=12" + "node_modules/@stacks/clarunit/node_modules/strip-literal": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.0.0.tgz", + "integrity": "sha512-f9vHgsCWBq2ugHAkGMiiYY+AYG0D/cbloKKg0nhaaaSNsujdGIpVXCNsrJpCKr5M0f4aI31mr13UjY6GAuXCKA==", + "dependencies": { + "js-tokens": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" } }, - "node_modules/@stacks/clarunit/node_modules/@esbuild/win32-x64": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.18.20.tgz", - "integrity": "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==", - "cpu": [ - "x64" - ], - "optional": true, - "os": [ - "win32" - ], + "node_modules/@stacks/clarunit/node_modules/tinypool": { + "version": "0.8.2", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.2.tgz", + "integrity": "sha512-SUszKYe5wgsxnNOVlBYO6IC+8VGWdVGZWAqUxp3UErNBtptZvWbwyUOyzNL59zigz2rCA92QiL3wvG+JDSdJdQ==", "engines": { - "node": ">=12" + 
"node": ">=14.0.0" } }, - "node_modules/@stacks/clarunit/node_modules/@hirosystems/clarinet-sdk": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@hirosystems/clarinet-sdk/-/clarinet-sdk-1.3.0.tgz", - "integrity": "sha512-CB6+E8gcFJp2Q+iYZ74opI+GJBsAEc54SdPdMY8lvz/H7lHonQtyjn0aOPl8Th5jcC3Cq+yqb6APDWbBr07vUA==", + "node_modules/@stacks/clarunit/node_modules/vite-node": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.3.1.tgz", + "integrity": "sha512-azbRrqRxlWTJEVbzInZCTchx0X69M/XPTCz4H+TLvlTcR/xH/3hkRqhOakT41fMJCMzXTu4UvegkZiEoJAWvng==", "dependencies": { - "@hirosystems/clarinet-sdk-wasm": "^2.2.0", - "@stacks/transactions": "^6.9.0", - "kolorist": "^1.8.0", - "prompts": "^2.4.2", - "vitest": "^1.0.4", - "yargs": "^17.7.2" + "cac": "^6.7.14", + "debug": "^4.3.4", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "vite": "^5.0.0" }, "bin": { - "clarinet-sdk": "dist/cjs/bin/index.js" + "vite-node": "vite-node.mjs" }, "engines": { - "node": ">=18.0.0" + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://opencollective.com/vitest" } }, - "node_modules/@stacks/clarunit/node_modules/esbuild": { - "version": "0.18.20", - "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", - "integrity": "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", - "hasInstallScript": true, + "node_modules/@stacks/clarunit/node_modules/vite-node/node_modules/vite": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.4.tgz", + "integrity": "sha512-n+MPqzq+d9nMVTKyewqw6kSt+R3CkvF9QAKY8obiQn8g1fwTscKxyfaYnC632HtBXAQGc1Yjomphwn1dtwGAHg==", + "dependencies": { + "esbuild": "^0.19.3", + "postcss": "^8.4.35", + "rollup": "^4.2.0" + }, "bin": { - "esbuild": "bin/esbuild" + "vite": "bin/vite.js" }, "engines": { - "node": ">=12" + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" }, "optionalDependencies": { - "@esbuild/android-arm": "0.18.20", - "@esbuild/android-arm64": "0.18.20", - "@esbuild/android-x64": "0.18.20", - "@esbuild/darwin-arm64": "0.18.20", - "@esbuild/darwin-x64": "0.18.20", - "@esbuild/freebsd-arm64": "0.18.20", - "@esbuild/freebsd-x64": "0.18.20", - "@esbuild/linux-arm": "0.18.20", - "@esbuild/linux-arm64": "0.18.20", - "@esbuild/linux-ia32": "0.18.20", - "@esbuild/linux-loong64": "0.18.20", - "@esbuild/linux-mips64el": "0.18.20", - "@esbuild/linux-ppc64": "0.18.20", - "@esbuild/linux-riscv64": "0.18.20", - "@esbuild/linux-s390x": "0.18.20", - "@esbuild/linux-x64": "0.18.20", - "@esbuild/netbsd-x64": "0.18.20", - "@esbuild/openbsd-x64": "0.18.20", - "@esbuild/sunos-x64": "0.18.20", - "@esbuild/win32-arm64": "0.18.20", - "@esbuild/win32-ia32": "0.18.20", - "@esbuild/win32-x64": "0.18.20" + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } } }, - "node_modules/@stacks/clarunit/node_modules/rollup": { - "version": "3.29.4", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", - "integrity": 
"sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", + "node_modules/@stacks/clarunit/node_modules/vitest": { + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.3.1.tgz", + "integrity": "sha512-/1QJqXs8YbCrfv/GPQ05wAZf2eakUPLPa18vkJAKE7RXOKfVHqMZZ1WlTjiwl6Gcn65M5vpNUB6EFLnEdRdEXQ==", + "dependencies": { + "@vitest/expect": "1.3.1", + "@vitest/runner": "1.3.1", + "@vitest/snapshot": "1.3.1", + "@vitest/spy": "1.3.1", + "@vitest/utils": "1.3.1", + "acorn-walk": "^8.3.2", + "chai": "^4.3.10", + "debug": "^4.3.4", + "execa": "^8.0.1", + "local-pkg": "^0.5.0", + "magic-string": "^0.30.5", + "pathe": "^1.1.1", + "picocolors": "^1.0.0", + "std-env": "^3.5.0", + "strip-literal": "^2.0.0", + "tinybench": "^2.5.1", + "tinypool": "^0.8.2", + "vite": "^5.0.0", + "vite-node": "1.3.1", + "why-is-node-running": "^2.2.2" + }, "bin": { - "rollup": "dist/bin/rollup" + "vitest": "vitest.mjs" }, "engines": { - "node": ">=14.18.0", - "npm": ">=8.0.0" + "node": "^18.0.0 || >=20.0.0" }, - "optionalDependencies": { - "fsevents": "~2.3.2" + "funding": { + "url": "https://opencollective.com/vitest" + }, + "peerDependencies": { + "@edge-runtime/vm": "*", + "@types/node": "^18.0.0 || >=20.0.0", + "@vitest/browser": "1.3.1", + "@vitest/ui": "1.3.1", + "happy-dom": "*", + "jsdom": "*" + }, + "peerDependenciesMeta": { + "@edge-runtime/vm": { + "optional": true + }, + "@types/node": { + "optional": true + }, + "@vitest/browser": { + "optional": true + }, + "@vitest/ui": { + "optional": true + }, + "happy-dom": { + "optional": true + }, + "jsdom": { + "optional": true + } } }, - "node_modules/@stacks/clarunit/node_modules/vite": { - "version": "4.5.2", - "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.2.tgz", - "integrity": "sha512-tBCZBNSBbHQkaGyhGCDUGqeo2ph8Fstyp6FMSvTtsXeZSPpSMGlviAOav2hxVTqFcx8Hj/twtWKsMJXNY0xI8w==", + "node_modules/@stacks/clarunit/node_modules/vitest/node_modules/vite": { + "version": "5.1.4", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.4.tgz", + "integrity": "sha512-n+MPqzq+d9nMVTKyewqw6kSt+R3CkvF9QAKY8obiQn8g1fwTscKxyfaYnC632HtBXAQGc1Yjomphwn1dtwGAHg==", "dependencies": { - "esbuild": "^0.18.10", - "postcss": "^8.4.27", - "rollup": "^3.27.1" + "esbuild": "^0.19.3", + "postcss": "^8.4.35", + "rollup": "^4.2.0" }, "bin": { "vite": "bin/vite.js" }, "engines": { - "node": "^14.18.0 || >=16.0.0" + "node": "^18.0.0 || >=20.0.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" }, "optionalDependencies": { - "fsevents": "~2.3.2" + "fsevents": "~2.3.3" }, "peerDependencies": { - "@types/node": ">= 14", + "@types/node": "^18.0.0 || >=20.0.0", "less": "*", "lightningcss": "^1.21.0", "sass": "*", @@ -1209,15 +975,6 @@ } } }, - "node_modules/@stacks/clarunit/node_modules/vitest-environment-clarinet": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-1.1.0.tgz", - "integrity": "sha512-abf6VPWVyzZ6Ynz3kNqKZGsJuS0MH5xKyJeR9dgE5Y4dQ34KaBoTeA2jzgamDUxyX1y+5/yA/SBQab4sZjX8Sg==", - "peerDependencies": { - "@hirosystems/clarinet-sdk": "1", - "vitest": "1" - } - }, "node_modules/@stacks/common": { "version": "6.10.0", "resolved": "https://registry.npmjs.org/@stacks/common/-/common-6.10.0.tgz", @@ -1227,22 +984,6 @@ "@types/node": "^18.0.4" } }, - "node_modules/@stacks/encryption": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.12.0.tgz", - "integrity": 
"sha512-CubE51pHrcxx3yA+xapevPgA9UDleIoEaUZ06/9uD91B42yvTg37HyS8t06rzukU9q+X7Cv2I/+vbuf4nJIo8g==", - "dependencies": { - "@noble/hashes": "1.1.5", - "@noble/secp256k1": "1.7.1", - "@scure/bip39": "1.1.0", - "@stacks/common": "^6.10.0", - "@types/node": "^18.0.4", - "base64-js": "^1.5.1", - "bs58": "^5.0.0", - "ripemd160-min": "^0.0.6", - "varuint-bitcoin": "^1.1.2" - } - }, "node_modules/@stacks/network": { "version": "6.11.3", "resolved": "https://registry.npmjs.org/@stacks/network/-/network-6.11.3.tgz", @@ -1252,36 +993,6 @@ "cross-fetch": "^3.1.5" } }, - "node_modules/@stacks/stacking": { - "version": "6.12.0", - "resolved": "https://registry.npmjs.org/@stacks/stacking/-/stacking-6.12.0.tgz", - "integrity": "sha512-XBxwbaCGRPnjpjspb3CBXrlZl6xR+gghLMz9PQNPdpuIbBDFa0SGeHgqjtpVU+2DVL4UyBx8PVsAWtlssyVGng==", - "dependencies": { - "@scure/base": "1.1.1", - "@stacks/common": "^6.10.0", - "@stacks/encryption": "^6.12.0", - "@stacks/network": "^6.11.3", - "@stacks/stacks-blockchain-api-types": "^0.61.0", - "@stacks/transactions": "^6.12.0", - "bs58": "^5.0.0" - } - }, - "node_modules/@stacks/stacking/node_modules/@scure/base": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/@scure/base/-/base-1.1.1.tgz", - "integrity": "sha512-ZxOhsSyxYwLJj3pLZCefNitxsj093tb2vq90mp2txoYeBqbcjDjqFhyM8eUjq/uFm6zJ+mUuqxlS2FkuSY1MTA==", - "funding": [ - { - "type": "individual", - "url": "https://paulmillr.com/funding/" - } - ] - }, - "node_modules/@stacks/stacks-blockchain-api-types": { - "version": "0.61.0", - "resolved": "https://registry.npmjs.org/@stacks/stacks-blockchain-api-types/-/stacks-blockchain-api-types-0.61.0.tgz", - "integrity": "sha512-yPOfTUboo5eA9BZL/hqMcM71GstrFs9YWzOrJFPeP4cOO1wgYvAcckgBRbgiE3NqeX0A7SLZLDAXLZbATuRq9w==" - }, "node_modules/@stacks/transactions": { "version": "6.12.0", "resolved": "https://registry.npmjs.org/@stacks/transactions/-/transactions-6.12.0.tgz", @@ -1303,15 +1014,28 @@ "@types/node": "*" } }, + "node_modules/@types/chai": { + "version": "4.3.9", + "resolved": "https://registry.npmjs.org/@types/chai/-/chai-4.3.9.tgz", + "integrity": "sha512-69TtiDzu0bcmKQv3yg1Zx409/Kd7r0b5F1PfpYJfSHzLGtB53547V4u+9iqKYsTu/O2ai6KTb0TInNpvuQ3qmg==" + }, + "node_modules/@types/chai-subset": { + "version": "1.3.4", + "resolved": "https://registry.npmjs.org/@types/chai-subset/-/chai-subset-1.3.4.tgz", + "integrity": "sha512-CCWNXrJYSUIojZ1149ksLl3AN9cmZ5djf+yUoVVV+NuYrtydItQVlL2ZDqyC6M6O9LWRnVf8yYDxbXHO2TfQZg==", + "dependencies": { + "@types/chai": "*" + } + }, "node_modules/@types/estree": { "version": "1.0.5", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==" }, "node_modules/@types/node": { - "version": "18.19.23", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.23.tgz", - "integrity": "sha512-wtE3d0OUfNKtZYAqZb8HAWGxxXsImJcPUAgZNw+dWFxO6s5tIwIjyKnY76tsTatsNCLJPkVYwUpq15D38ng9Aw==", + "version": "18.18.8", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.18.8.tgz", + "integrity": "sha512-OLGBaaK5V3VRBS1bAkMVP2/W9B+H8meUfl866OrMNQqt7wDgdpWPp5o6gmIc9pB+lIQHSq4ZL8ypeH1vPxcPaQ==", "dependencies": { "undici-types": "~5.26.4" } @@ -1322,12 +1046,12 @@ "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==" }, "node_modules/@vitest/expect": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-1.3.1.tgz", - "integrity": 
"sha512-xofQFwIzfdmLLlHa6ag0dPV8YsnKOCP1KdAeVVh34vSjN2dcUiXYCD9htu/9eM7t8Xln4v03U9HLxLpPlsXdZw==", + "version": "0.34.6", + "resolved": "https://registry.npmjs.org/@vitest/expect/-/expect-0.34.6.tgz", + "integrity": "sha512-QUzKpUQRc1qC7qdGo7rMK3AkETI7w18gTCUrsNnyjjJKYiuUB9+TQK3QnR1unhCnWRC0AbKv2omLGQDF/mIjOw==", "dependencies": { - "@vitest/spy": "1.3.1", - "@vitest/utils": "1.3.1", + "@vitest/spy": "0.34.6", + "@vitest/utils": "0.34.6", "chai": "^4.3.10" }, "funding": { @@ -1335,85 +1059,59 @@ } }, "node_modules/@vitest/runner": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-1.3.1.tgz", - "integrity": "sha512-5FzF9c3jG/z5bgCnjr8j9LNq/9OxV2uEBAITOXfoe3rdZJTdO7jzThth7FXv/6b+kdY65tpRQB7WaKhNZwX+Kg==", + "version": "0.34.6", + "resolved": "https://registry.npmjs.org/@vitest/runner/-/runner-0.34.6.tgz", + "integrity": "sha512-1CUQgtJSLF47NnhN+F9X2ycxUP0kLHQ/JWvNHbeBfwW8CzEGgeskzNnHDyv1ieKTltuR6sdIHV+nmR6kPxQqzQ==", "dependencies": { - "@vitest/utils": "1.3.1", - "p-limit": "^5.0.0", + "@vitest/utils": "0.34.6", + "p-limit": "^4.0.0", "pathe": "^1.1.1" }, "funding": { "url": "https://opencollective.com/vitest" } }, - "node_modules/@vitest/runner/node_modules/p-limit": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-5.0.0.tgz", - "integrity": "sha512-/Eaoq+QyLSiXQ4lyYV23f14mZRQcXnxfHrN0vCai+ak9G0pp9iEQukIIZq5NccEvwRB8PUnZT0KsOoDCINS1qQ==", - "dependencies": { - "yocto-queue": "^1.0.0" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/@vitest/runner/node_modules/yocto-queue": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", - "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", - "engines": { - "node": ">=12.20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/@vitest/snapshot": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-1.3.1.tgz", - "integrity": "sha512-EF++BZbt6RZmOlE3SuTPu/NfwBF6q4ABS37HHXzs2LUVPBLx2QoY/K0fKpRChSo8eLiuxcbCVfqKgx/dplCDuQ==", + "version": "0.34.6", + "resolved": "https://registry.npmjs.org/@vitest/snapshot/-/snapshot-0.34.6.tgz", + "integrity": "sha512-B3OZqYn6k4VaN011D+ve+AA4whM4QkcwcrwaKwAbyyvS/NB1hCWjFIBQxAQQSQir9/RtyAAGuq+4RJmbn2dH4w==", "dependencies": { - "magic-string": "^0.30.5", + "magic-string": "^0.30.1", "pathe": "^1.1.1", - "pretty-format": "^29.7.0" + "pretty-format": "^29.5.0" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/spy": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-1.3.1.tgz", - "integrity": "sha512-xAcW+S099ylC9VLU7eZfdT9myV67Nor9w9zhf0mGCYJSO+zM2839tOeROTdikOi/8Qeusffvxb/MyBSOja1Uig==", + "version": "0.34.6", + "resolved": "https://registry.npmjs.org/@vitest/spy/-/spy-0.34.6.tgz", + "integrity": "sha512-xaCvneSaeBw/cz8ySmF7ZwGvL0lBjfvqc1LpQ/vcdHEvpLn3Ff1vAvjw+CoGn0802l++5L/pxb7whwcWAw+DUQ==", "dependencies": { - "tinyspy": "^2.2.0" + "tinyspy": "^2.1.1" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/@vitest/utils": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/@vitest/utils/-/utils-1.3.1.tgz", - "integrity": "sha512-d3Waie/299qqRyHTm2DjADeTaNdNSVsnwHPWrs20JMpjh6eiVq7ggggweO8rc4arhf6rRkWuHKwvxGvejUXZZQ==", + "version": "0.34.6", + "resolved": 
"https://registry.npmjs.org/@vitest/utils/-/utils-0.34.6.tgz", + "integrity": "sha512-IG5aDD8S6zlvloDsnzHw0Ut5xczlF+kv2BOTo+iXfPr54Yhi5qbVOgGB1hZaVq4iJ4C/MZ2J0y15IlsV/ZcI0A==", "dependencies": { - "diff-sequences": "^29.6.3", - "estree-walker": "^3.0.3", - "loupe": "^2.3.7", - "pretty-format": "^29.7.0" + "diff-sequences": "^29.4.3", + "loupe": "^2.3.6", + "pretty-format": "^29.5.0" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/acorn": { - "version": "8.11.3", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz", - "integrity": "sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg==", + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", + "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", "bin": { "acorn": "bin/acorn" }, @@ -1461,14 +1159,11 @@ } }, "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dependencies": { - "color-convert": "^2.0.1" - }, + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", + "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", "engines": { - "node": ">=8" + "node": ">=10" }, "funding": { "url": "https://github.com/chalk/ansi-styles?sponsor=1" @@ -1500,33 +1195,14 @@ } }, "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" - }, - "node_modules/base-x": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", - "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/base-x": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/base-x/-/base-x-4.0.0.tgz", + "integrity": "sha512-FuwxlW4H5kh37X/oW59pwTzzTKRzfrrQwhmyspRM7swOEZcHtDZSCt45U6oKgtuFE+WYPblePMVIPR4RZrh/hw==" }, "node_modules/binary-extensions": { "version": "2.2.0", @@ -1556,14 +1232,6 @@ "node": ">=8" } }, - "node_modules/bs58": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/bs58/-/bs58-5.0.0.tgz", - "integrity": "sha512-r+ihvQJvahgYT50JD05dyJNKlmmSlMoOGwn1lCcEzanPglg7TxYjioQUYehQ9mAR/+hOSd2jRc/Z2y5UxBymvQ==", - "dependencies": { - "base-x": "^4.0.0" - } - }, "node_modules/c32check": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/c32check/-/c32check-2.0.0.tgz", @@ 
-1601,9 +1269,9 @@ } }, "node_modules/chai": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/chai/-/chai-4.4.1.tgz", - "integrity": "sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g==", + "version": "4.3.10", + "resolved": "https://registry.npmjs.org/chai/-/chai-4.3.10.tgz", + "integrity": "sha512-0UXG04VuVbruMUYbJ6JctvH0YnC/4q3/AkT18q4NaITo91CUm0liMS9VqzT9vZhVQ/1eqPanMWjBM+Juhfb/9g==", "dependencies": { "assertion-error": "^1.1.0", "check-error": "^1.0.3", @@ -1632,6 +1300,36 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/chalk/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/chalk/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/chalk/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, "node_modules/check-error": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/check-error/-/check-error-1.0.3.tgz", @@ -1644,9 +1342,15 @@ } }, "node_modules/chokidar": { - "version": "3.6.0", - "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", - "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "version": "3.5.3", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.5.3.tgz", + "integrity": "sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==", + "funding": [ + { + "type": "individual", + "url": "https://paulmillr.com/funding/" + } + ], "dependencies": { "anymatch": "~3.1.2", "braces": "~3.0.2", @@ -1659,9 +1363,6 @@ "engines": { "node": ">= 8.10.0" }, - "funding": { - "url": "https://paulmillr.com/funding/" - }, "optionalDependencies": { "fsevents": "~2.3.2" } @@ -1712,35 +1413,11 @@ "wrap-ansi": "^5.1.0" } }, - "node_modules/chokidar-cli/node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/chokidar-cli/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" - }, "node_modules/chokidar-cli/node_modules/emoji-regex": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-7.0.3.tgz", "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" }, - 
"node_modules/chokidar-cli/node_modules/find-up": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", - "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", - "dependencies": { - "locate-path": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, "node_modules/chokidar-cli/node_modules/is-fullwidth-code-point": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", @@ -1749,51 +1426,6 @@ "node": ">=4" } }, - "node_modules/chokidar-cli/node_modules/locate-path": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", - "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", - "dependencies": { - "p-locate": "^3.0.0", - "path-exists": "^3.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/chokidar-cli/node_modules/p-limit": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", - "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", - "dependencies": { - "p-try": "^2.0.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/chokidar-cli/node_modules/p-locate": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", - "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", - "dependencies": { - "p-limit": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/chokidar-cli/node_modules/path-exists": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", - "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", - "engines": { - "node": ">=4" - } - }, "node_modules/chokidar-cli/node_modules/string-width": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/string-width/-/string-width-3.1.0.tgz", @@ -1876,20 +1508,17 @@ } }, "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "version": "1.9.3", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", + "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" + "color-name": "1.1.3" } }, "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", + "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==" }, "node_modules/concat-map": { "version": "0.0.1", @@ -1982,9 +1611,9 @@ "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" }, "node_modules/esbuild": { - "version": "0.19.12", - "resolved": 
"https://registry.npmjs.org/esbuild/-/esbuild-0.19.12.tgz", - "integrity": "sha512-aARqgq8roFBj054KvQr5f1sFu0D65G+miZRCuJyJ0G13Zwx7vRar5Zhn2tkQNzIXcBrNVsv/8stehpj+GAjgbg==", + "version": "0.18.20", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.18.20.tgz", + "integrity": "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==", "hasInstallScript": true, "bin": { "esbuild": "bin/esbuild" @@ -1993,35 +1622,34 @@ "node": ">=12" }, "optionalDependencies": { - "@esbuild/aix-ppc64": "0.19.12", - "@esbuild/android-arm": "0.19.12", - "@esbuild/android-arm64": "0.19.12", - "@esbuild/android-x64": "0.19.12", - "@esbuild/darwin-arm64": "0.19.12", - "@esbuild/darwin-x64": "0.19.12", - "@esbuild/freebsd-arm64": "0.19.12", - "@esbuild/freebsd-x64": "0.19.12", - "@esbuild/linux-arm": "0.19.12", - "@esbuild/linux-arm64": "0.19.12", - "@esbuild/linux-ia32": "0.19.12", - "@esbuild/linux-loong64": "0.19.12", - "@esbuild/linux-mips64el": "0.19.12", - "@esbuild/linux-ppc64": "0.19.12", - "@esbuild/linux-riscv64": "0.19.12", - "@esbuild/linux-s390x": "0.19.12", - "@esbuild/linux-x64": "0.19.12", - "@esbuild/netbsd-x64": "0.19.12", - "@esbuild/openbsd-x64": "0.19.12", - "@esbuild/sunos-x64": "0.19.12", - "@esbuild/win32-arm64": "0.19.12", - "@esbuild/win32-ia32": "0.19.12", - "@esbuild/win32-x64": "0.19.12" + "@esbuild/android-arm": "0.18.20", + "@esbuild/android-arm64": "0.18.20", + "@esbuild/android-x64": "0.18.20", + "@esbuild/darwin-arm64": "0.18.20", + "@esbuild/darwin-x64": "0.18.20", + "@esbuild/freebsd-arm64": "0.18.20", + "@esbuild/freebsd-x64": "0.18.20", + "@esbuild/linux-arm": "0.18.20", + "@esbuild/linux-arm64": "0.18.20", + "@esbuild/linux-ia32": "0.18.20", + "@esbuild/linux-loong64": "0.18.20", + "@esbuild/linux-mips64el": "0.18.20", + "@esbuild/linux-ppc64": "0.18.20", + "@esbuild/linux-riscv64": "0.18.20", + "@esbuild/linux-s390x": "0.18.20", + "@esbuild/linux-x64": "0.18.20", + "@esbuild/netbsd-x64": "0.18.20", + "@esbuild/openbsd-x64": "0.18.20", + "@esbuild/sunos-x64": "0.18.20", + "@esbuild/win32-arm64": "0.18.20", + "@esbuild/win32-ia32": "0.18.20", + "@esbuild/win32-x64": "0.18.20" } }, "node_modules/escalade": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.2.tgz", - "integrity": "sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.1.1.tgz", + "integrity": "sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==", "engines": { "node": ">=6" } @@ -2103,29 +1731,105 @@ "node": "^12.22.0 || ^14.17.0 || >=16.0.0" }, "funding": { - "url": "https://opencollective.com/eslint" + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dependencies": { + "locate-path": "^6.0.0", + 
"path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/eslint/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/eslint-visitor-keys": { - "version": "3.4.3", - "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", - "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "node_modules/eslint/node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", "engines": { - "node": "^12.22.0 || ^14.17.0 || >=16.0.0" - }, - "funding": { - "url": "https://opencollective.com/eslint" + "node": ">=8" } }, - "node_modules/eslint/node_modules/glob-parent": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", - "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", - "dependencies": { - "is-glob": "^4.0.3" - }, + "node_modules/eslint/node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", "engines": { - "node": ">=10.13.0" + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/espree": { @@ -2213,9 +1917,9 @@ } }, "node_modules/fast-check": { - "version": "3.16.0", - "resolved": "https://registry.npmjs.org/fast-check/-/fast-check-3.16.0.tgz", - "integrity": "sha512-k8GtQHi4pJoRQ1gVDFQno+/FVkowo/ehiz/aCj9O/D7HRWb1sSFzNrw+iPVU8QlWtH+jNwbuN+dDVg3QkS56DQ==", + "version": "3.15.1", + "resolved": 
"https://registry.npmjs.org/fast-check/-/fast-check-3.15.1.tgz", + "integrity": "sha512-GutOXZ+SCxGaFWfHe0Pbeq8PrkpGtPxA9/hdkI3s9YzqeMlrq5RdJ+QfYZ/S93jMX+tAyqgW0z5c9ppD+vkGUw==", "funding": [ { "type": "individual", @@ -2279,18 +1983,14 @@ } }, "node_modules/find-up": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", - "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-3.0.0.tgz", + "integrity": "sha512-1yD6RmLI1XBfxugvORwlck6f75tYL+iR0jqwsOrOxMZyGYqUuDhJ0l4AXdO1iX/FTs9cBAMEk1gWSEx1kSbylg==", "dependencies": { - "locate-path": "^6.0.0", - "path-exists": "^4.0.0" + "locate-path": "^3.0.0" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=6" } }, "node_modules/flat-cache": { @@ -2462,9 +2162,9 @@ } }, "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", + "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" }, "node_modules/is-binary-path": { "version": "2.1.0", @@ -2568,9 +2268,9 @@ "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==" }, "node_modules/jsonc-parser": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.1.tgz", - "integrity": "sha512-AilxAyFOAcK5wA1+LeaySVBrHsGQvUFCDWXKpZjzaL0PqW+xfBOttn8GNtWKFWqneyMZj41MWF9Kl6iPWLwgOA==" + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/jsonc-parser/-/jsonc-parser-3.2.0.tgz", + "integrity": "sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==" }, "node_modules/keyv": { "version": "4.5.4", @@ -2606,13 +2306,9 @@ } }, "node_modules/local-pkg": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.5.0.tgz", - "integrity": "sha512-ok6z3qlYyCDS4ZEU27HaU6x/xZa9Whf8jD4ptH5UZTQYZVYeb9bnZ3ojVhiJNLiXK1Hfc0GNbLXcmZ5plLDDBg==", - "dependencies": { - "mlly": "^1.4.2", - "pkg-types": "^1.0.3" - }, + "version": "0.4.3", + "resolved": "https://registry.npmjs.org/local-pkg/-/local-pkg-0.4.3.tgz", + "integrity": "sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==", "engines": { "node": ">=14" }, @@ -2621,17 +2317,15 @@ } }, "node_modules/locate-path": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", - "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-3.0.0.tgz", + "integrity": "sha512-7AO748wWnIhNqAuaty2ZWHkQHRSNfPVIsPIfwEOWO22AmaoVrWavlOcMR5nzTLNYvp36X220/maaRsrec1G65A==", "dependencies": { - "p-locate": "^5.0.0" + "p-locate": "^3.0.0", + "path-exists": "^3.0.0" }, "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" + "node": ">=6" } }, "node_modules/lodash.clonedeep": { @@ -2663,9 +2357,9 @@ } }, "node_modules/magic-string": { - "version": "0.30.8", - "resolved": 
"https://registry.npmjs.org/magic-string/-/magic-string-0.30.8.tgz", - "integrity": "sha512-ISQTe55T2ao7XtlAStud6qwYPZjE4GK1S/BeVPus4jrq6JuOnQ00YKQC581RWhR122W7msZV263KzVeLoqidyQ==", + "version": "0.30.5", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.5.tgz", + "integrity": "sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==", "dependencies": { "@jridgewell/sourcemap-codec": "^1.4.15" }, @@ -2701,14 +2395,14 @@ } }, "node_modules/mlly": { - "version": "1.6.1", - "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.6.1.tgz", - "integrity": "sha512-vLgaHvaeunuOXHSmEbZ9izxPx3USsk8KCQ8iC+aTlp5sKRSoZvwhHh5L9VbKSaVC6sJDqbyohIS76E2VmHIPAA==", + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/mlly/-/mlly-1.4.2.tgz", + "integrity": "sha512-i/Ykufi2t1EZ6NaPLdfnZk2AX8cs0d+mTzVKuPfqPKPatxLApaBoxJQ9x1/uckXtrS/U5oisPMDkNs0yQTaBRg==", "dependencies": { - "acorn": "^8.11.3", - "pathe": "^1.1.2", + "acorn": "^8.10.0", + "pathe": "^1.1.1", "pkg-types": "^1.0.3", - "ufo": "^1.3.2" + "ufo": "^1.3.0" } }, "node_modules/ms": { @@ -2766,9 +2460,9 @@ } }, "node_modules/npm-run-path": { - "version": "5.3.0", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.3.0.tgz", - "integrity": "sha512-ppwTtiJZq0O/ai0z7yfudtBpWIoxM8yE6nHi1X47eFR2EWORqfbu6CnPlNsjeN683eT0qG6H/Pyf9fCcvjnnnQ==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-5.2.0.tgz", + "integrity": "sha512-W4/tgAXFqFA0iL7fk0+uQ3g7wkL8xJmx3XdK0VGb4cHW//eZTtKGvFBBoRKVTpY7n6ze4NL9ly7rgXcHufqXKg==", "dependencies": { "path-key": "^4.0.0" }, @@ -2829,28 +2523,39 @@ } }, "node_modules/p-limit": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", - "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-4.0.0.tgz", + "integrity": "sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==", "dependencies": { - "yocto-queue": "^0.1.0" + "yocto-queue": "^1.0.0" }, "engines": { - "node": ">=10" + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/p-locate": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", - "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-3.0.0.tgz", + "integrity": "sha512-x+12w/To+4GFfgJhBEpiDcLozRJGegY+Ei7/z0tSLkMmxGZNybVMSfWj9aJn8Z5Fc7dBUNJOOVgPv2H7IwulSQ==", "dependencies": { - "p-limit": "^3.0.2" + "p-limit": "^2.0.0" }, "engines": { - "node": ">=10" + "node": ">=6" + } + }, + "node_modules/p-locate/node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -2885,11 +2590,11 @@ } }, "node_modules/path-exists": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", - "integrity": 
"sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-3.0.0.tgz", + "integrity": "sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==", "engines": { - "node": ">=8" + "node": ">=4" } }, "node_modules/path-is-absolute": { @@ -2909,9 +2614,9 @@ } }, "node_modules/pathe": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.2.tgz", - "integrity": "sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==" + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/pathe/-/pathe-1.1.1.tgz", + "integrity": "sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==" }, "node_modules/pathval": { "version": "1.1.1", @@ -2995,17 +2700,6 @@ "node": "^14.15.0 || ^16.10.0 || >=18.0.0" } }, - "node_modules/pretty-format/node_modules/ansi-styles": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-5.2.0.tgz", - "integrity": "sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, "node_modules/process": { "version": "0.11.10", "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", @@ -3128,42 +2822,18 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/ripemd160-min": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/ripemd160-min/-/ripemd160-min-0.0.6.tgz", - "integrity": "sha512-+GcJgQivhs6S9qvLogusiTcS9kQUfgR75whKuy5jIhuiOfQuJ8fjqxV6EGD5duH1Y/FawFUMtMhyeq3Fbnib8A==", - "engines": { - "node": ">=8" - } - }, "node_modules/rollup": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.13.0.tgz", - "integrity": "sha512-3YegKemjoQnYKmsBlOHfMLVPPA5xLkQ8MHLLSw/fBrFaVkEayL51DilPpNNLq1exr98F2B1TzrV0FUlN3gWRPg==", - "dependencies": { - "@types/estree": "1.0.5" - }, + "version": "3.29.4", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-3.29.4.tgz", + "integrity": "sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==", "bin": { "rollup": "dist/bin/rollup" }, "engines": { - "node": ">=18.0.0", + "node": ">=14.18.0", "npm": ">=8.0.0" }, "optionalDependencies": { - "@rollup/rollup-android-arm-eabi": "4.13.0", - "@rollup/rollup-android-arm64": "4.13.0", - "@rollup/rollup-darwin-arm64": "4.13.0", - "@rollup/rollup-darwin-x64": "4.13.0", - "@rollup/rollup-linux-arm-gnueabihf": "4.13.0", - "@rollup/rollup-linux-arm64-gnu": "4.13.0", - "@rollup/rollup-linux-arm64-musl": "4.13.0", - "@rollup/rollup-linux-riscv64-gnu": "4.13.0", - "@rollup/rollup-linux-x64-gnu": "4.13.0", - "@rollup/rollup-linux-x64-musl": "4.13.0", - "@rollup/rollup-win32-arm64-msvc": "4.13.0", - "@rollup/rollup-win32-ia32-msvc": "4.13.0", - "@rollup/rollup-win32-x64-msvc": "4.13.0", "fsevents": "~2.3.2" } }, @@ -3189,25 +2859,6 @@ "queue-microtask": "^1.2.2" } }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - 
"url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, "node_modules/set-blocking": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/set-blocking/-/set-blocking-2.0.0.tgz", @@ -3318,11 +2969,11 @@ } }, "node_modules/strip-literal": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-2.0.0.tgz", - "integrity": "sha512-f9vHgsCWBq2ugHAkGMiiYY+AYG0D/cbloKKg0nhaaaSNsujdGIpVXCNsrJpCKr5M0f4aI31mr13UjY6GAuXCKA==", + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/strip-literal/-/strip-literal-1.3.0.tgz", + "integrity": "sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==", "dependencies": { - "js-tokens": "^8.0.2" + "acorn": "^8.10.0" }, "funding": { "url": "https://github.com/sponsors/antfu" @@ -3345,22 +2996,22 @@ "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" }, "node_modules/tinybench": { - "version": "2.6.0", - "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.6.0.tgz", - "integrity": "sha512-N8hW3PG/3aOoZAN5V/NSAEDz0ZixDSSt5b/a05iqtpgfLWMSVuCo7w0k2vVvEjdrIoeGqZzweX2WlyioNIHchA==" + "version": "2.5.1", + "resolved": "https://registry.npmjs.org/tinybench/-/tinybench-2.5.1.tgz", + "integrity": "sha512-65NKvSuAVDP/n4CqH+a9w2kTlLReS9vhsAP06MWx+/89nMinJyB2icyl58RIcqCmIggpojIGeuJGhjU1aGMBSg==" }, "node_modules/tinypool": { - "version": "0.8.2", - "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.8.2.tgz", - "integrity": "sha512-SUszKYe5wgsxnNOVlBYO6IC+8VGWdVGZWAqUxp3UErNBtptZvWbwyUOyzNL59zigz2rCA92QiL3wvG+JDSdJdQ==", + "version": "0.7.0", + "resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.7.0.tgz", + "integrity": "sha512-zSYNUlYSMhJ6Zdou4cJwo/p7w5nmAH17GRfU/ui3ctvjXFErXXkruT4MWW6poDeXgCaIBlGLrfU6TbTXxyGMww==", "engines": { "node": ">=14.0.0" } }, "node_modules/tinyspy": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.1.tgz", - "integrity": "sha512-KYad6Vy5VDWV4GH3fjpseMQ/XU2BhIYP7Vzd0LG44qRWm/Yt2WCOTicFdvmgo6gWaqooMQCawTtILVQJupKu7A==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tinyspy/-/tinyspy-2.2.0.tgz", + "integrity": "sha512-d2eda04AN/cPOR89F7Xv5bK/jrQEhmcLFe6HFldoeO9AJtps+fqEnh486vnT/8y4bw38pSyxDcTCAq+Ks2aJTg==", "engines": { "node": ">=14.0.0" } @@ -3412,9 +3063,9 @@ } }, "node_modules/typescript": { - "version": "5.4.2", - "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.4.2.tgz", - "integrity": "sha512-+2/g0Fds1ERlP6JsakQQDXjZdZMM+rqpamFZJEKh4kwTIn3iDkgKtby0CeNd5ATNZ4Ry1ax15TMx0W2V+miizQ==", + "version": "5.2.2", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", + "integrity": "sha512-mI4WrpHsbCIcwT9cF4FZvr80QUeKvsUsUvKDoR+X/7XHQH98xYD8YHZg7ANtz2GtZt/CBq2QJ0thkGJMHfqc1w==", "bin": { "tsc": "bin/tsc", "tsserver": "bin/tsserver" @@ -3424,9 +3075,9 @@ } }, "node_modules/ufo": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.4.0.tgz", - "integrity": "sha512-Hhy+BhRBleFjpJ2vchUNN40qgkh0366FWJGqVLYBHev0vpHTrXSA0ryT+74UiW6KWsldNurQMKGqCm1M2zBciQ==" + "version": "1.3.1", + "resolved": "https://registry.npmjs.org/ufo/-/ufo-1.3.1.tgz", + "integrity": "sha512-uY/99gMLIOlJPwATcMVYfqDSxUR9//AUcgZMzwfSTJPDKzA1S8mX4VLqa+fiAtveraQUBCz4FFcwVZBGbwBXIw==" }, "node_modules/undici-types": { "version": "5.26.5", @@ -3449,42 +3100,29 @@ "inherits": "2.0.3" } }, - 
"node_modules/util/node_modules/inherits": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.3.tgz", - "integrity": "sha512-x00IRNXNy63jwGkJmzPigoySHbaqpNuzKbBOmzK+g2OdZpQ9w+sxCN+VSB3ja7IAge2OP2qpfxTjeNcyjmW1uw==" - }, - "node_modules/varuint-bitcoin": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/varuint-bitcoin/-/varuint-bitcoin-1.1.2.tgz", - "integrity": "sha512-4EVb+w4rx+YfVM32HQX42AbbT7/1f5zwAYhIujKXKk8NQK+JfRVl3pqT3hjNn/L+RstigmGGKVwHA/P0wgITZw==", - "dependencies": { - "safe-buffer": "^5.1.1" - } - }, "node_modules/vite": { - "version": "5.1.6", - "resolved": "https://registry.npmjs.org/vite/-/vite-5.1.6.tgz", - "integrity": "sha512-yYIAZs9nVfRJ/AiOLCA91zzhjsHUgMjB+EigzFb6W2XTLO8JixBCKCjvhKZaye+NKYHCrkv3Oh50dH9EdLU2RA==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.0.tgz", + "integrity": "sha512-ulr8rNLA6rkyFAlVWw2q5YJ91v098AFQ2R0PRFwPzREXOUJQPtFUG0t+/ZikhaOCDqFoDhN6/v8Sq0o4araFAw==", "dependencies": { - "esbuild": "^0.19.3", - "postcss": "^8.4.35", - "rollup": "^4.2.0" + "esbuild": "^0.18.10", + "postcss": "^8.4.27", + "rollup": "^3.27.1" }, "bin": { "vite": "bin/vite.js" }, "engines": { - "node": "^18.0.0 || >=20.0.0" + "node": "^14.18.0 || >=16.0.0" }, "funding": { "url": "https://github.com/vitejs/vite?sponsor=1" }, "optionalDependencies": { - "fsevents": "~2.3.3" + "fsevents": "~2.3.2" }, "peerDependencies": { - "@types/node": "^18.0.0 || >=20.0.0", + "@types/node": ">= 14", "less": "*", "lightningcss": "^1.21.0", "sass": "*", @@ -3517,76 +3155,80 @@ } }, "node_modules/vite-node": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-1.3.1.tgz", - "integrity": "sha512-azbRrqRxlWTJEVbzInZCTchx0X69M/XPTCz4H+TLvlTcR/xH/3hkRqhOakT41fMJCMzXTu4UvegkZiEoJAWvng==", + "version": "0.34.6", + "resolved": "https://registry.npmjs.org/vite-node/-/vite-node-0.34.6.tgz", + "integrity": "sha512-nlBMJ9x6n7/Amaz6F3zJ97EBwR2FkzhBRxF5e+jE6LA3yi6Wtc2lyTij1OnDMIr34v5g/tVQtsVAzhT0jc5ygA==", "dependencies": { "cac": "^6.7.14", "debug": "^4.3.4", + "mlly": "^1.4.0", "pathe": "^1.1.1", "picocolors": "^1.0.0", - "vite": "^5.0.0" + "vite": "^3.0.0 || ^4.0.0 || ^5.0.0-0" }, "bin": { "vite-node": "vite-node.mjs" }, "engines": { - "node": "^18.0.0 || >=20.0.0" + "node": ">=v14.18.0" }, "funding": { "url": "https://opencollective.com/vitest" } }, "node_modules/vitest": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/vitest/-/vitest-1.3.1.tgz", - "integrity": "sha512-/1QJqXs8YbCrfv/GPQ05wAZf2eakUPLPa18vkJAKE7RXOKfVHqMZZ1WlTjiwl6Gcn65M5vpNUB6EFLnEdRdEXQ==", - "dependencies": { - "@vitest/expect": "1.3.1", - "@vitest/runner": "1.3.1", - "@vitest/snapshot": "1.3.1", - "@vitest/spy": "1.3.1", - "@vitest/utils": "1.3.1", - "acorn-walk": "^8.3.2", + "version": "0.34.6", + "resolved": "https://registry.npmjs.org/vitest/-/vitest-0.34.6.tgz", + "integrity": "sha512-+5CALsOvbNKnS+ZHMXtuUC7nL8/7F1F2DnHGjSsszX8zCjWSSviphCb/NuS9Nzf4Q03KyyDRBAXhF/8lffME4Q==", + "dependencies": { + "@types/chai": "^4.3.5", + "@types/chai-subset": "^1.3.3", + "@types/node": "*", + "@vitest/expect": "0.34.6", + "@vitest/runner": "0.34.6", + "@vitest/snapshot": "0.34.6", + "@vitest/spy": "0.34.6", + "@vitest/utils": "0.34.6", + "acorn": "^8.9.0", + "acorn-walk": "^8.2.0", + "cac": "^6.7.14", "chai": "^4.3.10", "debug": "^4.3.4", - "execa": "^8.0.1", - "local-pkg": "^0.5.0", - "magic-string": "^0.30.5", + "local-pkg": "^0.4.3", + "magic-string": "^0.30.1", "pathe": "^1.1.1", "picocolors": 
"^1.0.0", - "std-env": "^3.5.0", - "strip-literal": "^2.0.0", - "tinybench": "^2.5.1", - "tinypool": "^0.8.2", - "vite": "^5.0.0", - "vite-node": "1.3.1", + "std-env": "^3.3.3", + "strip-literal": "^1.0.1", + "tinybench": "^2.5.0", + "tinypool": "^0.7.0", + "vite": "^3.1.0 || ^4.0.0 || ^5.0.0-0", + "vite-node": "0.34.6", "why-is-node-running": "^2.2.2" }, "bin": { "vitest": "vitest.mjs" }, "engines": { - "node": "^18.0.0 || >=20.0.0" + "node": ">=v14.18.0" }, "funding": { "url": "https://opencollective.com/vitest" }, "peerDependencies": { "@edge-runtime/vm": "*", - "@types/node": "^18.0.0 || >=20.0.0", - "@vitest/browser": "1.3.1", - "@vitest/ui": "1.3.1", + "@vitest/browser": "*", + "@vitest/ui": "*", "happy-dom": "*", - "jsdom": "*" + "jsdom": "*", + "playwright": "*", + "safaridriver": "*", + "webdriverio": "*" }, "peerDependenciesMeta": { "@edge-runtime/vm": { "optional": true }, - "@types/node": { - "optional": true - }, "@vitest/browser": { "optional": true }, @@ -3598,16 +3240,25 @@ }, "jsdom": { "optional": true + }, + "playwright": { + "optional": true + }, + "safaridriver": { + "optional": true + }, + "webdriverio": { + "optional": true } } }, "node_modules/vitest-environment-clarinet": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-2.0.0.tgz", - "integrity": "sha512-NW8Z0JPV/hwB1WkvGiGED9JmXsefPUjImJRbO3BEsxdL8qxA1y2EAwuqjfmvXYDeisQSnZGbfns7DN8eDxJnpg==", + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/vitest-environment-clarinet/-/vitest-environment-clarinet-1.0.3.tgz", + "integrity": "sha512-h/FeWPiEBS4a359Y8ZNo8nsftsfEoyLtZpJdnvDggDzcEUNkAsssU4tQzLp+KPm2VohAleqjFGSYMOGRbgLtDA==", "peerDependencies": { - "@hirosystems/clarinet-sdk": "2", - "vitest": "^1.3.1" + "@hirosystems/clarinet-sdk": "1", + "vitest": "0" } }, "node_modules/webidl-conversions": { @@ -3674,6 +3325,36 @@ "url": "https://github.com/chalk/wrap-ansi?sponsor=1" } }, + "node_modules/wrap-ansi/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/wrap-ansi/node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/wrap-ansi/node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", @@ -3713,11 +3394,11 @@ } }, "node_modules/yocto-queue": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", - "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-1.0.0.tgz", 
+ "integrity": "sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==", "engines": { - "node": ">=10" + "node": ">=12.20" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" diff --git a/contrib/core-contract-tests/package.json b/contrib/core-contract-tests/package.json index 7ba3ba62e21..774870d8f9b 100644 --- a/contrib/core-contract-tests/package.json +++ b/contrib/core-contract-tests/package.json @@ -3,7 +3,6 @@ "version": "1.0.0", "description": "Run unit tests on this project.", "private": true, - "type": "module", "scripts": { "test": "vitest run -- --coverage", "genhtml": "genhtml lcov.info --branch-coverage -o coverage/" @@ -11,14 +10,14 @@ "author": "", "license": "ISC", "dependencies": { - "@hirosystems/clarinet-sdk": "^2.4.1", - "@stacks/clarunit": "0.0.1", + "@hirosystems/clarinet-sdk": "^1.1.0", "@stacks/transactions": "^6.12.0", "chokidar-cli": "^3.0.0", + "@stacks/clarunit": "0.0.1", "fast-check": "^3.15.1", - "typescript": "^5.4.2", - "vite": "^5.1.6", - "vitest": "^1.3.1", - "vitest-environment-clarinet": "^2.0.0" + "typescript": "^5.2.2", + "vite": "^4.4.9", + "vitest": "^0.34.4", + "vitest-environment-clarinet": "^1.0.0" } -} +} \ No newline at end of file diff --git a/contrib/core-contract-tests/tests/pox-4/pox-4.prop.test.ts b/contrib/core-contract-tests/tests/pox-4/pox-4.prop.test.ts deleted file mode 100644 index 454480c41fb..00000000000 --- a/contrib/core-contract-tests/tests/pox-4/pox-4.prop.test.ts +++ /dev/null @@ -1,1838 +0,0 @@ -import { - Cl, - ClarityType, - ClarityValue, - createStacksPrivateKey, - isClarityType, - pubKeyfromPrivKey, - serializeCV, - signWithKey, -} from "@stacks/transactions"; -import fc from "fast-check"; -import { assert, describe, expect, it } from "vitest"; -import { createHash } from "crypto"; - -// Contract Consts -const INITIAL_TOTAL_LIQ_SUPPLY = 1_000_000_000_000_000; -const MIN_AMOUNT_USTX = 125_000_000_000n; -const TESTNET_PREPARE_CYCLE_LENGTH = 50; -const TESTNET_REWARD_CYCLE_LENGTH = 1050; -const TESTNET_STACKING_THRESHOLD_25 = 8000; -// Clarity -const MAX_CLAR_UINT = 340282366920938463463374607431768211455n; -const TESTNET_CHAIN_ID = 2147483648; -const SIP_018_MESSAGE_PREFIX = "534950303138"; -// Error Codes -const ERR_STACKING_INVALID_LOCK_PERIOD = 2; -const ERR_STACKING_THRESHOLD_NOT_MET = 11; -const ERR_STACKING_INVALID_POX_ADDRESS = 13; -const ERR_STACKING_INVALID_AMOUNT = 18; -const ERR_INVALID_SIGNATURE_PUBKEY = 35; -const ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH = 38; -// Private Keys -const privateKeyMapping: { - [key: string]: string; -} = { - ST1PQHQKV0RJXZFY1DGX8MNSNYVE3VGZJSRTPGZGM: - "753b7cc01a1a2e86221266a154af739463fce51219d97e4f856cd7200c3bd2a601", - ST1SJ3DTE5DN7X54YDH5D64R3BCB6A2AG2ZQ8YPD5: - "7287ba251d44a4d3fd9276c88ce34c5c52a038955511cccaf77e61068649c17801", - ST2CY5V39NHDPWSXMW9QDT3HC3GD6Q6XX4CFRK9AG: - "530d9f61984c888536871c6573073bdfc0058896dc1adfe9a6a10dfacadc209101", - ST2JHG361ZXG51QTKY2NQCVBPPRRE2KZB1HR05NNC: - "d655b2523bcd65e34889725c73064feb17ceb796831c0e111ba1a552b0f31b3901", - ST2NEB84ASENDXKYGJPQW86YXQCEFEX2ZQPG87ND: - "f9d7206a47f14d2870c163ebab4bf3e70d18f5d14ce1031f3902fbbc894fe4c701", - ST2REHHS5J3CERCRBEPMGH7921Q6PYKAADT7JP2VB: - "3eccc5dac8056590432db6a35d52b9896876a3d5cbdea53b72400bc9c2099fe801", - ST3AM1A56AK2C1XAFJ4115ZSV26EB49BVQ10MGCS0: - "7036b29cb5e235e5fd9b09ae3e8eec4404e44906814d5d01cbca968a60ed4bfb01", - ST3PF13W7Z0RRM42A8VZRVFQ75SV1K26RXEP8YGKJ: - "b463f0df6c05d2f156393eee73f8016c5372caa0e9e29a901bb7171d90dc4f1401", - 
ST3NBRSFKX28FQ2ZJ1MAKX58HKHSDGNV5N7R21XCP: - "6a1a754ba863d7bab14adbbc3f8ebb090af9e871ace621d3e5ab634e1422885e01", - STNHKEPYEPJ8ET55ZZ0M5A34J0R3N5FM2CMMMAZ6: - "de433bdfa14ec43aa1098d5be594c8ffb20a31485ff9de2923b2689471c401b801", -}; - -const sha256 = (data: Buffer): Buffer => - createHash("sha256").update(data).digest(); - -const structuredDataHash = (structuredData: ClarityValue): Buffer => - sha256(Buffer.from(serializeCV(structuredData))); - -const generateDomainHash = (): ClarityValue => - Cl.tuple({ - name: Cl.stringAscii("pox-4-signer"), - version: Cl.stringAscii("1.0.0"), - "chain-id": Cl.uint(TESTNET_CHAIN_ID), - }); - -const generateMessageHash = ( - version: number, - hashbytes: number[], - reward_cycle: number, - topic: string, - period: number, - auth_id: number, - max_amount: number -): ClarityValue => - Cl.tuple({ - "pox-addr": Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - "reward-cycle": Cl.uint(reward_cycle), - topic: Cl.stringAscii(topic), - period: Cl.uint(period), - "auth-id": Cl.uint(auth_id), - "max-amount": Cl.uint(max_amount), - }); - -const generateMessagePrefixBuffer = (prefix: string) => - Buffer.from(prefix, "hex"); - -const buildSignerKeyMessageHash = ( - version: number, - hashbytes: number[], - reward_cycle: number, - topic: string, - period: number, - max_amount: number, - auth_id: number -) => { - const domain_hash = structuredDataHash(generateDomainHash()); - const message_hash = structuredDataHash( - generateMessageHash( - version, - hashbytes, - reward_cycle, - topic, - period, - auth_id, - max_amount - ) - ); - const structuredDataPrefix = generateMessagePrefixBuffer( - SIP_018_MESSAGE_PREFIX - ); - - const signer_key_message_hash = sha256( - Buffer.concat([structuredDataPrefix, domain_hash, message_hash]) - ); - - return signer_key_message_hash; -}; - -const signMessageHash = (privateKey: string, messageHash: Buffer) => { - const data = signWithKey( - createStacksPrivateKey(privateKey), - messageHash.toString("hex") - ).data; - return Buffer.from(data.slice(2) + data.slice(0, 2), "hex"); -}; - -describe("test pox-4 contract read only functions", () => { - it("should return correct reward-cycle-to-burn-height", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat(), - (account, reward_cycle) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - account - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const first_burn_block_height = - pox_4_info.value.data["first-burnchain-block-height"]; - const reward_cycle_length = - pox_4_info.value.data["reward-cycle-length"]; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "reward-cycle-to-burn-height", - [Cl.uint(reward_cycle)], - account - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - assert(isClarityType(first_burn_block_height, ClarityType.UInt)); - assert(isClarityType(reward_cycle_length, ClarityType.UInt)); - const expected = - Number(first_burn_block_height.value) + - Number(reward_cycle_length.value) * reward_cycle; - expect(actual).toBeUint(expected); - } - ) - ); - }); - - it("should return correct burn-height-to-reward-cycle", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat(), - (account, burn_height) => { - // Arrange - const { result: pox_4_info } = 
simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - account - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const first_burn_block_height = - pox_4_info.value.data["first-burnchain-block-height"]; - const reward_cycle_length = - pox_4_info.value.data["reward-cycle-length"]; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "burn-height-to-reward-cycle", - [Cl.uint(burn_height)], - account - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - assert(isClarityType(first_burn_block_height, ClarityType.UInt)); - assert(isClarityType(reward_cycle_length, ClarityType.UInt)); - const expected = Math.floor( - (burn_height - Number(first_burn_block_height.value)) / - Number(reward_cycle_length.value) - ); - expect(actual).toBeUint(expected); - } - ) - ); - }); - - it("should return u0 current-pox-reward-cycle", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - let expected = 0; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "current-pox-reward-cycle", - [], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - expect(actual).toBeUint(expected); - } - ) - ); - }); - - it("should return none get-stacker-info", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.constantFrom(...simnet.getAccounts().values()), - (stacker, caller) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-stacker-info", - [Cl.principal(stacker)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.OptionalNone)); - expect(actual).toBeNone(); - } - ) - ); - }); - - it("should return true check-caller-allowed", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "check-caller-allowed", - [], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.BoolTrue)); - expect(actual).toBeBool(true); - } - ) - ); - }); - - it("should return u0 get-reward-set-size", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat(), - (caller, reward_cycle) => { - // Arrange - const expected = 0; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-reward-set-size", - [Cl.uint(reward_cycle)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - expect(actual).toBeUint(expected); - } - ) - ); - }); - - it("should return u0 get-total-ustx-stacked", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat(), - (caller, reward_cycle) => { - // Arrange - const expected = 0; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-total-ustx-stacked", - [Cl.uint(reward_cycle)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - expect(actual).toBeUint(expected); - } - ) - ); - }); - - it("should return none get-reward-set-pox-address", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat(), - fc.nat(), - (caller, index, reward_cycle) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-reward-set-pox-address", - [Cl.uint(index), Cl.uint(reward_cycle)], - caller - ); - // Assert - 
assert(isClarityType(actual, ClarityType.OptionalNone)); - expect(actual).toBeNone(); - } - ) - ); - }); - - it("should return correct get-stacking-minimum", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const stx_liq_supply = - pox_4_info.value.data["total-liquid-supply-ustx"]; - assert(isClarityType(stx_liq_supply, ClarityType.UInt)); - const expected = Math.floor( - Number(stx_liq_supply.value) / TESTNET_STACKING_THRESHOLD_25 - ); - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-stacking-minimum", - [], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - expect(actual).toBeUint(expected); - } - ) - ); - }); - - it("should return true check-pox-addr-version for version <= 6 ", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat({ max: 6 }), - (caller, version) => { - // Arrange - const expected = true; - // Act - let { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "check-pox-addr-version", - [Cl.buffer(Uint8Array.from([version]))], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.BoolTrue)); - expect(actual).toBeBool(expected); - } - ) - ); - }); - - it("should return false check-pox-addr-version for version > 6", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 7, max: 255 }), - (caller, version) => { - // Arrange - const expected = false; - // Act - let { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "check-pox-addr-version", - [Cl.buffer(Uint8Array.from([version]))], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.BoolFalse)); - expect(actual).toBeBool(expected); - } - ) - ); - }); - - it("should return true check-pox-lock-period for valid reward cycles number", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 1, max: 12 }), - (caller, valid_reward_cycles) => { - // Arrange - const expected = true; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "check-pox-lock-period", - [Cl.uint(valid_reward_cycles)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.BoolTrue)); - expect(actual).toBeBool(expected); - } - ) - ); - }); - - it("should return false check-pox-lock-period for reward cycles number > 12", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 13 }), - (caller, invalid_reward_cycles) => { - // Arrange - const expected = false; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "check-pox-lock-period", - [Cl.uint(invalid_reward_cycles)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.BoolFalse)); - expect(actual).toBeBool(expected); - } - ) - ); - }); - - it("should return false check-pox-lock-period for reward cycles number == 0", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - const invalid_reward_cycles = 0; - const expected = false; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "check-pox-lock-period", - [Cl.uint(invalid_reward_cycles)], - caller - ); - // 
Assert - assert(isClarityType(actual, ClarityType.BoolFalse)); - expect(actual).toBeBool(expected); - } - ) - ); - }); - - it("should return (ok true) can-stack-stx for versions 0-4 valid pox addresses, hashbytes, amount, cycles number", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - minLength: 20, - maxLength: 20, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseOk = true; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseOk)); - assert(isClarityType(actual.value, ClarityType.BoolTrue)); - expect(actual).toBeOk(Cl.bool(expectedResponseOk)); - } - ) - ); - }); - - it("should return (ok true) can-stack-stx for versions 5/6 valid pox addresses, hashbytes, amount, cycles number", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 5, max: 6 }), - fc.array(fc.nat({ max: 255 }), { - minLength: 32, - maxLength: 32, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseOk = true; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseOk)); - assert(isClarityType(actual.value, ClarityType.BoolTrue)); - expect(actual).toBeOk(Cl.bool(expectedResponseOk)); - } - ) - ); - }); - - it("should return (err 13) can-stack-stx for pox addresses having version > 6", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ - min: 7, - max: 255, - }), - fc.array(fc.nat({ max: 255 }), { - maxLength: 32, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { 
result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 13) can-stack-stx for versions 0-4 pox addresses having hasbytes longer than 20", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - minLength: 21, - maxLength: 32, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 13) can-stack-stx for versions 0-4 pox addresses having hasbytes shorter than 20", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - maxLength: 19, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 13) can-stack-stx for versions 5/6 pox addresses having hashbytes shorter than 32", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - maxLength: 31, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - 
version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 11) can-stack-stx for unmet stacking threshold", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 6 }), - fc.array(fc.nat({ max: 255 })), - fc.bigInt({ - min: 0n, - max: 124_999_999_999n, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_THRESHOLD_NOT_MET; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 2) can-stack-stx for lock period > 12", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 6 }), - fc.array(fc.nat({ max: 255 })), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 13 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_LOCK_PERIOD; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (ok true) minimal-can-stack-stx for versions 0-4 valid pox addresses, hashbytes, amount, cycles number", () => { - fc.assert( - fc.property( - 
fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - minLength: 20, - maxLength: 20, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseOk = true; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseOk)); - assert(isClarityType(actual.value, ClarityType.BoolTrue)); - expect(actual).toBeOk(Cl.bool(expectedResponseOk)); - } - ) - ); - }); - - it("should return (ok true) minimal-can-stack-stx for versions 5/6 valid pox addresses, hashbytes, amount, cycles number", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 5, max: 6 }), - fc.array(fc.nat({ max: 255 }), { - minLength: 32, - maxLength: 32, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseOk = true; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseOk)); - assert(isClarityType(actual.value, ClarityType.BoolTrue)); - expect(actual).toBeOk(Cl.bool(expectedResponseOk)); - } - ) - ); - }); - - it("should return (err 13) minimal-can-stack-stx for pox addresses having version > 6", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ - min: 7, - max: 255, - }), - fc.array(fc.nat({ max: 255 }), { - maxLength: 32, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - 
Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 13) minimal-can-stack-stx for versions 0-4 pox addresses having hasbytes longer than 20", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - minLength: 21, - maxLength: 32, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 13) minimal-can-stack-stx for versions 0-4 pox addresses having hasbytes shorter than 20", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - maxLength: 19, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 13) minimal-can-stack-stx for versions 5/6 pox addresses having hashbytes shorter than 32", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 4 }), - fc.array(fc.nat({ max: 255 }), { - maxLength: 31, - }), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - 
caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_POX_ADDRESS; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 2) minimal-can-stack-stx for lock period > 12", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 6 }), - fc.array(fc.nat({ max: 255 })), - fc.bigInt({ - min: MIN_AMOUNT_USTX, - max: MAX_CLAR_UINT, - }), - fc.nat(), - fc.integer({ min: 13 }), - ( - caller, - version, - hashbytes, - amount_ustx, - first_rew_cycle, - num_cycles - ) => { - // Arrange - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_LOCK_PERIOD; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return (err 18) minimal-can-stack-stx for amount == 0", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.integer({ min: 0, max: 6 }), - fc.array(fc.nat({ max: 255 }), { maxLength: 32 }), - fc.nat(), - fc.integer({ min: 1, max: 12 }), - (caller, version, hashbytes, first_rew_cycle, num_cycles) => { - // Arrange - const amount_ustx = 0; - const { result: pox_4_info } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - assert(isClarityType(pox_4_info, ClarityType.ResponseOk)); - assert(isClarityType(pox_4_info.value, ClarityType.Tuple)); - const expectedResponseErr = ERR_STACKING_INVALID_AMOUNT; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "minimal-can-stack-stx", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(amount_ustx), - Cl.uint(first_rew_cycle), - Cl.uint(num_cycles), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - } - ) - ); - }); - - it("should return none get-check-delegation", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-check-delegation", - [Cl.principal(caller)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.OptionalNone)); - } 
- ) - ); - }); - - it("should return none get-delegation-info", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-delegation-info", - [Cl.principal(caller)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.OptionalNone)); - } - ) - ); - }); - - it("should return correct get-pox-info", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - (caller) => { - // Arrange - const expected_reward_cycle_id = 0, - expected_first_burn_block_height = 0; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-pox-info", - [], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseOk)); - assert(isClarityType(actual.value, ClarityType.Tuple)); - expect(actual.value.data["first-burnchain-block-height"]).toBeUint( - expected_first_burn_block_height - ); - expect(actual.value.data["min-amount-ustx"]).toBeUint( - MIN_AMOUNT_USTX - ); - expect(actual.value.data["prepare-cycle-length"]).toBeUint( - TESTNET_PREPARE_CYCLE_LENGTH - ); - expect(actual.value.data["reward-cycle-id"]).toBeUint( - expected_reward_cycle_id - ); - expect(actual.value.data["reward-cycle-length"]).toBeUint( - TESTNET_REWARD_CYCLE_LENGTH - ); - expect(actual.value.data["total-liquid-supply-ustx"]).toBeUint( - INITIAL_TOTAL_LIQ_SUPPLY - ); - } - ) - ); - }); - - it("should return none get-allowance-contract-caller", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.constantFrom(...simnet.getAccounts().values()), - fc.constantFrom(...simnet.getAccounts().values()), - (caller, sender, contract_caller) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-allowance-contract-callers", - [Cl.principal(sender), Cl.principal(contract_caller)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.OptionalNone)); - } - ) - ); - }); - - it("should return some(until-burn-ht: none) get-allowance-contract-caller after allow-contract-caller", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.constantFrom(...simnet.getAccounts().values()), - fc.constantFrom(...simnet.getAccounts().values()), - (caller, sender, contract_caller) => { - // Arrange - const { result: allow } = simnet.callPublicFn( - "pox-4", - "allow-contract-caller", - [Cl.principal(contract_caller), Cl.none()], - sender - ); - assert(isClarityType(allow, ClarityType.ResponseOk)); - assert(isClarityType(allow.value, ClarityType.BoolTrue)); - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-allowance-contract-callers", - [Cl.principal(sender), Cl.principal(contract_caller)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.OptionalSome)); - assert(isClarityType(actual.value, ClarityType.Tuple)); - expect(actual).toBeSome(Cl.tuple({ "until-burn-ht": Cl.none() })); - } - ) - ); - }); - - it("should return u0 get-num-reward-set-pox-addresses", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat(), - (caller, reward_cycle) => { - // Arrange - const expected = 0; - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-num-reward-set-pox-addresses", - [Cl.uint(reward_cycle)], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.UInt)); - expect(actual).toBeUint(expected); - } - ) 
- ); - }); - - it("should return none get-partial-stacked-by-cycle", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat({ max: 6 }), - fc.array(fc.nat({ max: 255 }), { maxLength: 32 }), - fc.nat(), - fc.constantFrom(...simnet.getAccounts().values()), - (caller, version, hashbytes, reward_cycle, sender) => { - // Arrange - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-partial-stacked-by-cycle", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(reward_cycle), - Cl.principal(sender), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.OptionalNone)); - } - ) - ); - }); - - it("should return correct hash get-signer-key-message-hash", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat({ max: 6 }), - fc.array(fc.nat({ max: 255 }), { maxLength: 32 }), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - ( - caller, - version, - hashbytes, - reward_cycle, - period, - max_amount, - auth_id - ) => { - // Arrange - - const signer_key_message_hash = buildSignerKeyMessageHash( - version, - hashbytes, - reward_cycle, - "topic", - period, - max_amount, - auth_id - ); - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "get-signer-key-message-hash", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(reward_cycle), - Cl.stringAscii("topic"), - Cl.uint(period), - Cl.uint(max_amount), - Cl.uint(auth_id), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.Buffer)); - expect(actual).toBeBuff(signer_key_message_hash); - } - ) - ); - }); - - it("should return (ok true) verify-signer-key-sig called with correct data", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat({ max: 6 }), - fc.array(fc.nat({ max: 255 }), { maxLength: 32 }), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - ( - caller, - version, - hashbytes, - reward_cycle, - period, - amount, - max_amount, - auth_id - ) => { - // Arrange - fc.pre(amount <= max_amount); - const signer_private_key = privateKeyMapping[caller] ?? 
""; - const signer_key_message_hash = buildSignerKeyMessageHash( - version, - hashbytes, - reward_cycle, - "topic", - period, - max_amount, - auth_id - ); - const signer_sig = signMessageHash( - signer_private_key, - signer_key_message_hash - ); - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "verify-signer-key-sig", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(reward_cycle), - Cl.stringAscii("topic"), - Cl.uint(period), - Cl.some(Cl.buffer(signer_sig)), - Cl.buffer(pubKeyfromPrivKey(signer_private_key).data), - Cl.uint(amount), - Cl.uint(max_amount), - Cl.uint(auth_id), - ], - caller - ); - assert(isClarityType(actual, ClarityType.ResponseOk)); - assert(isClarityType(actual.value, ClarityType.BoolTrue)); - expect(actual).toBeOk(Cl.bool(true)); - expect(actual.value).toBeBool(true); - } - ) - ); - }); - - it("should return (err 35) verify-signer-key-sig called with wrong public key", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat({ max: 6 }), - fc.array(fc.nat({ max: 255 }), { maxLength: 32 }), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - fc.constantFrom(...simnet.getAccounts().values()), - ( - caller, - version, - hashbytes, - reward_cycle, - period, - amount, - max_amount, - auth_id, - wrong_address - ) => { - // Arrange - fc.pre(amount <= max_amount); - fc.pre(wrong_address !== caller); - const expectedResponseErr = ERR_INVALID_SIGNATURE_PUBKEY; - const signer_private_key = privateKeyMapping[caller]; - const wrong_private_key = privateKeyMapping[wrong_address]; - const signer_key_message_hash = buildSignerKeyMessageHash( - version, - hashbytes, - reward_cycle, - "topic", - period, - max_amount, - auth_id - ); - const signer_sig = signMessageHash( - signer_private_key, - signer_key_message_hash - ); - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "verify-signer-key-sig", - [ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(reward_cycle), - Cl.stringAscii("topic"), - Cl.uint(period), - Cl.some(Cl.buffer(signer_sig)), - Cl.buffer(pubKeyfromPrivKey(wrong_private_key).data), - Cl.uint(amount), - Cl.uint(max_amount), - Cl.uint(auth_id), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - expect(actual.value).toBeInt(expectedResponseErr); - } - ) - ); - }); - - it("should return (err 38) verify-signer-key-sig called with wrong public key", () => { - fc.assert( - fc.property( - fc.constantFrom(...simnet.getAccounts().values()), - fc.nat({ max: 6 }), - fc.array(fc.nat({ max: 255 }), { maxLength: 32 }), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - fc.nat(), - ( - caller, - version, - hashbytes, - reward_cycle, - period, - amount, - max_amount, - auth_id - ) => { - // Arrange - fc.pre(amount > max_amount); - const expectedResponseErr = ERR_SIGNER_AUTH_AMOUNT_TOO_HIGH; - const signer_private_key = privateKeyMapping[caller]; - const signer_key_message_hash = buildSignerKeyMessageHash( - version, - hashbytes, - reward_cycle, - "topic", - period, - max_amount, - auth_id - ); - const signer_sig = signMessageHash( - signer_private_key, - signer_key_message_hash - ); - // Act - const { result: actual } = simnet.callReadOnlyFn( - "pox-4", - "verify-signer-key-sig", - 
[ - Cl.tuple({ - version: Cl.buffer(Uint8Array.from([version])), - hashbytes: Cl.buffer(Uint8Array.from(hashbytes)), - }), - Cl.uint(reward_cycle), - Cl.stringAscii("topic"), - Cl.uint(period), - Cl.some(Cl.buffer(signer_sig)), - Cl.buffer(pubKeyfromPrivKey(signer_private_key).data), - Cl.uint(amount), - Cl.uint(max_amount), - Cl.uint(auth_id), - ], - caller - ); - // Assert - assert(isClarityType(actual, ClarityType.ResponseErr)); - assert(isClarityType(actual.value, ClarityType.Int)); - expect(actual).toBeErr(Cl.int(expectedResponseErr)); - expect(actual.value).toBeInt(expectedResponseErr); - } - ) - ); - }); -}); diff --git a/contrib/core-contract-tests/vitest.config.js b/contrib/core-contract-tests/vitest.config.js index 364c55f7351..f856409052c 100644 --- a/contrib/core-contract-tests/vitest.config.js +++ b/contrib/core-contract-tests/vitest.config.js @@ -1,10 +1,7 @@ /// import { defineConfig } from "vite"; -import { - vitestSetupFilePath, - getClarinetVitestsArgv, -} from "@hirosystems/clarinet-sdk/vitest"; +import { vitestSetupFilePath, getClarinetVitestsArgv } from "@hirosystems/clarinet-sdk/vitest"; /* In this file, Vitest is configured so that it works seamlessly with Clarinet and the Simnet. @@ -14,7 +11,7 @@ import { `vitestSetupFilePath` points to a file in the `@hirosystems/clarinet-sdk` package that does two things: - run `before` hooks to initialize the simnet and `after` hooks to collect costs and coverage reports. - - load custom vitest matchers to work with Clarity values (such as `expect(...).toBeUint()`) + - load custom Vitest matchers to work with Clarity values (such as `expect(...).toBeUint()`). The `getClarinetVitestsArgv()` will parse options passed to the command `vitest run --` - vitest run -- --manifest ./Clarinet.toml # pass a custom path @@ -24,11 +21,7 @@ import { export default defineConfig({ test: { environment: "clarinet", // use vitest-environment-clarinet - pool: "forks", - poolOptions: { - threads: { singleThread: true }, - forks: { singleFork: true }, - }, + singleThread: true, setupFiles: [ vitestSetupFilePath, // custom setup files can be added here diff --git a/contrib/tools/relay-server/Cargo.toml b/contrib/tools/relay-server/Cargo.toml index 3736469065e..70ee265bfa8 100644 --- a/contrib/tools/relay-server/Cargo.toml +++ b/contrib/tools/relay-server/Cargo.toml @@ -8,4 +8,4 @@ name = "relay-server" path = "src/main.rs" [dependencies] -hashbrown = { workspace = true } +stacks_common = { path = "../../../stacks-common", package = "stacks-common" } diff --git a/contrib/tools/relay-server/src/http.rs b/contrib/tools/relay-server/src/http.rs index c84f833beec..58ee9d2f9d0 100644 --- a/contrib/tools/relay-server/src/http.rs +++ b/contrib/tools/relay-server/src/http.rs @@ -1,6 +1,6 @@ use std::io::{Error, Read}; -use hashbrown::HashMap; +use stacks_common::types::StacksHashMap as HashMap; use crate::to_io_result::ToIoResult; diff --git a/contrib/tools/relay-server/src/state.rs b/contrib/tools/relay-server/src/state.rs index 084779c8d47..91696471cda 100644 --- a/contrib/tools/relay-server/src/state.rs +++ b/contrib/tools/relay-server/src/state.rs @@ -1,4 +1,4 @@ -use hashbrown::HashMap; +use stacks_common::types::StacksHashMap as HashMap; #[derive(Default)] pub struct State { diff --git a/contrib/tools/relay-server/src/url.rs b/contrib/tools/relay-server/src/url.rs index aedc5711d8d..962ae98e374 100644 --- a/contrib/tools/relay-server/src/url.rs +++ b/contrib/tools/relay-server/src/url.rs @@ -1,4 +1,4 @@ -use hashbrown::HashMap; +use 
stacks_common::types::StacksHashMap as HashMap; pub trait QueryEx { fn url_query(&self) -> HashMap<&str, &str>; diff --git a/docs/ci-release.md b/docs/ci-release.md index 4e21ed631df..ff0bca229b9 100644 --- a/docs/ci-release.md +++ b/docs/ci-release.md @@ -135,35 +135,6 @@ ex: - `Stacks Blockchain Tests`: - `full-genesis`: Tests related to full genesis -### Checking the result of multiple tests at once - -You can use the [check-jobs-status](https://github.com/stacks-network/actions/tree/main/check-jobs-status) composite action in order to check that multiple tests are successful in 1 job. -If any of the tests given to the action (JSON string of `needs` field) fails, the step that calls the action will also fail. - -If you have to mark more than 1 job from the same workflow required in a ruleset, you can use this action in a separate job and only add that job as required. - -In the following example, `unit-tests` is a matrix job with 8 partitions (i.e. 8 jobs are running), while the others are normal jobs. -If any of the 11 jobs are failing, the `check-tests` job will also fail. - -```yaml -check-tests: - name: Check Tests - runs-on: ubuntu-latest - if: always() - needs: - - full-genesis - - unit-tests - - open-api-validation - - core-contracts-clarinet-test - steps: - - name: Check Tests Status - id: check_tests_status - uses: stacks-network/actions/check-jobs-status@main - with: - jobs: ${{ toJson(needs) }} - summary_print: "true" -``` - ## Triggering a workflow ### PR a branch to develop diff --git a/docs/rpc-endpoints.md b/docs/rpc-endpoints.md index 6815adfc61c..bb8a4b28f85 100644 --- a/docs/rpc-endpoints.md +++ b/docs/rpc-endpoints.md @@ -96,24 +96,69 @@ Callers who wish to download more headers will need to issue this query multiple times, with a `?tip=` query parameter set to the index block hash of the earliest header received. -Returns a JSON list containing the following: +Returns a +[SIP-003](https://github.com/stacksgov/sips/blob/main/sips/sip-003/sip-003-peer-network.md)-encoded +vector with length up to [Count] that contains a list of the following SIP-003-encoded +structures: -```json -[ - { - "consensus_hash": "dff37af13badf99683228e61c71585bb7a82ac92", - "header": -"0600000047ddfbee8c00000000000222c7ad9042e5a67a703ff3b06581e3fd8a2f1496a563dc41462ebf8e5b046b43e7085f20e828840f26fefbe93a048f6c390ce55b954b188a43781fa0db61c091dbb840717fda77f9fc16d8ac85f80bbf2d04a20d17328390e03b8f496986f6351def656fd12cc4b8fe5e2cfb8d3f2e67c3a700000000000000000000000000000000000000000000000000000000000000000000fb432fbe28fb60ab37c8f59eec2397a0d0bcaf679a34b39d02d338935c7e723e062d571e331fb5016d3000ab68da691baa02b4a5dde7befa2edceb219af959312544d306919a59ee4cfd616dc3cc44a6f01ac7c8", - "parent_block_id": -"e0cb2be07552556f856503d2fbd855a27d49dc5a8c47fb2d9f0314eb6bad6861" - } -] +```rust +struct ExtendedStacksHeader { + consensus_hash: ConsensusHash, + header: StacksBlockHeader, + parent_block_id: StacksBlockId, +} +``` + +Where `ConsensusHash` is a 20-byte byte buffer. + +Where `StacksBlockId` is a 32-byte byte buffer. + +Where `StacksBlockHeader` is the following SIP-003-encoded structure: + +```rust +struct StacksBlockHeader { + version: u8, + total_work: StacksWorkScore, + proof: VRFProof, + parent_block: BlockHeaderHash, + parent_microblock: BlockHeaderHash, + parent_microblock_sequence: u16, + tx_merkle_root: Sha512Trunc256Sum, + state_index_root: TrieHash, + microblock_pubkey_hash: Hash160, +} ``` -The `consensus_hash` field identifies the sortition in which the given block was -chosen. 
The `header` is the raw block header, a hex string. The
-`parent_block_id` is the block ID hash of this block's parent, and can be used
-as a `?tip=` query parameter to page through deeper and deeper block headers.
+Where `BlockHeaderHash`, `Sha512Trunc256Sum`, and `TrieHash` are 32-byte byte
+buffers.
+
+Where `Hash160` is a 20-byte byte buffer.
+
+Where `StacksWorkScore` and `VRFProof` are the following SIP-003-encoded structures:
+
+```rust
+struct StacksWorkScore {
+    burn: u64,
+    work: u64,
+}
+```
+
+```rust
+struct VRFProof {
+    Gamma: [u8; 32],
+    c: [u8; 16],
+    s: [u8; 32],
+}
+```
+
+The interpretation of most of these fields is beyond the scope of this document (please
+see
+[SIP-005](https://github.com/stacksgov/sips/blob/main/sips/sip-005/sip-005-blocks-and-transactions.md)
+for details). However, it is worth pointing out that `parent_block_id` is a
+valid argument to the `?tip=` query parameter. If the caller of this API
+endpoint wants to receive more than 2100 contiguous headers, it would use the
+oldest header's `parent_block_id` field from the previous call as the `?tip=`
+argument to the next call in order to fetch the next batch of ancestor headers.
 
 This API endpoint may return a list of zero headers if `?tip=` refers to the
 hash of the Stacks genesis block.
 
@@ -495,41 +540,3 @@ Error examples:
     "reason_code": "ChainstateError"
   }
 ```
-
-### GET /v3/blocks/[Block ID]
-
-Fetch a Nakamoto block given its block ID hash. This returns the raw block
-data.
-
-This will return 404 if the block does not exist.
-
-### GET /v3/tenures/[Block ID]
-
-Fetch a Nakamoto block and all of its ancestors in the same tenure, given its
-block ID hash. At most `MAX_MESSAGE_LEN` (i.e. 2 MB) of data will be returned.
-If the tenure is larger than this, then the caller can page through the tenure
-by repeatedly invoking this endpoint with the deepest block's block ID until
-only a single block is returned (i.e. the tenure-start block).
-
-This method returns one or more raw blocks, concatenated together.
-
-This method returns 404 if there are no blocks with the given block ID.
-
-### GET /v3/tenures/info
-
-Return metadata about the highest-known tenure, as the following JSON structure:
-
-```json
-{
-  "consensus_hash": "dca60a97a135189d67a5ad6d2dac90f289b19c96",
-  "reward_cycle": 5,
-  "tip_block_id": "317c0ee162d1ee02c67d5bca79003dafc59aa84579360387f43650c37491ac3b",
-  "tip_height": 116
-}
-```
-
-Here, `consensus_hash` identifies the highest-known tenure (which may not be the
-highest sortition), `reward_cycle` identifies the reward cycle number of this
-tenure, `tip_block_id` identifies the highest-known block in this tenure, and
-`tip_height` identifies that block's height.
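To make the header-paging rule added above concrete, here is a minimal sketch of the client-side loop. This is illustrative only: `ExtendedStacksHeader` is reduced to the one field the loop needs, and `fetch_batch` is a hypothetical callback standing in for an HTTP GET of `/v2/headers/[Count]?tip=...` (up to 2100 headers per call) plus SIP-003 decoding; neither is code from this diff.

```rust
// Illustrative paging loop: repeatedly feed the oldest header's
// parent_block_id back in as the next ?tip= value, stopping when the
// node returns zero headers (i.e. ?tip= reached the genesis block).
struct ExtendedStacksHeader {
    parent_block_id: String, // hex block ID, usable as ?tip=
}

fn collect_headers<F>(mut fetch_batch: F) -> Vec<ExtendedStacksHeader>
where
    F: FnMut(Option<&str>) -> Vec<ExtendedStacksHeader>,
{
    let mut headers = Vec::new();
    let mut tip: Option<String> = None;
    loop {
        let batch = fetch_batch(tip.as_deref());
        let Some(oldest) = batch.last() else {
            break; // zero headers: the tip's ancestry is exhausted
        };
        tip = Some(oldest.parent_block_id.clone());
        headers.extend(batch);
    }
    headers
}

fn main() {
    // Stub fetcher: pretend the chain serves a single one-header batch.
    let mut served = false;
    let headers = collect_headers(|_tip| {
        if served {
            return Vec::new();
        }
        served = true;
        vec![ExtendedStacksHeader {
            parent_block_id: "e0cb2b...".into(), // placeholder hex ID
        }]
    });
    assert_eq!(headers.len(), 1);
}
```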
- diff --git a/docs/rpc/api/core-node/get_tenure_info.json b/docs/rpc/api/core-node/get_tenure_info.json deleted file mode 100644 index 052f5bc6148..00000000000 --- a/docs/rpc/api/core-node/get_tenure_info.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "consensus_hash": "4c5a49be0e34dc603b66f090fd07d28a2f76a2ad", - "parent_consensus_hash": "fa8a04af41957499afdd4082b9b702ffca9a4370", - "parent_tenure_start_block_id": "0cfec8433849d353ad6b2fe1173da143e3d4a3ab452588a14eb074d0181ac202", - "reward_cycle": 8, - "tenure_start_block_id": "0425099d51547c714df6a7864c040c1a605b198ff07f71d19a823139c88a35f8", - "tip_block_id": "52d64f5e47abc7666c4fed3fe850f381f93f2d588ee2a92a4e07b44f14588d5e", - "tip_height": 416 -} diff --git a/docs/rpc/openapi.yaml b/docs/rpc/openapi.yaml index f33e0dca732..ceaf0e4a9d9 100644 --- a/docs/rpc/openapi.yaml +++ b/docs/rpc/openapi.yaml @@ -1,13 +1,12 @@ openapi: 3.0.2 servers: - - url: http://localhost:20443 + - url: http://localhost:20443/ description: Local info: - title: Stacks 2.0+ RPC API + title: Stacks 2.0 RPC API version: '1.0.0' description: | This is the documentation for the `stacks-node` RPC interface. - license: CC-0 paths: /v2/transactions: @@ -40,7 +39,6 @@ paths: $ref: ./api/transaction/post-core-node-transactions-error.schema.json example: $ref: ./api/transaction/post-core-node-transactions-error.example.json - /v2/burn_ops/{burn_height}/{op_type}: get: summary: Get burn operations @@ -48,19 +46,6 @@ paths: tags: - Info operationId: get_burn_ops - parameters: - - name: burn_height - in: path - required: true - description: height of the burnchain (Bitcoin) - schema: - type: integer - - name: op_type - in: path - required: true - description: name of the burnchain operation type - schema: - type: string responses: 200: description: Burn operations list @@ -76,7 +61,6 @@ paths: peg_out_fulfill: value: $ref: ./api/core-node/get-burn-ops-peg-out-fulfill.example.json - /v2/contracts/interface/{contract_address}/{contract_name}: get: summary: Get contract interface @@ -576,8 +560,8 @@ paths: operationId: post_block_proposal description: | Used by stackers to validate a proposed Stacks block from a miner. - - **This API endpoint requires a basic Authorization header.** + + **This endpoint will only accept requests over the local loopback network interface.** responses: 202: description: Block proposal has been accepted for processing. The result will be returned via the event observer. @@ -610,13 +594,6 @@ paths: Used to get stacker and signer set information for a given cycle. This will only return information for cycles started in Epoch-2.5 where PoX-4 was active and subsequent cycles. - parameters: - - name: cycle_number - in: path - required: true - description: reward cycle number - schema: - type: integer responses: 200: description: Information for the given reward cycle @@ -630,80 +607,3 @@ paths: application/json: example: $ref: ./api/core-node/get_stacker_set.400.example.json - - /v3/blocks/{block_id}: - get: - summary: Fetch a Nakamoto block - tags: - - Blocks - operationId: get_block_v3 - description: - Fetch a Nakamoto block by its index block hash. - parameters: - - name: block_id - in: path - description: The block's ID hash - required: true - schema: - type: string - responses: - 200: - description: The raw SIP-003-encoded block will be returned. 
- content: - application/octet-stream: - schema: - type: string - format: binary - 404: - description: The block could not be found - content: - application/text-plain: {} - - /v3/tenures/info: - get: - summary: Fetch metadata about the ongoing Nakamoto tenure - tags: - - Blocks - operationId: get_tenure_info - description: - Fetch metadata about the ongoing Nakamoto tenure. This information is sufficient to obtain and authenticate the highest complete tenure, as well as obtain new tenure blocks. - responses: - 200: - description: Metadata about the ongoing tenure - content: - application/json: - example: - $ref: ./api/core-node/get_tenure_info.json - - /v3/tenures/{block_id}: - get: - summary: Fetch a sequence of Nakamoto blocks in a tenure - tags: - - Blocks - operationId: get_tenures - description: - Fetch a sequence of Nakamoto blocks in a tenure. The blocks will be served in order from highest to lowest. The blocks will be encoded in their SIP-003 wire format, and concatenated together. - responses: - 200: - description: SIP-003-encoded Nakamoto blocks, concatenated together - content: - application/octet-stream: - schema: - type: string - format: binary - parameters: - - name: block_id - in: path - description: - The tenure-start block ID of the tenure to query - required: true - schema: - type: string - - name: stop - in: query - description: - The block ID hash of the highest block in this tenure that is already known to the caller. Neither the corresponding block nor any of its ancestors will be served. This is used to fetch tenure blocks that the caller does not have. - required: false - schema: - type: string - diff --git a/libsigner/Cargo.toml b/libsigner/Cargo.toml index e04dcbbdc1a..4b1f21eef75 100644 --- a/libsigner/Cargo.toml +++ b/libsigner/Cargo.toml @@ -44,8 +44,8 @@ features = ["arbitrary_precision", "unbounded_depth"] version = "0.24.3" features = ["serde", "recovery"] -[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] +[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(target_env = "msvc")))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } -[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os = "windows")))'.dependencies] +[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), target_env = "msvc"))'.dependencies] sha2 = { version = "0.10" } diff --git a/libsigner/src/events.rs b/libsigner/src/events.rs index 2d156559ff2..94bb17a85b4 100644 --- a/libsigner/src/events.rs +++ b/libsigner/src/events.rs @@ -27,18 +27,15 @@ use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; use blockstack_lib::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, ValidateRejectCode, }; -use blockstack_lib::net::stackerdb::MINER_SLOT_COUNT; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; use serde::{Deserialize, Serialize}; -use serde_json::Value; use stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, }; pub use stacks_common::consts::SIGNER_SLOTS_PER_USER; -use stacks_common::types::chainstate::StacksPublicKey; use stacks_common::util::hash::Sha512Trunc256Sum; use tiny_http::{ Method as HttpMethod, Request as 
HttpRequest, Response as HttpResponse, Server as HttpServer,
@@ -53,29 +50,11 @@ use wsts::state_machine::signer;
 use crate::http::{decode_http_body, decode_http_request};
 use crate::{EventError, SignerMessage};
 
-#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
-/// BlockProposal sent to signers
-pub struct BlockProposalSigners {
-    /// The block itself
-    pub block: NakamotoBlock,
-    /// The burn height the block is mined during
-    pub burn_height: u64,
-    /// The reward cycle the block is mined during
-    pub reward_cycle: u64,
-}
-
 /// Event enum for newly-arrived signer subscribed events
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub enum SignerEvent {
-    /// A miner sent a message over .miners
-    /// The `Vec<BlockProposalSigners>` will contain any block proposals made by the miner during this StackerDB event.
-    /// The `Vec<SignerMessage>` will contain any signer WSTS messages made by the miner while acting as a coordinator.
-    /// The `Option<StacksPublicKey>` will contain the message sender's public key if either of the vecs is non-empty.
-    MinerMessages(
-        Vec<BlockProposalSigners>,
-        Vec<SignerMessage>,
-        Option<StacksPublicKey>,
-    ),
+    /// The miner proposed blocks for signers to observe and sign
+    ProposedBlocks(Vec<NakamotoBlock>),
     /// The signer messages for other signers and miners to observe
     /// The u32 is the signer set to which the message belongs (either 0 or 1)
     SignerMessages(u32, Vec<SignerMessage>),
@@ -83,28 +62,6 @@ pub enum SignerEvent {
     BlockValidationResponse(BlockValidateResponse),
     /// Status endpoint request
     StatusCheck,
-    /// A new burn block event was received with the given burnchain block height
-    NewBurnBlock(u64),
-}
-
-impl StacksMessageCodec for BlockProposalSigners {
-    fn consensus_serialize<W: Write>(&self, fd: &mut W) -> Result<(), CodecError> {
-        self.block.consensus_serialize(fd)?;
-        self.burn_height.consensus_serialize(fd)?;
-        self.reward_cycle.consensus_serialize(fd)?;
-        Ok(())
-    }
-
-    fn consensus_deserialize<R: Read>(fd: &mut R) -> Result<Self, CodecError> {
-        let block = NakamotoBlock::consensus_deserialize(fd)?;
-        let burn_height = u64::consensus_deserialize(fd)?;
-        let reward_cycle = u64::consensus_deserialize(fd)?;
-        Ok(BlockProposalSigners {
-            block,
-            burn_height,
-            reward_cycle,
-        })
-    }
-}
 
 /// Trait to implement a stop-signaler for the event receiver thread.
@@ -238,15 +195,11 @@ impl EventStopSignaler for SignerStopSignaler {
             // We need to send actual data to trigger the event receiver
             let body = "Yo. Shut this shit down!".to_string();
             let req = format!(
-                "POST /shutdown HTTP/1.1\r\nHost: {}\r\nConnection: close\r\nContent-Length: {}\r\nContent-Type: text/plain\r\n\r\n{}",
-                self.local_addr,
-                body.len(),
+                "POST /shutdown HTTP/1.0\r\nContent-Length: {}\r\n\r\n{}",
+                &body.len(),
                 body
             );
-            match stream.write_all(req.as_bytes()) {
-                Err(e) => error!("Failed to send shutdown request: {}", e),
-                _ => (),
-            };
+            stream.write_all(req.as_bytes()).unwrap();
         }
     }
 }
@@ -267,14 +220,12 @@ impl EventReceiver for SignerEventReceiver {
     /// Errors are recoverable -- the caller should call this method again even if it returns an
     /// error.
     fn next_event(&mut self) -> Result<SignerEvent, EventError> {
-        self.with_server(|event_receiver, http_server, _is_mainnet| {
+        self.with_server(|event_receiver, http_server, is_mainnet| {
             // were we asked to terminate?
if event_receiver.is_stopped() { return Err(EventError::Terminated); } - debug!("Request handling"); let request = http_server.recv()?; - debug!("Got request"; "method" => %request.method(), "path" => request.url()); if request.url() == "/status" { request @@ -290,26 +241,21 @@ impl EventReceiver for SignerEventReceiver { ))); } if request.url() == "/stackerdb_chunks" { - process_stackerdb_event(event_receiver.local_addr, request) - .map_err(|e| { - error!("Error processing stackerdb_chunks message"; "err" => ?e); - e - }) + process_stackerdb_event(event_receiver.local_addr, request, is_mainnet) } else if request.url() == "/proposal_response" { process_proposal_response(request) - } else if request.url() == "/new_burn_block" { - process_new_burn_block_event(request) } else { let url = request.url().to_string(); - // `/new_block` is expected, but not specifically handled. do not log. - if &url != "/new_block" { - debug!( - "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", - event_receiver.local_addr, - url - ); + + info!( + "[{:?}] next_event got request with unexpected url {}, return OK so other side doesn't keep sending this", + event_receiver.local_addr, + request.url() + ); + + if let Err(e) = request.respond(HttpResponse::empty(200u16)) { + error!("Failed to respond to request: {:?}", &e); } - ack_dispatcher(request); Err(EventError::UnrecognizedEvent(url)) } })? @@ -365,22 +311,20 @@ impl EventReceiver for SignerEventReceiver { } } -fn ack_dispatcher(request: HttpRequest) { - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - }; -} - /// Process a stackerdb event from the node fn process_stackerdb_event( local_addr: Option, mut request: HttpRequest, + is_mainnet: bool, ) -> Result { debug!("Got stackerdb_chunks event"); let mut body = String::new(); if let Err(e) = request.as_reader().read_to_string(&mut body) { error!("Failed to read body: {:?}", &e); - ack_dispatcher(request); + + if let Err(e) = request.respond(HttpResponse::empty(200u16)) { + error!("Failed to respond to request: {:?}", &e); + }; return Err(EventError::MalformedRequest(format!( "Failed to read body: {:?}", &e @@ -390,84 +334,47 @@ fn process_stackerdb_event( let event: StackerDBChunksEvent = serde_json::from_slice(body.as_bytes()) .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event_contract_id = event.contract_id.clone(); - - let signer_event = match SignerEvent::try_from(event) { - Err(e) => { - info!( - "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", - local_addr, - event_contract_id - ); - ack_dispatcher(request); - return Err(e.into()); + let signer_event = if event.contract_id == boot_code_id(MINERS_NAME, is_mainnet) { + let blocks: Vec = event + .modified_slots + .iter() + .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) + .collect(); + SignerEvent::ProposedBlocks(blocks) + } else if event.contract_id.name.to_string().starts_with(SIGNERS_NAME) + && event.contract_id.issuer.1 == [0u8; 20] + { + let Some((signer_set, _)) = + get_signers_db_signer_set_message_id(event.contract_id.name.as_str()) + else { + return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); + }; + // signer-XXX-YYY boot contract + let signer_messages: Vec = event + .modified_slots + .iter() + .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) + .collect(); + 
SignerEvent::SignerMessages(signer_set, signer_messages) + } else { + info!( + "[{:?}] next_event got event from an unexpected contract id {}, return OK so other side doesn't keep sending this", + local_addr, + event.contract_id + ); + if let Err(e) = request.respond(HttpResponse::empty(200u16)) { + error!("Failed to respond to request: {:?}", &e); } - Ok(x) => x, + return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); }; - ack_dispatcher(request); + if let Err(e) = request.respond(HttpResponse::empty(200u16)) { + error!("Failed to respond to request: {:?}", &e); + } Ok(signer_event) } -impl TryFrom for SignerEvent { - type Error = EventError; - - fn try_from(event: StackerDBChunksEvent) -> Result { - let signer_event = if event.contract_id.name.as_str() == MINERS_NAME - && event.contract_id.is_boot() - { - let mut blocks = vec![]; - let mut messages = vec![]; - let mut miner_pk = None; - for chunk in event.modified_slots { - miner_pk = Some(chunk.recover_pk().map_err(|e| { - EventError::MalformedRequest(format!( - "Failed to recover PK from StackerDB chunk: {e}" - )) - })?); - if chunk.slot_id % MINER_SLOT_COUNT == 0 { - // block - let Ok(block) = - BlockProposalSigners::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - blocks.push(block); - } else if chunk.slot_id % MINER_SLOT_COUNT == 1 { - // message - let Ok(msg) = SignerMessage::consensus_deserialize(&mut chunk.data.as_slice()) - else { - continue; - }; - messages.push(msg); - } else { - return Err(EventError::UnrecognizedEvent( - "Unrecognized slot_id for miners contract".into(), - )); - }; - } - SignerEvent::MinerMessages(blocks, messages, miner_pk) - } else if event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot() { - let Some((signer_set, _)) = - get_signers_db_signer_set_message_id(event.contract_id.name.as_str()) - else { - return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); - }; - // signer-XXX-YYY boot contract - let signer_messages: Vec = event - .modified_slots - .iter() - .filter_map(|chunk| read_next::(&mut &chunk.data[..]).ok()) - .collect(); - SignerEvent::SignerMessages(signer_set, signer_messages) - } else { - return Err(EventError::UnrecognizedStackerDBContract(event.contract_id)); - }; - Ok(signer_event) - } -} - /// Process a proposal response from the node fn process_proposal_response(mut request: HttpRequest) -> Result { debug!("Got proposal_response event"); @@ -494,39 +401,7 @@ fn process_proposal_response(mut request: HttpRequest) -> Result Result { - debug!("Got burn_block event"); - let mut body = String::new(); - if let Err(e) = request.as_reader().read_to_string(&mut body) { - error!("Failed to read body: {:?}", &e); - - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - return Err(EventError::MalformedRequest(format!( - "Failed to read body: {:?}", - &e - ))); - } - #[derive(Debug, Deserialize)] - struct TempBurnBlockEvent { - burn_block_hash: String, - burn_block_height: u64, - reward_recipients: Vec, - reward_slot_holders: Vec, - burn_amount: u64, - } - let temp: TempBurnBlockEvent = serde_json::from_slice(body.as_bytes()) - .map_err(|e| EventError::Deserialize(format!("Could not decode body to JSON: {:?}", &e)))?; - let event = SignerEvent::NewBurnBlock(temp.burn_block_height); - if let Err(e) = request.respond(HttpResponse::empty(200u16)) { - error!("Failed to respond to request: {:?}", &e); - } - Ok(event) -} - -pub fn 
get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { +fn get_signers_db_signer_set_message_id(name: &str) -> Option<(u32, u32)> { // Splitting the string by '-' let parts: Vec<&str> = name.split('-').collect(); if parts.len() != 3 { diff --git a/libsigner/src/http.rs b/libsigner/src/http.rs index fe841415a9c..2618ed5626c 100644 --- a/libsigner/src/http.rs +++ b/libsigner/src/http.rs @@ -18,9 +18,9 @@ use std::io; use std::io::{Read, Write}; use std::net::SocketAddr; -use hashbrown::HashMap; use stacks_common::codec::MAX_MESSAGE_LEN; use stacks_common::deps_common::httparse; +use stacks_common::types::StacksHashMap as HashMap; use stacks_common::util::chunked_encoding::*; use crate::error::{EventError, RPCError}; @@ -238,12 +238,12 @@ pub fn run_http_request( let req_txt = if let Some(content_type) = content_type { format!( - "{} {} HTTP/1.1\r\nHost: {}\r\nConnection: close\r\nContent-Type: {}\r\n{}User-Agent: libsigner/0.1\r\nAccept: */*\r\n\r\n", + "{} {} HTTP/1.0\r\nHost: {}\r\nConnection: close\r\nContent-Type: {}\r\n{}User-Agent: libsigner/0.1\r\nAccept: */*\r\n\r\n", verb, path, host, content_type, content_length_hdr ) } else { format!( - "{} {} HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n{}User-Agent: libsigner/0.1\r\nAccept: */*\r\n\r\n", + "{} {} HTTP/1.0\r\nHost: {}\r\nConnection: close\r\n{}User-Agent: libsigner/0.1\r\nAccept: */*\r\n\r\n", verb, path, host, content_length_hdr ) }; diff --git a/libsigner/src/libsigner.rs b/libsigner/src/libsigner.rs index 0b16e97e19d..e48f4014e1f 100644 --- a/libsigner/src/libsigner.rs +++ b/libsigner/src/libsigner.rs @@ -42,16 +42,13 @@ mod http; mod messages; mod runloop; mod session; -mod signer_set; pub use crate::error::{EventError, RPCError}; pub use crate::events::{ - BlockProposalSigners, EventReceiver, EventStopSignaler, SignerEvent, SignerEventReceiver, - SignerStopSignaler, + EventReceiver, EventStopSignaler, SignerEvent, SignerEventReceiver, SignerStopSignaler, }; pub use crate::messages::{ - BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerMessage, + BlockRejection, BlockResponse, RejectCode, SignerMessage, BLOCK_MSG_ID, TRANSACTIONS_MSG_ID, }; pub use crate::runloop::{RunningSigner, Signer, SignerRunLoop}; pub use crate::session::{SignerSession, StackerDBSession}; -pub use crate::signer_set::{Error as ParseSignerEntriesError, SignerEntries}; diff --git a/libsigner/src/messages.rs b/libsigner/src/messages.rs index 1b6e7f179fc..a5c7fe2ffee 100644 --- a/libsigner/src/messages.rs +++ b/libsigner/src/messages.rs @@ -14,23 +14,13 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . -//! Messages in the signer-miner interaction have a multi-level hierarchy. -//! Signers send messages to each other through Packet messages. These messages, -//! as well as `BlockResponse`, `Transactions`, and `DkgResults` messages are stored -//! StackerDBs based on the `MessageSlotID` for the particular message type. This is a -//! shared identifier space between the four message kinds and their subtypes. -//! -//! These four message kinds are differentiated with a `SignerMessageTypePrefix` -//! and the `SignerMessage` enum. 
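The removed doc comment above describes a shared slot-identifier space across the message kinds, with signer messages landing in per-set `signers-XXX-YYY` boot contracts; `get_signers_db_signer_set_message_id` earlier in this diff splits such a name on `-` and expects exactly three parts. As an illustrative aside (assuming `SIGNERS_NAME` is `"signers"`, which is not shown in this diff), the naming convention and its inverse look roughly like:

```rust
// Illustrative only: the "signers-{set}-{msg_id}" convention implied by
// get_signers_db_signer_set_message_id; these helpers are not part of
// the codebase.
fn signers_contract_name(signer_set: u32, msg_id: u32) -> String {
    format!("signers-{signer_set}-{msg_id}")
}

fn parse_signers_contract_name(name: &str) -> Option<(u32, u32)> {
    let parts: Vec<&str> = name.split('-').collect();
    if parts.len() != 3 || parts[0] != "signers" {
        return None;
    }
    Some((parts[1].parse().ok()?, parts[2].parse().ok()?))
}

fn main() {
    // e.g. a block response (slot 10) for signer set 1:
    assert_eq!(signers_contract_name(1, 10), "signers-1-10");
    assert_eq!(parse_signers_contract_name("signers-1-10"), Some((1, 10)));
}
```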
- -use std::fmt::{Debug, Display}; +use std::hash::Hash; use std::io::{Read, Write}; use std::net::{SocketAddr, TcpListener, TcpStream}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; -use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::chainstate::nakamoto::NakamotoBlock; use blockstack_lib::chainstate::stacks::events::StackerDBChunksEvent; use blockstack_lib::chainstate::stacks::{StacksTransaction, ThresholdSignature}; @@ -40,13 +30,13 @@ use blockstack_lib::net::api::postblock_proposal::{ use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::types::serialization::SerializationError; use clarity::vm::types::QualifiedContractIdentifier; -use hashbrown::{HashMap, HashSet}; use serde::{Deserialize, Serialize}; use stacks_common::codec::{ read_next, read_next_at_most, read_next_exact, write_next, Error as CodecError, StacksMessageCodec, }; use stacks_common::consts::SIGNER_SLOTS_PER_USER; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::util::hash::Sha512Trunc256Sum; use tiny_http::{ Method as HttpMethod, Request as HttpRequest, Response as HttpResponse, Server as HttpServer, @@ -65,67 +55,28 @@ use wsts::state_machine::{signer, SignError}; use crate::http::{decode_http_body, decode_http_request}; use crate::EventError; -define_u8_enum!( -/// Enum representing the stackerdb message identifier: this is -/// the contract index in the signers contracts (i.e., X in signers-0-X) -MessageSlotID { - /// DkgBegin message - DkgBegin = 0, - /// DkgPrivateBegin - DkgPrivateBegin = 1, - /// DkgEndBegin - DkgEndBegin = 2, - /// DkgEnd - DkgEnd = 3, - /// DkgPublicshares - DkgPublicShares = 4, - /// DkgPrivateShares - DkgPrivateShares = 5, - /// NonceRequest - NonceRequest = 6, - /// NonceResponse - NonceResponse = 7, - /// SignatureShareRequest - SignatureShareRequest = 8, - /// SignatureShareResponse - SignatureShareResponse = 9, - /// Block proposal responses for miners to observe - BlockResponse = 10, - /// Transactions list for miners and signers to observe - Transactions = 11, - /// DKG Results - DkgResults = 12 -}); +// The slot IDS for each message type +const DKG_BEGIN_MSG_ID: u32 = 0; +const DKG_PRIVATE_BEGIN_MSG_ID: u32 = 1; +const DKG_END_BEGIN_MSG_ID: u32 = 2; +const DKG_END_MSG_ID: u32 = 3; +const DKG_PUBLIC_SHARES_MSG_ID: u32 = 4; +const DKG_PRIVATE_SHARES_MSG_ID: u32 = 5; +const NONCE_REQUEST_MSG_ID: u32 = 6; +const NONCE_RESPONSE_MSG_ID: u32 = 7; +const SIGNATURE_SHARE_REQUEST_MSG_ID: u32 = 8; +const SIGNATURE_SHARE_RESPONSE_MSG_ID: u32 = 9; +/// The slot ID for the block response for miners to observe +pub const BLOCK_MSG_ID: u32 = 10; +/// The slot ID for the transactions list for miners and signers to observe +pub const TRANSACTIONS_MSG_ID: u32 = 11; define_u8_enum!(SignerMessageTypePrefix { BlockResponse = 0, Packet = 1, - Transactions = 2, - DkgResults = 3 + Transactions = 2 }); -impl MessageSlotID { - /// Return the StackerDB contract corresponding to messages of this type - pub fn stacker_db_contract( - &self, - mainnet: bool, - reward_cycle: u64, - ) -> QualifiedContractIdentifier { - NakamotoSigners::make_signers_db_contract_id(reward_cycle, self.to_u32(), mainnet) - } - - /// Return the u32 identifier for the message slot (used to index the contract that stores it) - pub fn to_u32(&self) -> u32 { - self.to_u8().into() - } -} - -impl Display for MessageSlotID { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 
- write!(f, "{:?}({})", self, self.to_u8()) - } -} - impl TryFrom for SignerMessageTypePrefix { type Error = CodecError; fn try_from(value: u8) -> Result { @@ -141,7 +92,6 @@ impl From<&SignerMessage> for SignerMessageTypePrefix { SignerMessage::Packet(_) => SignerMessageTypePrefix::Packet, SignerMessage::BlockResponse(_) => SignerMessageTypePrefix::BlockResponse, SignerMessage::Transactions(_) => SignerMessageTypePrefix::Transactions, - SignerMessage::DkgResults { .. } => SignerMessageTypePrefix::DkgResults, } } } @@ -219,7 +169,7 @@ impl From<&RejectCode> for RejectCodeTypePrefix { } /// The messages being sent through the stacker db contracts -#[derive(Clone, PartialEq, Serialize, Deserialize)] +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum SignerMessage { /// The signed/validated Nakamoto block for miners to observe BlockResponse(BlockResponse), @@ -227,101 +177,30 @@ pub enum SignerMessage { Packet(Packet), /// The list of transactions for miners and signers to observe that this signer cares about Transactions(Vec), - /// The results of a successful DKG - DkgResults { - /// The aggregate key from the DKG round - aggregate_key: Point, - /// The polynomial commits used to construct the aggregate key - party_polynomials: Vec<(u32, PolyCommitment)>, - }, -} - -impl Debug for SignerMessage { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::BlockResponse(b) => Debug::fmt(b, f), - Self::Packet(p) => Debug::fmt(p, f), - Self::Transactions(t) => f.debug_tuple("Transactions").field(t).finish(), - Self::DkgResults { - aggregate_key, - party_polynomials, - } => { - let party_polynomials: Vec<_> = party_polynomials - .iter() - .map(|(ix, commit)| (ix, commit.to_string())) - .collect(); - f.debug_struct("DkgResults") - .field("aggregate_key", &aggregate_key.to_string()) - .field("party_polynomials", &party_polynomials) - .finish() - } - } - } } impl SignerMessage { /// Helper function to determine the slot ID for the provided stacker-db writer id - pub fn msg_id(&self) -> MessageSlotID { + pub fn msg_id(&self) -> u32 { match self { Self::Packet(packet) => match packet.msg { - Message::DkgBegin(_) => MessageSlotID::DkgBegin, - Message::DkgPrivateBegin(_) => MessageSlotID::DkgPrivateBegin, - Message::DkgEndBegin(_) => MessageSlotID::DkgEndBegin, - Message::DkgEnd(_) => MessageSlotID::DkgEnd, - Message::DkgPublicShares(_) => MessageSlotID::DkgPublicShares, - Message::DkgPrivateShares(_) => MessageSlotID::DkgPrivateShares, - Message::NonceRequest(_) => MessageSlotID::NonceRequest, - Message::NonceResponse(_) => MessageSlotID::NonceResponse, - Message::SignatureShareRequest(_) => MessageSlotID::SignatureShareRequest, - Message::SignatureShareResponse(_) => MessageSlotID::SignatureShareResponse, + Message::DkgBegin(_) => DKG_BEGIN_MSG_ID, + Message::DkgPrivateBegin(_) => DKG_PRIVATE_BEGIN_MSG_ID, + Message::DkgEndBegin(_) => DKG_END_BEGIN_MSG_ID, + Message::DkgEnd(_) => DKG_END_MSG_ID, + Message::DkgPublicShares(_) => DKG_PUBLIC_SHARES_MSG_ID, + Message::DkgPrivateShares(_) => DKG_PRIVATE_SHARES_MSG_ID, + Message::NonceRequest(_) => NONCE_REQUEST_MSG_ID, + Message::NonceResponse(_) => NONCE_RESPONSE_MSG_ID, + Message::SignatureShareRequest(_) => SIGNATURE_SHARE_REQUEST_MSG_ID, + Message::SignatureShareResponse(_) => SIGNATURE_SHARE_RESPONSE_MSG_ID, }, - Self::BlockResponse(_) => MessageSlotID::BlockResponse, - Self::Transactions(_) => MessageSlotID::Transactions, - Self::DkgResults { .. 
} => MessageSlotID::DkgResults, + Self::BlockResponse(_) => BLOCK_MSG_ID, + Self::Transactions(_) => TRANSACTIONS_MSG_ID, } } } -impl SignerMessage { - /// Provide an interface for consensus serializing a DkgResults `SignerMessage` - /// without constructing the DkgResults struct (this eliminates a clone) - pub fn serialize_dkg_result<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - SignerMessageTypePrefix::DkgResults - .to_u8() - .consensus_serialize(fd)?; - Self::serialize_dkg_result_components(fd, aggregate_key, party_polynomials) - } - - /// Serialize the internal components of DkgResults (this eliminates a clone) - fn serialize_dkg_result_components<'a, W: Write, I>( - fd: &mut W, - aggregate_key: &Point, - party_polynomials: I, - ) -> Result<(), CodecError> - where - I: ExactSizeIterator + Iterator, - { - aggregate_key.inner_consensus_serialize(fd)?; - let polynomials_len: u32 = party_polynomials - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - polynomials_len.consensus_serialize(fd)?; - for (party_id, polynomial) in party_polynomials { - party_id.consensus_serialize(fd)?; - polynomial.inner_consensus_serialize(fd)?; - } - Ok(()) - } -} - impl StacksMessageCodec for SignerMessage { fn consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &(SignerMessageTypePrefix::from(self) as u8))?; @@ -335,16 +214,6 @@ impl StacksMessageCodec for SignerMessage { SignerMessage::Transactions(transactions) => { write_next(fd, transactions)?; } - SignerMessage::DkgResults { - aggregate_key, - party_polynomials, - } => { - Self::serialize_dkg_result_components( - fd, - aggregate_key, - party_polynomials.iter().map(|(a, b)| (a, b)), - )?; - } }; Ok(()) } @@ -365,24 +234,6 @@ impl StacksMessageCodec for SignerMessage { let transactions = read_next::, _>(fd)?; SignerMessage::Transactions(transactions) } - SignerMessageTypePrefix::DkgResults => { - let aggregate_key = Point::inner_consensus_deserialize(fd)?; - let party_polynomial_len = u32::consensus_deserialize(fd)?; - let mut party_polynomials = Vec::with_capacity( - party_polynomial_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..party_polynomial_len { - let party_id = u32::consensus_deserialize(fd)?; - let polynomial = PolyCommitment::inner_consensus_deserialize(fd)?; - party_polynomials.push((party_id, polynomial)); - } - Self::DkgResults { - aggregate_key, - party_polynomials, - } - } }; Ok(message) } @@ -399,7 +250,7 @@ impl StacksMessageCodecExtensions for Scalar { write_next(fd, &self.to_bytes()) } fn inner_consensus_deserialize(fd: &mut R) -> Result { - let scalar_bytes: [u8; 32] = read_next(fd)?; + let scalar_bytes = read_next::<[u8; 32], _>(fd)?; Ok(Scalar::from(scalar_bytes)) } } @@ -416,51 +267,6 @@ impl StacksMessageCodecExtensions for Point { } } -impl StacksMessageCodecExtensions for PolyCommitment { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - let commit_len: u32 = self - .poly - .len() - .try_into() - .map_err(|_| CodecError::ArrayTooLong)?; - commit_len.consensus_serialize(fd)?; - for poly in self.poly.iter() { - poly.inner_consensus_serialize(fd)?; - } - Ok(()) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = ID::inner_consensus_deserialize(fd)?; - let commit_len = u32::consensus_deserialize(fd)?; - let mut poly = Vec::with_capacity( - 
commit_len - .try_into() - .expect("FATAL: u32 could not fit in usize"), - ); - for _ in 0..commit_len { - poly.push(Point::inner_consensus_deserialize(fd)?); - } - Ok(Self { id, poly }) - } -} - -impl StacksMessageCodecExtensions for ID { - fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { - self.id.inner_consensus_serialize(fd)?; - self.kG.inner_consensus_serialize(fd)?; - self.kca.inner_consensus_serialize(fd) - } - - fn inner_consensus_deserialize(fd: &mut R) -> Result { - let id = Scalar::inner_consensus_deserialize(fd)?; - let k_g = Point::inner_consensus_deserialize(fd)?; - let kca = Scalar::inner_consensus_deserialize(fd)?; - Ok(Self { id, kG: k_g, kca }) - } -} - #[allow(non_snake_case)] impl StacksMessageCodecExtensions for TupleProof { fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { @@ -494,7 +300,7 @@ impl StacksMessageCodecExtensions for BadPrivateShare { impl StacksMessageCodecExtensions for HashSet { fn inner_consensus_serialize(&self, fd: &mut W) -> Result<(), CodecError> { write_next(fd, &(self.len() as u32))?; - for i in self { + for i in self.iter() { write_next(fd, i)?; } Ok(()) @@ -736,12 +542,14 @@ impl StacksMessageCodecExtensions for DkgPrivateShares { let id = read_next::(fd)?; let num_share_map = read_next::(fd)?; let mut share_map = HashMap::new(); + for _ in 0..num_share_map { let id = read_next::(fd)?; let share: Vec = read_next(fd)?; share_map.insert(id, share); } - shares.push((id, share_map)); + //let share_map: hashbrown::HashMap<_, _> = share_map.into(); + shares.push((id, share_map.into())); } Ok(DkgPrivateShares { dkg_id, @@ -1018,27 +826,6 @@ pub enum BlockResponse { Rejected(BlockRejection), } -impl std::fmt::Display for BlockResponse { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - BlockResponse::Accepted(a) => { - write!( - f, - "BlockAccepted: signer_sighash = {}, signature = {}", - a.0, a.1 - ) - } - BlockResponse::Rejected(r) => { - write!( - f, - "BlockRejected: signer_sighash = {}, code = {}, reason = {}", - r.reason_code, r.reason, r.signer_signature_hash - ) - } - } - } -} - impl BlockResponse { /// Create a new accepted BlockResponse for the provided block signer signature hash and signature pub fn accepted(hash: Sha512Trunc256Sum, sig: Signature) -> Self { @@ -1298,6 +1085,7 @@ impl From for SignerMessage { #[cfg(test)] mod test { + use blockstack_lib::chainstate::stacks::{ TransactionAnchorMode, TransactionAuth, TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, @@ -1310,18 +1098,6 @@ mod test { use wsts::common::Signature; use super::{StacksMessageCodecExtensions, *}; - - #[test] - fn signer_slots_count_is_sane() { - let slot_identifiers_len = MessageSlotID::ALL.len(); - assert!( - SIGNER_SLOTS_PER_USER as usize >= slot_identifiers_len, - "stacks_common::SIGNER_SLOTS_PER_USER ({}) must be >= slot identifiers ({})", - SIGNER_SLOTS_PER_USER, - slot_identifiers_len, - ); - } - #[test] fn serde_reject_code() { let code = RejectCode::ValidationFailed(ValidateRejectCode::InvalidBlock); @@ -1565,6 +1341,7 @@ mod test { rng.fill(&mut bytes[..]); shares_map.insert(i, bytes.to_vec()); } + let shares_map: hashbrown::HashMap<_, _> = shares_map.into(); shares.push((i, shares_map)); } test_fixture_packet(Message::DkgPrivateShares(DkgPrivateShares { diff --git a/libsigner/src/runloop.rs b/libsigner/src/runloop.rs index 0b7eb2dbcf6..32b03260085 100644 --- a/libsigner/src/runloop.rs +++ b/libsigner/src/runloop.rs @@ 
-93,7 +93,12 @@ pub trait SignerRunLoop { } /// The top-level signer implementation -pub struct Signer { +pub struct Signer< + CMD: Send, + R: Send, + SL: SignerRunLoop + Send + Sync, + EV: EventReceiver + Send, +> { /// the runloop itself signer_loop: Option, /// the event receiver to use @@ -102,6 +107,8 @@ pub struct Signer { command_receiver: Option>, /// the result sender to use result_sender: Option>, + /// marker to permit the R type + _phantom: PhantomData, } /// The running signer implementation @@ -189,7 +196,13 @@ pub fn set_runloop_signal_handler(mut st }).expect("FATAL: failed to set signal handler"); } -impl Signer { +impl< + CMD: Send + 'static, + R: Send + 'static, + SL: SignerRunLoop + Send + Sync + 'static, + EV: EventReceiver + Send + 'static, + > Signer +{ /// Create a new signer with the given runloop and event receiver. pub fn new( runloop: SL, @@ -202,17 +215,10 @@ impl Signer { event_receiver: Some(event_receiver), command_receiver: Some(command_receiver), result_sender: Some(result_sender), + _phantom: PhantomData, } } -} -impl< - CMD: Send + 'static, - R: Send + 'static, - SL: SignerRunLoop + Send + 'static, - EV: EventReceiver + Send + 'static, - > Signer -{ /// This is a helper function to spawn both the runloop and event receiver in their own /// threads. Advanced signers may not need this method, and instead opt to run the receiver /// and runloop directly. However, this method is present to help signer developers to get diff --git a/libsigner/src/signer_set.rs b/libsigner/src/signer_set.rs deleted file mode 100644 index 119873fd1e4..00000000000 --- a/libsigner/src/signer_set.rs +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; -use hashbrown::{HashMap, HashSet}; -use stacks_common::types::chainstate::{StacksAddress, StacksPublicKey}; -use wsts::curve::ecdsa; -use wsts::curve::point::{Compressed, Point}; -use wsts::state_machine::PublicKeys; - -/// A reward set parsed into the structures required by WSTS party members and coordinators. -#[derive(Debug, Clone)] -pub struct SignerEntries { - /// The signer addresses mapped to signer id - pub signer_ids: HashMap, - /// The signer ids mapped to public key and key ids mapped to public keys - pub public_keys: PublicKeys, - /// The signer ids mapped to key ids - pub signer_key_ids: HashMap>, - /// The signer ids mapped to wsts public keys - pub signer_public_keys: HashMap, - /// The signer ids mapped to a hash set of key ids - /// The wsts coordinator uses a hash set for each signer since it needs to do lots of lookups - pub coordinator_key_ids: HashMap>, -} - -/// Parsing errors for `SignerEntries` -#[derive(Debug)] -pub enum Error { - /// A member of the signing set has a signing key buffer - /// which does not represent a ecdsa public key. 
- BadSignerPublicKey(String), - /// The number of signers was greater than u32::MAX - SignerCountOverflow, -} - -impl SignerEntries { - /// Try to parse the reward set defined by `NakamotoSignEntry` into the structures required - /// by WSTS party members and coordinators. - pub fn parse(is_mainnet: bool, reward_set: &[NakamotoSignerEntry]) -> Result { - let mut weight_end = 1; - let mut signer_key_ids = HashMap::with_capacity(reward_set.len()); - let mut signer_public_keys = HashMap::with_capacity(reward_set.len()); - let mut coordinator_key_ids = HashMap::with_capacity(4000); - let mut signer_ids = HashMap::with_capacity(reward_set.len()); - let mut wsts_signers = HashMap::new(); - let mut wsts_key_ids = HashMap::new(); - for (i, entry) in reward_set.iter().enumerate() { - let signer_id = u32::try_from(i).map_err(|_| Error::SignerCountOverflow)?; - let ecdsa_pk = - ecdsa::PublicKey::try_from(entry.signing_key.as_slice()).map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to ecdsa::PublicKey: {e}" - )) - })?; - let signer_public_key = Point::try_from(&Compressed::from(ecdsa_pk.to_bytes())) - .map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to wsts::Point: {e}" - )) - })?; - let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) - .map_err(|e| { - Error::BadSignerPublicKey(format!( - "Failed to convert signing key to StacksPublicKey: {e}" - )) - })?; - - let stacks_address = StacksAddress::p2pkh(is_mainnet, &stacks_public_key); - signer_ids.insert(stacks_address, signer_id); - - signer_public_keys.insert(signer_id, signer_public_key); - let weight_start = weight_end; - weight_end = weight_start + entry.weight; - let key_ids: HashSet = (weight_start..weight_end).collect(); - for key_id in key_ids.iter() { - wsts_key_ids.insert(*key_id, ecdsa_pk.clone()); - } - signer_key_ids.insert(signer_id, (weight_start..weight_end).collect()); - coordinator_key_ids.insert(signer_id, key_ids); - wsts_signers.insert(signer_id, ecdsa_pk); - } - - Ok(Self { - signer_ids, - public_keys: PublicKeys { - signers: wsts_signers, - key_ids: wsts_key_ids, - }, - signer_key_ids, - signer_public_keys, - coordinator_key_ids, - }) - } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_keys(&self) -> Result { - self.public_keys - .key_ids - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs in the WSTS group signature - pub fn count_signers(&self) -> Result { - self.public_keys - .signers - .len() - .try_into() - .map_err(|_| Error::SignerCountOverflow) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group signature - pub fn get_signing_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 7_f64 / 10_f64).ceil() as u32) - } - - /// Return the number of Key IDs required to sign a message with the WSTS group signature - pub fn get_dkg_threshold(&self) -> Result { - let num_keys = self.count_keys()?; - Ok((num_keys as f64 * 9_f64 / 10_f64).ceil() as u32) - } -} diff --git a/libsigner/src/tests/http.rs b/libsigner/src/tests/http.rs index d2b052fae9b..ce4cbd902de 100644 --- a/libsigner/src/tests/http.rs +++ b/libsigner/src/tests/http.rs @@ -17,7 +17,7 @@ use std::io::{Read, Write}; use std::{io, str}; -use hashbrown::HashMap; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::util::chunked_encoding::*; use crate::error::{EventError, 
RPCError}; diff --git a/libsigner/src/tests/mod.rs b/libsigner/src/tests/mod.rs index 1d3e1f3cc0d..9f320b42fc1 100644 --- a/libsigner/src/tests/mod.rs +++ b/libsigner/src/tests/mod.rs @@ -135,12 +135,7 @@ fn test_simple_signer() { let ev = &thread_chunks[num_sent]; let body = serde_json::to_string(ev).unwrap(); - let req = format!( - "POST /stackerdb_chunks HTTP/1.1\r\nHost: {}\r\nConnection: close\r\nContent-Type: application/json\r\nContent-Length: {}\r\n\r\n{}", - endpoint, - &body.len(), - body - ); + let req = format!("POST /stackerdb_chunks HTTP/1.0\r\nConnection: close\r\nContent-Length: {}\r\n\r\n{}", &body.len(), body); debug!("Send:\n{}", &req); sock.write_all(req.as_bytes()).unwrap(); @@ -193,16 +188,13 @@ fn test_status_endpoint() { return; } }; - let req = format!( - "GET /status HTTP/1.1\r\nHost: {}\r\nConnection: close\r\n\r\n", - endpoint - ); + let req = "GET /status HTTP/1.0\r\nConnection: close\r\n\r\n"; sock.write_all(req.as_bytes()).unwrap(); let mut buf = [0; 128]; let _ = sock.read(&mut buf).unwrap(); let res_str = std::str::from_utf8(&buf).unwrap(); - let expected_status_res = "HTTP/1.1 200 OK\r\n"; + let expected_status_res = "HTTP/1.0 200 OK\r\n"; assert_eq!(expected_status_res, &res_str[..expected_status_res.len()]); sock.flush().unwrap(); }); diff --git a/libstackerdb/Cargo.toml b/libstackerdb/Cargo.toml index 0d54de5428b..53cf128edcf 100644 --- a/libstackerdb/Cargo.toml +++ b/libstackerdb/Cargo.toml @@ -26,8 +26,8 @@ clarity = { path = "../clarity" } version = "0.24.3" features = ["serde", "recovery"] -[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] +[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(target_env = "msvc")))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } -[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os = "windows")))'.dependencies] +[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), target_env = "msvc"))'.dependencies] sha2 = { version = "0.10" } diff --git a/libstackerdb/src/libstackerdb.rs b/libstackerdb/src/libstackerdb.rs index 8c38d8be7b5..df74075f64c 100644 --- a/libstackerdb/src/libstackerdb.rs +++ b/libstackerdb/src/libstackerdb.rs @@ -104,8 +104,6 @@ pub struct StackerDBChunkAckData { pub reason: Option, #[serde(skip_serializing_if = "Option::is_none")] pub metadata: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub code: Option, } impl SlotMetadata { @@ -194,12 +192,6 @@ impl StackerDBChunkData { Ok(()) } - pub fn recover_pk(&self) -> Result { - let digest = self.get_slot_metadata().auth_digest(); - StacksPublicKey::recover_to_pubkey(digest.as_bytes(), &self.sig) - .map_err(|ve| Error::VerifyingError(ve.to_string())) - } - /// Verify that this chunk was signed by the given /// public key hash (`addr`). Only fails if the underlying signing library fails. 
pub fn verify(&self, addr: &StacksAddress) -> Result { diff --git a/pox-locking/src/events.rs b/pox-locking/src/events.rs index 576d8d2c03d..e61ee916dc8 100644 --- a/pox-locking/src/events.rs +++ b/pox-locking/src/events.rs @@ -111,23 +111,10 @@ fn create_event_info_data_code( args: &[Value], response: &ResponseData, ) -> String { - // If a given burn block height is in a prepare phase, then the stacker will be in the _next_ reward cycle, so bump the cycle by 1 - // `prepare_offset` is 1 or 0, depending on whether current execution is in a prepare phase or not - let prepare_offset = r#" - (prepare-offset (if (< - (mod (- %height% (var-get first-burnchain-block-height)) (var-get pox-reward-cycle-length)) - (- (var-get pox-reward-cycle-length) (var-get pox-prepare-cycle-length)) - ) u0 u1)) - "#; - match function_name { "stack-stx" => { format!( r#" - (let ( - (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 {lock_period}))) - {prepare_offset} - ) {{ data: {{ ;; amount of ustx to lock. @@ -135,7 +122,7 @@ fn create_event_info_data_code( lock-amount: {lock_amount}, ;; burnchain height when the unlock finishes. ;; derived from args[3] - unlock-burn-height: unlock-burn-height, + unlock-burn-height: (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 {lock_period})), ;; PoX address tuple. ;; equal to args[1]. pox-addr: {pox_addr}, @@ -149,16 +136,8 @@ fn create_event_info_data_code( signer-sig: {signer_sig}, ;; equal to args[5] signer-key: {signer_key}, - ;; equal to args[6] - max-amount: {max_amount}, - ;; equal to args[7] - auth-id: {auth_id}, - ;; Get end cycle ID - end-cycle-id: (some (burn-height-to-reward-cycle unlock-burn-height)), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), }} - }}) + }} "#, lock_amount = &args[0], lock_period = &args[3], @@ -166,18 +145,11 @@ fn create_event_info_data_code( start_burn_height = &args[2], signer_sig = &args.get(4).unwrap_or(&Value::none()), signer_key = &args.get(5).unwrap_or(&Value::none()), - max_amount = &args.get(6).unwrap_or(&Value::none()), - auth_id = &args.get(7).unwrap_or(&Value::none()), - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), ) } "delegate-stack-stx" => { format!( r#" - (let ( - (unlock-burn-height (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 {lock_period}))) - {prepare_offset} - ) {{ data: {{ ;; amount of ustx to lock. @@ -185,7 +157,7 @@ fn create_event_info_data_code( lock-amount: {lock_amount}, ;; burnchain height when the unlock finishes. ;; derived from args[4] - unlock-burn-height: unlock-burn-height, + unlock-burn-height: (reward-cycle-to-burn-height (+ (current-pox-reward-cycle) u1 {lock_period})), ;; PoX address tuple. 
;; equal to args[2] pox-addr: {pox_addr}, @@ -200,28 +172,19 @@ fn create_event_info_data_code( ;; stacker ;; equal to args[0] stacker: '{stacker}, - ;; Get end cycle ID - end-cycle-id: (some (burn-height-to-reward-cycle unlock-burn-height)), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), }} - }}) + }} "#, stacker = &args[0], lock_amount = &args[1], pox_addr = &args[2], start_burn_height = &args[3], lock_period = &args[4], - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), ) } "stack-increase" => { format!( r#" - (let ( - (unlock-height (get unlock-height (stx-account tx-sender))) - {prepare_offset} - ) {{ data: {{ ;; amount to increase by @@ -232,37 +195,16 @@ fn create_event_info_data_code( ;; derived from args[0] total-locked: (+ {increase_by} (get locked (stx-account tx-sender))), ;; pox addr increased - pox-addr: (get pox-addr (unwrap-panic (map-get? stacking-state {{ stacker: tx-sender }}))), - ;; signer sig (args[1]) - signer-sig: {signer_sig}, - ;; signer key (args[2]) - signer-key: {signer_key}, - ;; equal to args[3] - max-amount: {max_amount}, - ;; equal to args[4] - auth-id: {auth_id}, - ;; Get end cycle ID - end-cycle-id: (some (burn-height-to-reward-cycle unlock-height)), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), + pox-addr: (get pox-addr (unwrap-panic (map-get? stacking-state {{ stacker: tx-sender }}))) }} - }}) + }} "#, - increase_by = &args[0], - signer_sig = &args.get(1).unwrap_or(&Value::none()), - signer_key = &args.get(2).unwrap_or(&Value::none()), - max_amount = &args.get(3).unwrap_or(&Value::none()), - auth_id = &args.get(4).unwrap_or(&Value::none()), - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), + increase_by = &args[0] ) } "delegate-stack-increase" => { format!( r#" - (let ( - (unlock-height (get unlock-height (stx-account '{stacker}))) - {prepare_offset} - ) {{ data: {{ ;; pox addr @@ -279,18 +221,13 @@ fn create_event_info_data_code( delegator: tx-sender, ;; stacker ;; equal to args[0] - stacker: '{stacker}, - ;; Get end cycle ID - end-cycle-id: (some (burn-height-to-reward-cycle unlock-height)), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), + stacker: '{stacker} }} }} "#, stacker = &args[0], pox_addr = &args[1], increase_by = &args[2], - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), ) } "stack-extend" => { @@ -307,7 +244,6 @@ fn create_event_info_data_code( unlock-in-cycle)) (last-extend-cycle (- (+ first-extend-cycle {extend_count}) u1)) (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle))) - {prepare_offset} ) {{ data: {{ @@ -323,14 +259,6 @@ fn create_event_info_data_code( signer-sig: {signer_sig}, ;; equal to args[3] signer-key: {signer_key}, - ;; equal to args[4] - max-amount: {max_amount}, - ;; equal to args[5] - auth-id: {auth_id}, - ;; Get end cycle ID - end-cycle-id: (some (burn-height-to-reward-cycle new-unlock-ht)), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), }} }}) "#, @@ -338,9 +266,6 @@ fn create_event_info_data_code( pox_addr = &args[1], signer_sig = &args.get(2).unwrap_or(&Value::none()), signer_key = &args.get(3).map_or("none".to_string(), |v| v.to_string()), - max_amount = &args.get(4).unwrap_or(&Value::none()), - auth_id = &args.get(5).unwrap_or(&Value::none()), - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), ) } 
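The `prepare-offset` binding being removed throughout these arms implements the rule spelled out in its (also removed) comment: a burn height that falls in a cycle's prepare phase counts toward the cycle after next, so the offset added on top of `(+ (current-pox-reward-cycle) u1)` is `u1` instead of `u0`. A rough Rust rendering of that arithmetic, for review purposes only (parameter names mirror the `first-burnchain-block-height`, `pox-reward-cycle-length`, and `pox-prepare-cycle-length` data vars; this is not code from the diff):

```rust
// Illustrative rendering of the removed Clarity prepare-offset check.
fn prepare_offset(
    burn_height: u64,
    first_burn_height: u64,
    reward_cycle_len: u64,
    prepare_len: u64,
) -> u64 {
    // Position of this burn height inside its reward cycle.
    let pos_in_cycle = (burn_height - first_burn_height) % reward_cycle_len;
    if pos_in_cycle < reward_cycle_len - prepare_len {
        0 // reward phase: takes effect in the next cycle (+ u1)
    } else {
        1 // prepare phase: bump one cycle further (+ u1 + u1)
    }
}
```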
"delegate-stack-extend" => { @@ -356,7 +281,6 @@ fn create_event_info_data_code( unlock-in-cycle)) (last-extend-cycle (- (+ first-extend-cycle {extend_count}) u1)) (new-unlock-ht (reward-cycle-to-burn-height (+ u1 last-extend-cycle))) - {prepare_offset} ) {{ data: {{ @@ -372,18 +296,13 @@ fn create_event_info_data_code( delegator: tx-sender, ;; stacker ;; equal to args[0] - stacker: '{stacker}, - ;; Get end cycle ID - end-cycle-id: (some (burn-height-to-reward-cycle new-unlock-ht)), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), + stacker: '{stacker} }} }}) "#, stacker = &args[0], pox_addr = &args[1], - extend_count = &args[2], - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), + extend_count = &args[2] ) } "stack-aggregation-commit" | "stack-aggregation-commit-indexed" => { @@ -407,14 +326,6 @@ fn create_event_info_data_code( signer-sig: {signer_sig}, ;; equal to args[3] signer-key: {signer_key}, - ;; equal to args[4] - max-amount: {max_amount}, - ;; equal to args[5] - auth-id: {auth_id}, - ;; Get end cycle ID - end-cycle-id: (some {reward_cycle}), - ;; Get start cycle ID - start-cycle-id: {reward_cycle}, }} }} "#, @@ -422,8 +333,6 @@ fn create_event_info_data_code( reward_cycle = &args[1], signer_sig = &args.get(2).unwrap_or(&Value::none()), signer_key = &args.get(3).unwrap_or(&Value::none()), - max_amount = &args.get(4).unwrap_or(&Value::none()), - auth_id = &args.get(5).unwrap_or(&Value::none()), ) } "stack-aggregation-increase" => { @@ -444,11 +353,7 @@ fn create_event_info_data_code( ;; delegator (this is the caller) delegator: tx-sender, ;; equal to args[2] - reward-cycle-index: {reward_cycle_index}, - ;; Get end cycle ID - end-cycle-id: (some {reward_cycle}), - ;; Get start cycle ID - start-cycle-id: {reward_cycle}, + reward-cycle-index: {reward_cycle_index} }} }} "#, @@ -460,9 +365,6 @@ fn create_event_info_data_code( "delegate-stx" => { format!( r#" - (let ( - {prepare_offset} - ) {{ data: {{ ;; amount of ustx to delegate. @@ -476,48 +378,32 @@ fn create_event_info_data_code( unlock-burn-height: {until_burn_height}, ;; optional PoX address tuple. ;; equal to args[3]. 
- pox-addr: {pox_addr}, - ;; Get end cycle ID - end-cycle-id: (match {until_burn_height} - height (some (burn-height-to-reward-cycle height)) - none - ), - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1 prepare-offset), + pox-addr: {pox_addr} }} - }}) + }} "#, amount_ustx = &args[0], delegate_to = &args[1], until_burn_height = &args[2], pox_addr = &args[3], - prepare_offset = prepare_offset.replace("%height%", "burn-block-height"), ) } "revoke-delegate-stx" => { if let Value::Optional(opt) = *response.data.clone() { - eprintln!("Response data in revoke-delegate-stx is: {:?}", opt.data); format!( r#" {{ - data: {{ - delegate-to: '{delegate_to}, - ;; Get end cycle ID - end-cycle-id: none, - ;; Get start cycle ID - start-cycle-id: (+ (current-pox-reward-cycle) u1), - }}, + data: {{ delegate-to: '{delegate_to} }} }} "#, delegate_to = opt .data - .clone() .map(|boxed_value| *boxed_value) .unwrap() .expect_tuple() .expect("FATAL: unexpected clarity value") .get("delegated-to") - .unwrap(), + .unwrap() ) } else { "{data: {unimplemented: true}}".into() diff --git a/stacks-common/Cargo.toml b/stacks-common/Cargo.toml index d9f987f5749..8d61e9b29a6 100644 --- a/stacks-common/Cargo.toml +++ b/stacks-common/Cargo.toml @@ -33,6 +33,7 @@ chrono = "0.4.19" libc = "0.2.82" wsts = { workspace = true } hashbrown = { workspace = true } +proptest = { workspace = true, optional = true } [target.'cfg(unix)'.dependencies] nix = "0.23" @@ -71,15 +72,16 @@ rstest = "0.11.0" rstest_reuse = "0.1.3" assert-json-diff = "1.0.0" rand_core = { workspace = true } +stacks-common = { path = ".", features = ["testing"] } [features] default = ["developer-mode"] developer-mode = [] slog_json = ["slog-json"] -testing = [] +testing = ["dep:proptest"] -[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] +[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(target_env = "msvc")))'.dependencies] sha2 = { version = "0.10", features = ["asm"] } -[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os="windows")))'.dependencies] +[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), target_env = "msvc"))'.dependencies] sha2 = { version = "0.10" } diff --git a/stacks-common/src/deps_common/bitcoin/network/encodable.rs b/stacks-common/src/deps_common/bitcoin/network/encodable.rs index f14ee1fb854..9e941db2c3d 100644 --- a/stacks-common/src/deps_common/bitcoin/network/encodable.rs +++ b/stacks-common/src/deps_common/bitcoin/network/encodable.rs @@ -32,10 +32,9 @@ use std::hash::Hash; use std::{mem, u32}; -use hashbrown::HashMap; - use crate::deps_common::bitcoin::network::serialize::{self, SimpleDecoder, SimpleEncoder}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; +use crate::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; /// Maximum size, in bytes, of a vector we are allowed to decode pub const MAX_VEC_SIZE: usize = 64 * 1024 * 1024; diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 0a9fa9d641c..234f4375e95 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -35,6 +35,13 @@ pub mod bitvec; use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId}; +#[cfg(any(test, feature = "testing"))] +#[macro_use] +extern crate proptest; + +#[cfg(any(test, feature = 
"testing"))] +pub mod proptesting; + pub mod consts { use crate::types::chainstate::{BlockHeaderHash, ConsensusHash}; @@ -62,5 +69,5 @@ pub mod consts { /// The number of StackerDB slots each signing key needs /// to use to participate in DKG and block validation signing. - pub const SIGNER_SLOTS_PER_USER: u32 = 13; + pub const SIGNER_SLOTS_PER_USER: u32 = 12; } diff --git a/stacks-common/src/proptesting/hashmap.rs b/stacks-common/src/proptesting/hashmap.rs new file mode 100644 index 00000000000..a5888ebfd41 --- /dev/null +++ b/stacks-common/src/proptesting/hashmap.rs @@ -0,0 +1,100 @@ +use std::hash::Hash; + +use proptest::collection::{SizeRange, VecStrategy, VecValueTree}; +use proptest::strategy::{statics, NewTree, Strategy, ValueTree}; +use proptest::test_runner::TestRunner; +use proptest::tuple::TupleValueTree; + +use crate::types::StacksHashMap; + +#[derive(Debug, Clone, Copy)] +struct MinSize(usize); + +#[derive(Clone, Debug)] +pub struct StacksHashMapStrategy( + statics::Filter, VecToStacksHashMap>, MinSize>, +) +where + K: Strategy, + V: Strategy, + K::Value: Hash + Eq; + +#[derive(Clone, Debug)] +pub struct StacksHashMapValueTree( + statics::Filter< + statics::Map>, VecToStacksHashMap>, + MinSize, + >, +) +where + K: ValueTree, + V: ValueTree, + K::Value: Hash + Eq; + +impl Strategy for StacksHashMapStrategy +where + K: Strategy, + V: Strategy, + K::Value: Hash + Eq, +{ + type Tree = StacksHashMapValueTree; + type Value = StacksHashMap; + fn new_tree(&self, runner: &mut TestRunner) -> NewTree { + self.0.new_tree(runner).map(StacksHashMapValueTree) + } +} + +impl ValueTree for StacksHashMapValueTree +where + K: ValueTree, + V: ValueTree, + K::Value: Hash + Eq, +{ + type Value = StacksHashMap; + fn current(&self) -> Self::Value { + self.0.current() + } + fn simplify(&mut self) -> bool { + self.0.simplify() + } + fn complicate(&mut self) -> bool { + self.0.complicate() + } +} + +#[derive(Clone, Copy, Debug)] +struct VecToStacksHashMap; + +impl statics::MapFn> + for VecToStacksHashMap +{ + type Output = StacksHashMap; + fn apply(&self, vec: Vec<(K, V)>) -> StacksHashMap { + vec.into_iter().collect() + } +} + +pub fn stacks_hash_map( + key: K, + value: V, + size: impl Into, +) -> StacksHashMapStrategy +where + K::Value: Hash + Eq, +{ + let size = size.into(); + StacksHashMapStrategy(statics::Filter::new( + statics::Map::new( + proptest::collection::vec((key, value), size.clone()), + VecToStacksHashMap, + ), + "HashMap minimum size".into(), + MinSize(size.start()), + )) +} + +impl statics::FilterFn> for MinSize { + fn apply(&self, map: &StacksHashMap) -> bool { + map.len() >= self.0 + } +} diff --git a/stacks-common/src/proptesting/hashset.rs b/stacks-common/src/proptesting/hashset.rs new file mode 100644 index 00000000000..c84f4c8cccb --- /dev/null +++ b/stacks-common/src/proptesting/hashset.rs @@ -0,0 +1,90 @@ +use std::hash::Hash; + +use proptest::collection::{SizeRange, VecStrategy, VecValueTree}; +use proptest::prelude::*; +use proptest::strategy::{statics, NewTree, ValueTree}; +use proptest::test_runner::TestRunner; + +use crate::types::StacksHashSet; + +#[derive(Clone, Copy, Debug)] +struct MinSize(usize); + +#[derive(Clone, Copy, Debug)] +struct VecToStacksHashSet; + +impl statics::MapFn> for VecToStacksHashSet { + type Output = StacksHashSet; + fn apply(&self, vec: Vec) -> StacksHashSet { + vec.into_iter().collect() + } +} + +#[derive(Clone, Debug)] +pub struct StacksHashSetStrategy( + statics::Filter, VecToStacksHashSet>, MinSize>, +) +where + T: Strategy, + T::Value: Hash + 
Eq; + +#[derive(Clone, Debug)] +pub struct StacksHashSetValueTree( + statics::Filter, VecToStacksHashSet>, MinSize>, +) +where + T: ValueTree, + T::Value: Hash + Eq; + +impl Strategy for StacksHashSetStrategy +where + T: Strategy, + T::Value: Hash + Eq, +{ + type Tree = StacksHashSetValueTree; + type Value = StacksHashSet; + fn new_tree(&self, runner: &mut TestRunner) -> NewTree { + self.0.new_tree(runner).map(StacksHashSetValueTree) + } +} + +impl ValueTree for StacksHashSetValueTree +where + T: ValueTree, + T::Value: Hash + Eq, +{ + type Value = StacksHashSet; + fn current(&self) -> Self::Value { + self.0.current() + } + fn simplify(&mut self) -> bool { + self.0.simplify() + } + fn complicate(&mut self) -> bool { + self.0.complicate() + } +} + +impl statics::FilterFn> for MinSize { + fn apply(&self, set: &StacksHashSet) -> bool { + set.len() >= self.0 + } +} + +pub fn stacks_hash_set( + element: T, + size: impl Into, +) -> StacksHashSetStrategy +where + T::Value: Hash + Eq, +{ + let size = size.into(); + StacksHashSetStrategy(statics::Filter::new( + statics::Map::new( + proptest::collection::vec(element, size.clone()), + VecToStacksHashSet, + ), + "HashSet minimum size".into(), + MinSize(size.start()), + )) +} diff --git a/stacks-common/src/proptesting/mod.rs b/stacks-common/src/proptesting/mod.rs new file mode 100644 index 00000000000..3e0db468637 --- /dev/null +++ b/stacks-common/src/proptesting/mod.rs @@ -0,0 +1,37 @@ +pub mod hashmap; +pub mod hashset; +pub mod types; + +pub use hashmap::stacks_hash_map; +pub use hashset::stacks_hash_set; +use proptest::prelude::*; +use proptest::sample::SizeRange; +pub use types::*; + +use crate::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; + +pub fn sha_512_trunc_256_sum() -> impl Strategy { + prop::collection::vec(any::(), 32..=32).prop_map(|vec| { + let arr: [u8; 32] = vec.try_into().expect("failed to generate 32-byte array"); + + Sha512Trunc256Sum::from(arr) + }) +} + +/// Generate a random hex string representing a byte array of the given length. +/// i.e. the string will be `2 * byte_len` characters long. 
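For orientation, here is a minimal property test exercising the two collection generators above (a sketch, assuming the crate is built with the `testing` feature so that `stacks_common::proptesting` is compiled in; the test name and size bounds are illustrative only):

    use proptest::prelude::*;
    use stacks_common::proptesting::{stacks_hash_map, stacks_hash_set};

    proptest! {
        #[test]
        fn generated_collections_respect_min_size(
            // `1..8` converts into a `SizeRange`: between 1 and 7 elements.
            map in stacks_hash_map(any::<u8>(), any::<u64>(), 1..8),
            set in stacks_hash_set(any::<u16>(), 1..8),
        ) {
            // The `MinSize` filter holds even after duplicate keys collapse.
            prop_assert!(map.len() >= 1);
            prop_assert!(set.len() >= 1);
        }
    }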
diff --git a/stacks-common/src/proptesting/mod.rs b/stacks-common/src/proptesting/mod.rs new file mode 100644 index 00000000000..3e0db468637 --- /dev/null +++ b/stacks-common/src/proptesting/mod.rs @@ -0,0 +1,37 @@ +pub mod hashmap; +pub mod hashset; +pub mod types; + +pub use hashmap::stacks_hash_map; +pub use hashset::stacks_hash_set; +use proptest::prelude::*; +use proptest::sample::SizeRange; +pub use types::*; + +use crate::util::hash::{to_hex, Hash160, Sha512Trunc256Sum}; + +pub fn sha_512_trunc_256_sum() -> impl Strategy<Value = Sha512Trunc256Sum> { + prop::collection::vec(any::<u8>(), 32..=32).prop_map(|vec| { + let arr: [u8; 32] = vec.try_into().expect("failed to generate 32-byte array"); + + Sha512Trunc256Sum::from(arr) + }) +} + +/// Generate a random hex string representing a byte array of the given length. +/// i.e. the string will be `2 * byte_len` characters long. +pub fn hex_string(byte_len: impl Into<SizeRange>) -> impl Strategy<Value = String> { + prop::collection::vec(any::<u8>(), byte_len).prop_map(|vec| to_hex(&vec)) +} + +pub fn bytes(len: impl Into<SizeRange>) -> impl Strategy<Value = Vec<u8>> { + prop::collection::vec(any::<u8>(), len) +} + +pub fn hash_160() -> impl Strategy<Value = Hash160> { + prop::collection::vec(any::<u8>(), 20..=20).prop_map(|vec| { + let arr: [u8; 20] = vec.try_into().expect("failed to generate 20-byte array"); + + Hash160(arr) + }) +}
diff --git a/stacks-common/src/proptesting/types.rs b/stacks-common/src/proptesting/types.rs new file mode 100644 index 00000000000..d468ddfc3d9 --- /dev/null +++ b/stacks-common/src/proptesting/types.rs @@ -0,0 +1,51 @@ +use std::any; + +use proptest::prelude::*; + +use super::bytes; +use crate::types::chainstate::{StacksAddress, StacksBlockId}; +use crate::types::net::{PeerAddress, PeerHost}; +use crate::types::StacksPublicKeyBuffer; +use crate::util::hash::Hash160; + +pub fn stacks_public_key_buffer() -> impl Strategy<Value = StacksPublicKeyBuffer> { + bytes(33).prop_map(|vec| { + let arr: [u8; 33] = vec.try_into().expect("failed to generate 33-byte array"); + + StacksPublicKeyBuffer::from(arr) + }) +} + +pub fn stacks_address() -> impl Strategy<Value = StacksAddress> { + bytes(20).prop_map(|vec| { + let arr: [u8; 20] = vec.try_into().expect("failed to generate 20-byte array"); + + StacksAddress { + version: 1, + bytes: Hash160(arr), + } + }) +} + +pub fn peer_address() -> impl Strategy<Value = PeerAddress> { + bytes(16).prop_map(|vec| { + let arr: [u8; 16] = vec.try_into().expect("failed to generate 16-byte array"); + + PeerAddress(arr) + }) +} + +pub fn peer_host() -> impl Strategy<Value = PeerHost> { + prop_oneof![ + (peer_address(), any::<u16>()).prop_map(|(peer, port)| PeerHost::IP(peer, port)), + (any::<String>(), any::<u16>()).prop_map(|(host, port)| PeerHost::DNS(host, port)) + ] +} + +pub fn stacks_block_id() -> impl Strategy<Value = StacksBlockId> { + bytes(32).prop_map(|vec| { + let arr: [u8; 32] = vec.try_into().expect("failed to generate 32-byte array"); + + StacksBlockId(arr) + }) +}
diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 2652347273b..2f011b63a39 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -15,6 +15,9 @@ use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; pub mod chainstate; pub mod net; +pub type StacksHashMap<K, V> = hashbrown::HashMap<K, V>; +pub type StacksHashSet<T> = hashbrown::HashSet<T>; + /// A container for public keys (compressed secp256k1 public keys) pub struct StacksPublicKeyBuffer(pub [u8; 33]); impl_array_newtype!(StacksPublicKeyBuffer, u8, 33); @@ -79,21 +82,6 @@ impl StacksEpochId { StacksEpochId::Epoch30 } - /// Returns whether or not this Epoch should perform - /// memory checks during analysis - pub fn analysis_memory(&self) -> bool { - match self { - StacksEpochId::Epoch10 - | StacksEpochId::Epoch20 - | StacksEpochId::Epoch2_05 - | StacksEpochId::Epoch21 - | StacksEpochId::Epoch22 - | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 => false, - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, - } - } - /// Returns whether or not this Epoch should perform /// Clarity value sanitization pub fn value_sanitizing(&self) -> bool { @@ -107,16 +95,6 @@ StacksEpochId::Epoch24 | StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => true, } } - - /// Does this epoch support unlocking PoX contributors that miss a slot? - /// - /// Epoch 2.0 - 2.05 didn't support this feature, but they weren't epoch-guarded on it. Instead, - /// the behavior never activates in those epochs because the Pox1 contract does not provide - `contibuted_stackers` information. This check maintains that exact semantics by returning - true for all epochs before 2.5. For 2.5 and after, this returns false. - pub fn supports_pox_missed_slot_unlocks(&self) -> bool { - self < &StacksEpochId::Epoch25 - } } impl std::fmt::Display for StacksEpochId {
diff --git a/stacks-common/src/util/macros.rs b/stacks-common/src/util/macros.rs index 57ce30ad9c8..cd2578e9c5b 100644 --- a/stacks-common/src/util/macros.rs +++ b/stacks-common/src/util/macros.rs @@ -210,25 +210,16 @@ macro_rules! guarded_string { /// gives you a try_from(u8) -> Option<T> function #[macro_export] macro_rules! define_u8_enum { - ($(#[$outer:meta])* - $Name:ident { - $( - $(#[$inner:meta])* - $Variant:ident = $Val:literal),+ - }) => + ($Name:ident { $($Variant:ident = $Val:literal),+ }) => { #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Hash, Serialize, Deserialize)] #[repr(u8)] - $(#[$outer])* pub enum $Name { - $( $(#[$inner])* - $Variant = $Val),*, + $($Variant = $Val),*, } impl $Name { - /// All members of the enum pub const ALL: &'static [$Name] = &[$($Name::$Variant),*]; - /// Return the u8 representation of the variant pub fn to_u8(&self) -> u8 { match self { $( @@ -237,8 +228,6 @@ macro_rules! define_u8_enum { } } - /// Returns Some and the variant if `v` is a u8 corresponding to a variant in this enum. - /// Returns None otherwise pub fn from_u8(v: u8) -> Option<$Name> { match v { $(
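With the attribute plumbing removed, the macro still generates the `ALL` listing plus the `to_u8`/`from_u8` accessors; a hypothetical invocation as a sketch (names and values are illustrative, and `serde`'s derive macros are assumed to be in scope, as they are wherever this macro is used):

    define_u8_enum!(ExampleFlag {
        Off = 0,
        On = 1
    });

    assert_eq!(ExampleFlag::On.to_u8(), 1);
    assert_eq!(ExampleFlag::from_u8(0), Some(ExampleFlag::Off));
    assert_eq!(ExampleFlag::from_u8(9), None);
    assert_eq!(ExampleFlag::ALL.len(), 2);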
diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index d4dfcda82f0..97cbc4104fd 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -27,7 +27,6 @@ pub mod secp256k1; pub mod uint; pub mod vrf; -use std::collections::HashMap; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time};
diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 0274f41b025..5d1a5f5aebf 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -346,10 +346,6 @@ impl Secp256k1PrivateKey { } to_hex(&bytes) } - - pub fn as_slice(&self) -> &[u8; 32] { - self.key.as_ref() - } } impl PrivateKey for Secp256k1PrivateKey {
diff --git a/stacks-signer/Cargo.toml b/stacks-signer/Cargo.toml index 57b2e808049..f6fb9ce80c6 100644 --- a/stacks-signer/Cargo.toml +++ b/stacks-signer/Cargo.toml @@ -46,12 +46,6 @@ url = "2.1.0" [dev-dependencies] clarity = { path = "../clarity", features = ["testing"] } -polynomial = "0.2.6" -num-traits = "0.2.18" - -[dependencies.rusqlite] -version = "=0.24.2" -features = ["blob", "serde_json", "i128_blob", "bundled", "trace"] [dependencies.serde_json] version = "1.0" @@ -60,3 +54,6 @@ features = ["arbitrary_precision", "unbounded_depth"] [dependencies.secp256k1] version = "0.24.3" features = ["serde", "recovery"] + +[features] +portable = []
diff --git a/stacks-signer/src/cli.rs b/stacks-signer/src/cli.rs index b4601e82eeb..a9afa338273 100644 --- a/stacks-signer/src/cli.rs +++ b/stacks-signer/src/cli.rs @@ -19,13 +19,9 @@ use std::path::PathBuf; use blockstack_lib::chainstate::stacks::address::PoxAddress; use blockstack_lib::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; -use clap::{ArgAction, Parser, ValueEnum}; +use clap::{Parser, ValueEnum}; use clarity::vm::types::QualifiedContractIdentifier; -use stacks_common::address::{ - b58, AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, - C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_MULTISIG, - C32_ADDRESS_VERSION_TESTNET_SINGLESIG, -}; +use stacks_common::address::b58; use stacks_common::types::chainstate::StacksPrivateKey; use crate::config::Network; @@ -59,7 +55,7 @@ pub enum Command { /// Run a DKG round through the stacker-db instance Dkg(RunDkgArgs), /// Run the signer, waiting for events from the stacker-db instance - Run(RunSignerArgs), + Run(RunDkgArgs), /// Generate necessary files for running a collection of signers GenerateFiles(GenerateFilesArgs), /// Generate a signature for Stacking transactions @@ -128,7 +124,7 @@ pub struct PutChunkArgs { /// Arguments for the dkg-sign and sign command pub struct SignArgs { /// Path to config file - #[arg(long, short, value_name = "FILE")] + #[arg(long, value_name = "FILE")] pub config: PathBuf, /// The reward cycle the signer is registered for and wants to sign for /// Note: this must be the current reward cycle of the node @@ -142,24 +138,16 @@ pub struct SignArgs { } #[derive(Parser, Debug, Clone)] -/// Arguments for the Dkg command +/// Arguments for the Run and Dkg commands pub struct RunDkgArgs { /// Path to config file - #[arg(long, short, value_name = "FILE")] + #[arg(long, value_name = "FILE")] pub config: PathBuf, /// The reward cycle the signer is registered for and wants to perform DKG for #[arg(long, short)] pub reward_cycle: u64, } -#[derive(Parser, Debug, Clone)] -/// Arguments for the Run command -pub struct RunSignerArgs { - /// Path to config file - #[arg(long, short, value_name = "FILE")] - pub config: PathBuf, -} - #[derive(Parser, Debug, Clone)] /// Arguments for the generate-files command pub struct GenerateFilesArgs { @@ -196,14 +184,14 @@ pub struct StackingSignatureMethod(Pox4SignatureTopic); impl StackingSignatureMethod { /// Get the inner `Pox4SignatureTopic` - pub const fn topic(&self) -> &Pox4SignatureTopic { + pub fn topic(&self) -> &Pox4SignatureTopic { &self.0 } } impl From<Pox4SignatureTopic> for StackingSignatureMethod { fn from(topic: Pox4SignatureTopic) -> Self { - Self(topic) + StackingSignatureMethod(topic) } } @@ -214,9 +202,9 @@ impl ValueEnum for StackingSignatureMethod { fn value_variants<'a>() -> &'a [Self] { &[ - Self(Pox4SignatureTopic::StackStx), - Self(Pox4SignatureTopic::StackExtend), - Self(Pox4SignatureTopic::AggregationCommit), + StackingSignatureMethod(Pox4SignatureTopic::StackStx), + StackingSignatureMethod(Pox4SignatureTopic::StackExtend), + StackingSignatureMethod(Pox4SignatureTopic::AggregationCommit), ] } @@ -238,14 +226,13 @@ pub struct GenerateStackingSignatureArgs { /// BTC address used to receive rewards #[arg(short, long, value_parser = parse_pox_addr)] pub pox_address: PoxAddress, - /// The reward cycle during which this signature - /// can be used + /// The reward cycle to be used in the signature's message hash #[arg(short, long)] pub reward_cycle: u64, - /// Path to signer config file - #[arg(long, short, value_name = "FILE")] + /// Path to config file + #[arg(long, value_name = "FILE")] pub config: PathBuf, - /// Stacking method that can be used + /// Topic for signature #[arg(long)] pub method: StackingSignatureMethod, /// Number of cycles used as a lock period.
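The `From` conversion and `topic()` accessor kept above are the whole surface the CLI layer needs from this wrapper; a short hedged sketch of the round trip, using only items shown in this file:

    use blockstack_lib::util_lib::signed_structured_data::pox4::Pox4SignatureTopic;

    // Wrap a topic for clap, then read it back when building the signature.
    let method = StackingSignatureMethod::from(Pox4SignatureTopic::StackStx);
    assert!(matches!(method.topic(), Pox4SignatureTopic::StackStx));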
@@ -258,9 +245,6 @@ pub struct GenerateStackingSignatureArgs { /// A unique identifier to prevent re-using this authorization #[arg(long)] pub auth_id: u128, - /// Output information in JSON format - #[arg(long, action=ArgAction::SetTrue, required=false)] - pub json: bool, } /// Parse the contract ID @@ -268,25 +252,12 @@ fn parse_contract(contract: &str) -> Result<QualifiedContractIdentifier, String> { QualifiedContractIdentifier::parse(contract).map_err(|e| format!("Invalid contract: {}", e)) } -/// Parse a BTC address argument and return a `PoxAddress`. -/// This function behaves similarly to `PoxAddress::from_b58`, but also handles -/// addresses where the parsed AddressHashMode is None. +/// Parse a BTC address argument and return a `PoxAddress` pub fn parse_pox_addr(pox_address_literal: &str) -> Result<PoxAddress, String> { - let parsed_addr = PoxAddress::from_b58(pox_address_literal).map_or_else( - || Err(format!("Invalid pox address: {pox_address_literal}")), - Ok, - ); - match parsed_addr { - Ok(PoxAddress::Standard(addr, None)) => match addr.version { - C32_ADDRESS_VERSION_MAINNET_MULTISIG | C32_ADDRESS_VERSION_TESTNET_MULTISIG => Ok( - PoxAddress::Standard(addr, Some(AddressHashMode::SerializeP2SH)), - ), - C32_ADDRESS_VERSION_MAINNET_SINGLESIG | C32_ADDRESS_VERSION_TESTNET_SINGLESIG => Ok( - PoxAddress::Standard(addr, Some(AddressHashMode::SerializeP2PKH)), - ), - _ => Err(format!("Invalid address version: {}", addr.version)), - }, - _ => parsed_addr, + if let Some(pox_address) = PoxAddress::from_b58(pox_address_literal) { + Ok(pox_address) + } else { + Err(format!("Invalid pox address: {}", pox_address_literal)) } } @@ -328,49 +299,13 @@ fn parse_network(network: &str) -> Result<Network, String> { #[cfg(test)] mod tests { use blockstack_lib::chainstate::stacks::address::{PoxAddressType20, PoxAddressType32}; - use blockstack_lib::util_lib::signed_structured_data::pox4::make_pox_4_signer_key_message_hash; - use clarity::consts::CHAIN_ID_TESTNET; - use clarity::util::hash::Sha256Sum; use super::*; - /// Helper just to ensure that the pox address - /// can be turned into a clarity tuple - fn make_message_hash(pox_addr: &PoxAddress) -> Sha256Sum { - make_pox_4_signer_key_message_hash( - pox_addr, - 0, - &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, - 0, - 0, - 0, - ) - } - - fn clarity_tuple_version(pox_addr: &PoxAddress) -> u8 { - *pox_addr - .as_clarity_tuple() - .expect("Failed to generate clarity tuple for pox address") - .get("version") - .expect("Expected version in clarity tuple") - .clone() - .expect_buff(1) - .expect("Expected version to be a u128") - .first() - .expect("Expected version to be a uint") - } - #[test] fn test_parse_pox_addr() { let tr = "bc1p8vg588hldsnv4a558apet4e9ff3pr4awhqj2hy8gy6x2yxzjpmqsvvpta4"; let pox_addr = parse_pox_addr(tr).expect("Failed to parse segwit address"); - assert_eq!(tr, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); - assert_eq!( - clarity_tuple_version(&pox_addr), - PoxAddressType32::P2TR.to_u8() - ); match pox_addr { PoxAddress::Addr32(_, addr_type, _) => { assert_eq!(addr_type, PoxAddressType32::P2TR); @@ -380,60 +315,26 @@ mod tests { let legacy = "1N8GMS991YDY1E696e9SB9EsYY5ckSU7hZ"; let pox_addr = parse_pox_addr(legacy).expect("Failed to parse legacy address"); - assert_eq!(legacy, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); - assert_eq!( - clarity_tuple_version(&pox_addr), - AddressHashMode::SerializeP2PKH as u8 - ); match pox_addr { PoxAddress::Standard(stacks_addr, hash_mode) => { assert_eq!(stacks_addr.version, 22); - assert_eq!(hash_mode,
Some(AddressHashMode::SerializeP2PKH)); + assert!(hash_mode.is_none()); } _ => panic!("Invalid parsed address"), } let p2sh = "33JNgVMNMC9Xm6mJG9oTVf5zWbmt5xi1Mv"; let pox_addr = parse_pox_addr(p2sh).expect("Failed to parse legacy address"); - assert_eq!(p2sh, pox_addr.clone().to_b58()); - assert_eq!( - clarity_tuple_version(&pox_addr), - AddressHashMode::SerializeP2SH as u8 - ); - make_message_hash(&pox_addr); match pox_addr { PoxAddress::Standard(stacks_addr, hash_mode) => { assert_eq!(stacks_addr.version, 20); - assert_eq!(hash_mode, Some(AddressHashMode::SerializeP2SH)); - } - _ => panic!("Invalid parsed address"), - } - - let testnet_p2pkh = "mnr5asd1MLSutHLL514WZXNpUNN3L98zBc"; - let pox_addr = parse_pox_addr(testnet_p2pkh).expect("Failed to parse testnet address"); - assert_eq!( - clarity_tuple_version(&pox_addr), - AddressHashMode::SerializeP2PKH as u8 - ); - assert_eq!(testnet_p2pkh, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); - match pox_addr { - PoxAddress::Standard(stacks_addr, hash_mode) => { - assert_eq!(stacks_addr.version, C32_ADDRESS_VERSION_TESTNET_SINGLESIG); - assert_eq!(hash_mode, Some(AddressHashMode::SerializeP2PKH)); + assert!(hash_mode.is_none()); } _ => panic!("Invalid parsed address"), } let wsh = "bc1qvnpcphdctvmql5gdw6chtwvvsl6ra9gwa2nehc99np7f24juc4vqrx29cs"; let pox_addr = parse_pox_addr(wsh).expect("Failed to parse segwit address"); - assert_eq!( - clarity_tuple_version(&pox_addr), - PoxAddressType32::P2WSH.to_u8() - ); - assert_eq!(wsh, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); match pox_addr { PoxAddress::Addr32(_, addr_type, _) => { assert_eq!(addr_type, PoxAddressType32::P2WSH); @@ -441,44 +342,8 @@ mod tests { _ => panic!("Invalid parsed address"), } - let wpkh = "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t4"; + let wpkh = "BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4"; let pox_addr = parse_pox_addr(wpkh).expect("Failed to parse segwit address"); - assert_eq!( - clarity_tuple_version(&pox_addr), - PoxAddressType20::P2WPKH.to_u8() - ); - assert_eq!(wpkh, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); - match pox_addr { - PoxAddress::Addr20(_, addr_type, _) => { - assert_eq!(addr_type, PoxAddressType20::P2WPKH); - } - _ => panic!("Invalid parsed address"), - } - - let testnet_tr = "tb1p46cgptxsfwkqpnnj552rkae3nf6l52wxn4snp4vm6mcrz2585hwq6cdwf2"; - let pox_addr = parse_pox_addr(testnet_tr).expect("Failed to parse testnet address"); - assert_eq!(testnet_tr, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); - assert_eq!( - clarity_tuple_version(&pox_addr), - PoxAddressType32::P2TR.to_u8() - ); - match pox_addr { - PoxAddress::Addr32(_, addr_type, _) => { - assert_eq!(addr_type, PoxAddressType32::P2TR); - } - _ => panic!("Invalid parsed address"), - } - - let testnet_segwit = "tb1q38eleudmqyg4jrm39dnudj23pv6jcjrksa437s"; - let pox_addr = parse_pox_addr(testnet_segwit).expect("Failed to parse testnet address"); - assert_eq!(testnet_segwit, pox_addr.clone().to_b58()); - make_message_hash(&pox_addr); - assert_eq!( - clarity_tuple_version(&pox_addr), - PoxAddressType20::P2WPKH.to_u8() - ); match pox_addr { PoxAddress::Addr20(_, addr_type, _) => { assert_eq!(addr_type, PoxAddressType20::P2WPKH); diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index 3e1da7b20c3..1a378eeedff 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -23,6 +23,7 @@ use std::time::Duration; use clarity::vm::errors::Error as ClarityError; use 
clarity::vm::types::serialization::SerializationError; +use libsigner::RPCError; use libstackerdb::Error as StackerDBError; use slog::slog_debug; pub use stackerdb::*; @@ -47,6 +48,9 @@ pub enum ClientError { /// Failed to sign stacker-db chunk #[error("Failed to sign stacker-db chunk: {0}")] FailToSign(#[from] StackerDBError), + /// Failed to write to stacker-db due to RPC error + #[error("Failed to write to stacker-db instance: {0}")] + PutChunkFailed(#[from] RPCError), /// Stacker-db instance rejected the chunk #[error("Stacker-db rejected the chunk. Reason: {0}")] PutChunkRejected(String), @@ -68,18 +72,33 @@ pub enum ClientError { /// Failed to parse a Clarity value #[error("Received a malformed clarity value: {0}")] MalformedClarityValue(String), + /// Invalid Clarity Name + #[error("Invalid Clarity Name: {0}")] + InvalidClarityName(String), /// Backoff retry timeout #[error("Backoff retry timeout occurred. Stacks node may be down.")] RetryTimeout, /// Not connected #[error("Not connected")] NotConnected, + /// Invalid signing key + #[error("Signing key not represented in the list of signers")] + InvalidSigningKey, /// Clarity interpreter error #[error("Clarity interpreter error: {0}")] ClarityError(#[from] ClarityError), + /// Our stacks address does not belong to a registered signer + #[error("Our stacks address does not belong to a registered signer")] + NotRegistered, + /// Reward set not yet calculated for the given reward cycle + #[error("Reward set not yet calculated for reward cycle: {0}")] + RewardSetNotYetCalculated(u64), /// Malformed reward set #[error("Malformed contract data: {0}")] MalformedContractData(String), + /// No reward set exists for the given reward cycle + #[error("No reward set exists for reward cycle {0}")] + NoRewardSet(u64), /// Stacks node does not support a feature we need #[error("Stacks node does not support a required feature: {0}")] UnsupportedStacksFeature(String), @@ -116,20 +135,19 @@ pub(crate) mod tests { use blockstack_lib::net::api::getpoxinfo::{ RPCPoxCurrentCycleInfo, RPCPoxEpoch, RPCPoxInfoData, RPCPoxNextCycleInfo, }; - use blockstack_lib::net::api::postfeerate::{RPCFeeEstimate, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::boot_code_id; use clarity::vm::costs::ExecutionCost; use clarity::vm::types::TupleData; use clarity::vm::Value as ClarityValue; - use hashbrown::{HashMap, HashSet}; - use libsigner::SignerEntries; use rand::distributions::Standard; use rand::{thread_rng, Rng}; use rand_core::{OsRng, RngCore}; use stacks_common::types::chainstate::{ BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; - use stacks_common::types::{StacksEpochId, StacksPublicKeyBuffer}; + use stacks_common::types::{ + StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet, StacksPublicKeyBuffer, + }; use stacks_common::util::hash::{Hash160, Sha256Sum}; use wsts::curve::ecdsa; use wsts::curve::point::{Compressed, Point}; @@ -137,7 +155,7 @@ pub(crate) mod tests { use wsts::state_machine::PublicKeys; use super::*; - use crate::config::{GlobalConfig, SignerConfig}; + use crate::config::{GlobalConfig, ParsedSignerEntries, SignerConfig}; use crate::signer::SignerSlotID; pub struct MockServerClient { @@ -231,7 +249,7 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{account_nonce_entry_json}") } - /// Build a response to get_pox_data_with_retry where it returns a specific reward cycle id and block height + /// Build a response to get_pox_data where it returns a specific reward cycle id and block 
height pub fn build_get_pox_data_response( reward_cycle: Option<u64>, prepare_phase_start_height: Option<u64>, @@ -347,7 +365,7 @@ pub(crate) mod tests { build_read_only_response(&clarity_value) } - /// Build a response for the get_peer_info_with_retry request with a specific stacks tip height and consensus hash + /// Build a response for the get_peer_info request with a specific stacks tip height and consensus hash pub fn build_get_peer_info_response( burn_block_height: Option<u64>, pox_consensus_hash: Option<ConsensusHash>, @@ -399,44 +417,6 @@ pub(crate) mod tests { format!("HTTP/1.1 200 OK\n\n{{\"okay\":true,\"result\":\"{hex}\"}}") } - /// Build a response for the get_medium_estimated_fee_ustx_response request with a specific medium estimate - pub fn build_get_medium_estimated_fee_ustx_response( - medium_estimate: u64, - ) -> (String, RPCFeeEstimateResponse) { - // Generate some random info - let fee_response = RPCFeeEstimateResponse { - estimated_cost: ExecutionCost { - write_length: thread_rng().next_u64(), - write_count: thread_rng().next_u64(), - read_length: thread_rng().next_u64(), - read_count: thread_rng().next_u64(), - runtime: thread_rng().next_u64(), - }, - estimated_cost_scalar: thread_rng().next_u64(), - cost_scalar_change_by_byte: thread_rng().next_u32() as f64, - estimations: vec![ - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: medium_estimate, - }, - RPCFeeEstimate { - fee_rate: thread_rng().next_u32() as f64, - fee: thread_rng().next_u64(), - }, - ], - }; - let fee_response_json = serde_json::to_string(&fee_response) - .expect("Failed to serialize fee estimate response"); - ( - format!("HTTP/1.1 200 OK\n\n{fee_response_json}"), - fee_response, - ) - } - /// Generate a signer config with the given number of signers and keys where the first signer is /// obtained from the provided global config pub fn generate_signer_config( @@ -453,8 +433,8 @@ pub(crate) mod tests { "Cannot generate 0 keys for the provided signers...Specify at least 1 key." ); let mut public_keys = PublicKeys { - signers: HashMap::new(), - key_ids: HashMap::new(), + signers: hashbrown::HashMap::new(), + key_ids: hashbrown::HashMap::new(), }; let reward_cycle = thread_rng().next_u64(); let rng = &mut OsRng; @@ -536,7 +516,7 @@ pub(crate) mod tests { signer_id: 0, signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers key_ids: signer_key_ids.get(&0).cloned().unwrap_or_default(), - signer_entries: SignerEntries { + signer_entries: ParsedSignerEntries { public_keys, coordinator_key_ids, signer_key_ids, @@ -554,32 +534,6 @@ pub(crate) mod tests { nonce_timeout: config.nonce_timeout, sign_timeout: config.sign_timeout, tx_fee_ustx: config.tx_fee_ustx, - max_tx_fee_ustx: config.max_tx_fee_ustx, - db_path: config.db_path.clone(), } } - - pub fn build_get_round_info_response(info: Option<(u64, u64)>) -> String { - let clarity_value = if let Some((vote_count, vote_weight)) = info { - ClarityValue::some(ClarityValue::Tuple( - TupleData::from_data(vec![ - ("votes-count".into(), ClarityValue::UInt(vote_count as u128)), - ( - "votes-weight".into(), - ClarityValue::UInt(vote_weight as u128), - ), - ]) - .expect("BUG: Failed to create clarity value from tuple data"), - )) - .expect("BUG: Failed to create clarity value from tuple data") - } else { - ClarityValue::none() - }; - build_read_only_response(&clarity_value) - } - - pub fn build_get_weight_threshold_response(threshold: u64) -> String { - let clarity_value = ClarityValue::UInt(threshold as u128); - build_read_only_response(&clarity_value) - } }
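Every helper above feeds one canned HTTP response to a client call running on its own thread; a sketch of the shared test shape (hypothetical response body; edition-2021 closures capture `mock.client` by field, leaving `mock.server` usable after the spawn):

    let mock = MockServerClient::new();
    // Run the client call on a background thread so the mock server can answer it.
    let h = spawn(move || mock.client.get_pox_data());
    // Feed the canned response, then collect the parsed result.
    write_response(mock.server, b"HTTP/1.1 200 OK\n\n{ ... }");
    let result = h.join().unwrap();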
diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 4b6e993bcf0..f1436ac34d6 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -14,14 +14,18 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // +use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::chainstate::stacks::StacksTransaction; -use blockstack_lib::net::api::poststackerdbchunk::StackerDBErrorCodes; -use hashbrown::HashMap; -use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; +use blockstack_lib::util_lib::boot::boot_code_addr; +use clarity::vm::types::QualifiedContractIdentifier; +use clarity::vm::ContractName; +use libsigner::{SignerMessage, SignerSession, StackerDBSession, TRANSACTIONS_MSG_ID}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; use slog::{slog_debug, slog_warn}; use stacks_common::codec::{read_next, StacksMessageCodec}; +use stacks_common::consts::SIGNER_SLOTS_PER_USER; use stacks_common::types::chainstate::StacksPrivateKey; +use stacks_common::types::StacksHashMap as HashMap; use stacks_common::{debug, warn}; use super::ClientError; @@ -33,11 +37,11 @@ use crate::signer::SignerSlotID; pub struct StackerDB { /// The stacker-db sessions for each signer set and message type. /// Maps message ID to the DB session. - signers_message_stackerdb_sessions: HashMap<MessageSlotID, StackerDBSession>, + signers_message_stackerdb_sessions: HashMap<u32, StackerDBSession>, /// The private key used in all stacks node communications stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session - slot_versions: HashMap<MessageSlotID, HashMap<SignerSlotID, u32>>, + slot_versions: HashMap<u32, HashMap<SignerSlotID, u32>>, /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. signer_slot_id: SignerSlotID, /// The reward cycle of the connecting signer @@ -48,7 +52,7 @@ pub struct StackerDB { impl From<&SignerConfig> for StackerDB { fn from(config: &SignerConfig) -> Self { - Self::new( + StackerDB::new( &config.node_host, config.stacks_private_key, config.mainnet, @@ -67,16 +71,33 @@ impl StackerDB { signer_slot_id: SignerSlotID, ) -> Self { let mut signers_message_stackerdb_sessions = HashMap::new(); - for msg_id in MessageSlotID::ALL { + let stackerdb_issuer = boot_code_addr(is_mainnet); + for msg_id in 0..SIGNER_SLOTS_PER_USER { signers_message_stackerdb_sessions.insert( - *msg_id, - StackerDBSession::new(host, msg_id.stacker_db_contract(is_mainnet, reward_cycle)), + msg_id, + StackerDBSession::new( + host, + QualifiedContractIdentifier::new( + stackerdb_issuer.into(), + ContractName::from( + NakamotoSigners::make_signers_db_name(reward_cycle, msg_id).as_str(), + ), + ), + ), ); } let next_transaction_session = StackerDBSession::new( host, - MessageSlotID::Transactions - .stacker_db_contract(is_mainnet, reward_cycle.wrapping_add(1)), + QualifiedContractIdentifier::new( + stackerdb_issuer.into(), + ContractName::from( + NakamotoSigners::make_signers_db_name( + reward_cycle.wrapping_add(1), + TRANSACTIONS_MSG_ID, + ) + .as_str(), + ), + ), ); Self { @@ -94,21 +115,11 @@ impl StackerDB { &mut self, message: SignerMessage, ) -> Result<StackerDBChunkAckData, ClientError> { - let msg_id = message.msg_id(); let message_bytes = message.serialize_to_vec(); - self.send_message_bytes_with_retry(&msg_id, message_bytes) - } - - /// Sends message (as a raw msg ID and bytes) to the .signers stacker-db with an - /// exponential backoff retry - pub fn send_message_bytes_with_retry( - &mut self, - msg_id: &MessageSlotID, - message_bytes: Vec<u8>, - ) -> Result<StackerDBChunkAckData, ClientError> { + let msg_id = message.msg_id(); let slot_id = self.signer_slot_id; loop { - let mut slot_version = if let Some(versions) = self.slot_versions.get_mut(msg_id) { + let slot_version = if let Some(versions) = self.slot_versions.get_mut(&msg_id) { if let Some(version) = versions.get(&slot_id) { *version } else { @@ -118,14 +129,14 @@ } else { let mut versions = HashMap::new(); versions.insert(slot_id, 0); - self.slot_versions.insert(*msg_id, versions); + self.slot_versions.insert(msg_id, versions); 1 }; let mut chunk = StackerDBChunkData::new(slot_id.0, slot_version, message_bytes.clone()); chunk.sign(&self.stacks_private_key)?; - let Some(session) = self.signers_message_stackerdb_sessions.get_mut(msg_id) else { + let Some(session) = self.signers_message_stackerdb_sessions.get_mut(&msg_id) else { panic!("FATAL: would loop forever trying to send a message with ID {}, for which we don't have a session", msg_id); }; @@ -137,7 +148,7 @@ let send_request = || session.put_chunk(&chunk).map_err(backoff::Error::transient); let chunk_ack: StackerDBChunkAckData = retry_with_exponential_backoff(send_request)?; - if let Some(versions) = self.slot_versions.get_mut(msg_id) { + if let Some(versions) = self.slot_versions.get_mut(&msg_id) { // NOTE: per the above, this is always executed versions.insert(slot_id, slot_version.saturating_add(1)); } else { @@ -150,30 +161,20 @@ } else { warn!("Chunk rejected by stackerdb: {chunk_ack:?}"); } - if let Some(code) = chunk_ack.code { - match StackerDBErrorCodes::from_code(code) { - Some(StackerDBErrorCodes::DataAlreadyExists) => { - if let Some(slot_metadata) = chunk_ack.metadata { - warn!("Failed to send message to stackerdb due to wrong version number. Attempted {}. Expected {}. Retrying...", slot_version, slot_metadata.slot_version); - slot_version = slot_metadata.slot_version; - } else { - warn!("Failed to send message to stackerdb due to wrong version number. Attempted {}. Expected unknown version number. Incrementing and retrying...", slot_version); - } - if let Some(versions) = self.slot_versions.get_mut(msg_id) { - // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); - } else { - return Err(ClientError::NotConnected); - } - } - _ => { - warn!("Failed to send message to stackerdb: {:?}", chunk_ack); - return Err(ClientError::PutChunkRejected( - chunk_ack - .reason - .unwrap_or_else(|| "No reason given".to_string()), - )); + if let Some(reason) = chunk_ack.reason { + // TODO: fix this jankiness. Update stackerdb to use an error code mapping instead of just a string + // See: https://github.com/stacks-network/stacks-blockchain/issues/3917 + if reason.contains("Data for this slot and version already exist") { + warn!("Failed to send message to stackerdb due to wrong version number {}. Incrementing and retrying...", slot_version); + if let Some(versions) = self.slot_versions.get_mut(&msg_id) { + // NOTE: per the above, this is always executed + versions.insert(slot_id, slot_version.saturating_add(1)); + } else { + return Err(ClientError::NotConnected); } + } else { + warn!("Failed to send message to stackerdb: {}", reason); + return Err(ClientError::PutChunkRejected(reason)); } } } @@ -224,10 +225,12 @@ impl StackerDB { } /// Get this signer's latest transactions from stackerdb - pub fn get_current_transactions(&mut self) -> Result<Vec<StacksTransaction>, ClientError> { + pub fn get_current_transactions_with_retry( + &mut self, + ) -> Result<Vec<StacksTransaction>, ClientError> { let Some(transactions_session) = self .signers_message_stackerdb_sessions - .get_mut(&MessageSlotID::Transactions) + .get_mut(&TRANSACTIONS_MSG_ID) else { return Err(ClientError::NotConnected); }; @@ -235,7 +238,7 @@ } /// Get the latest signer transactions from signer ids for the next reward cycle - pub fn get_next_transactions( + pub fn get_next_transactions_with_retry( &mut self, signer_ids: &[SignerSlotID], ) -> Result<Vec<StacksTransaction>, ClientError> { @@ -270,7 +273,7 @@ mod tests { use crate::config::GlobalConfig; #[test] - fn get_signer_transactions_should_succeed() { + fn get_signer_transactions_with_retry_should_succeed() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-0.toml").unwrap(); let signer_config = generate_signer_config(&config, 5, 20); let mut stackerdb = StackerDB::from(&signer_config); @@ let message = signer_message.serialize_to_vec(); let signer_slot_ids = vec![SignerSlotID(0), SignerSlotID(1)]; - let h = spawn(move || stackerdb.get_next_transactions(&signer_slot_ids)); + let h = spawn(move || stackerdb.get_next_transactions_with_retry(&signer_slot_ids)); let mut response_bytes = b"HTTP/1.1 200 OK\n\n".to_vec(); response_bytes.extend(message); let mock_server = mock_server_from_config(&config); @@ } #[test] - fn send_signer_message_should_succeed() { + fn send_signer_message_with_retry_should_succeed() { let config = GlobalConfig::load_from_file("./src/tests/conf/signer-1.toml").unwrap(); let signer_config = generate_signer_config(&config, 5, 20); let mut stackerdb = StackerDB::from(&signer_config); @@ accepted: true, reason: None, metadata: None, - code: None, }; let mock_server = mock_server_from_config(&config); let h = spawn(move || stackerdb.send_message_with_retry(signer_message));
diff --git a/stacks-signer/src/client/stacks_client.rs b/stacks-signer/src/client/stacks_client.rs index 7d4e32c43d6..80481d5981d 100644 --- a/stacks-signer/src/client/stacks_client.rs +++ b/stacks-signer/src/client/stacks_client.rs @@ -31,9 +31,7 @@ use blockstack_lib::net::api::getinfo::RPCPeerInfoData; use blockstack_lib::net::api::getpoxinfo::RPCPoxInfoData; use blockstack_lib::net::api::getstackers::GetStackersResponse; use blockstack_lib::net::api::postblock_proposal::NakamotoBlockProposal; -use blockstack_lib::net::api::postfeerate::{FeeRateEstimateRequestBody, RPCFeeEstimateResponse}; use blockstack_lib::util_lib::boot::{boot_code_addr, boot_code_id}; -use clarity::util::hash::to_hex; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::{ClarityName, ContractName, Value as ClarityValue}; use reqwest::header::AUTHORIZATION; @@ -48,7 +46,6 @@ use crate::client::{retry_with_exponential_backoff, ClientError}; use crate::config::GlobalConfig; -use crate::runloop::RewardCycleInfo; /// The Stacks signer client used to communicate with the stacks node #[derive(Clone, Debug)] pub struct StacksClient { @@ -119,7 +116,7 @@ impl StacksClient { } /// Get our signer address - pub const fn get_signer_address(&self) -> &StacksAddress { + pub fn get_signer_address(&self) -> &StacksAddress { &self.stacks_address } @@ -147,7 +144,7 @@ value: ClarityValue, ) -> Result<Vec<(StacksAddress, u128)>, ClientError> { debug!("Parsing signer slots..."); - let value = value.expect_result_ok()?; + let value = value.clone().expect_result_ok()?; let values = value.expect_list()?; let mut signer_slots = Vec::with_capacity(values.len()); for value in values { @@ -198,40 +195,6 @@ } } - /// Retrieve the medium estimated transaction fee in uSTX from the stacks node for the given transaction - pub fn get_medium_estimated_fee_ustx( - &self, - tx: &StacksTransaction, - ) -> Result<u64, ClientError> { - let request = FeeRateEstimateRequestBody { - estimated_len: Some(tx.tx_len()), - transaction_payload: to_hex(&tx.payload.serialize_to_vec()), - }; - let send_request = || { - self.stacks_node_client - .post(self.fees_transaction_path()) - .header("Content-Type", "application/json") - .json(&request) - .send() - .map_err(backoff::Error::transient) - }; - let response = retry_with_exponential_backoff(send_request)?; - if !response.status().is_success() { - return Err(ClientError::RequestFailure(response.status())); - } - let fee_estimate_response = response.json::<RPCFeeEstimateResponse>()?; - let fee = fee_estimate_response - .estimations - .get(1) - .map(|estimate| estimate.fee) - .ok_or_else(|| { - ClientError::UnexpectedResponseFormat( - "RPCFeeEstimateResponse missing medium fee estimate".into(), - ) - })?; - Ok(fee) - } - /// Determine the stacks node current epoch pub fn get_node_epoch(&self) -> Result<StacksEpochId, ClientError> { let pox_info = self.get_pox_data()?; @@ -291,51 +254,6 @@ reward_cycle: u64, ) -> Result<Option<Point>, ClientError> { let function_name = ClarityName::from("get-approved-aggregate-key"); - let voting_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; - let value = self.read_only_contract_call( - &voting_contract_id.issuer.into(), - &voting_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - inner_data.map_or_else( - || Ok(None), - |key_value| self.parse_aggregate_public_key(key_value), - ) - } - - /// Retrieve the current consumed weight for the given reward cycle and DKG round - pub fn get_round_vote_weight( - &self, - reward_cycle: u64, - round_id: u64, - ) -> Result<Option<u128>, ClientError> { - let function_name = ClarityName::from("get-round-info"); - let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); - let function_args = &[ - ClarityValue::UInt(reward_cycle as u128), - ClarityValue::UInt(round_id as u128), - ]; - let value = self.read_only_contract_call( - &pox_contract_id.issuer.into(), - &pox_contract_id.name, - &function_name, - function_args, - )?; - let inner_data = value.expect_optional()?; - let Some(inner_data) = inner_data else { - return Ok(None); - }; - let round_info = inner_data.expect_tuple()?; - let votes_weight = round_info.get("votes-weight")?.to_owned().expect_u128()?; - Ok(Some(votes_weight)) - } - - /// Retrieve the weight threshold required to approve a DKG vote - pub fn get_vote_threshold_weight(&self, reward_cycle: u64) -> Result<u128, ClientError> { - let function_name = ClarityName::from("get-threshold-weight"); let pox_contract_id = boot_code_id(SIGNERS_VOTING_NAME, self.mainnet); let function_args = &[ClarityValue::UInt(reward_cycle as u128)]; let value = self.read_only_contract_call( @@ -344,12 +262,18 @@ &function_name, function_args, )?; - Ok(value.expect_u128()?) + let inner_data = value.expect_optional()?; + if let Some(key_value) = inner_data { + self.parse_aggregate_public_key(key_value) + } else { + Ok(None) + } } /// Retrieve the current account nonce for the provided address pub fn get_account_nonce(&self, address: &StacksAddress) -> Result<u64, ClientError> { - self.get_account_entry(address).map(|entry| entry.nonce) + let account_entry = self.get_account_entry(address)?; + Ok(account_entry.nonce) } /// Get the current peer info data from the stacks node @@ -435,11 +359,12 @@ /// Helper function to retrieve the burn tip height from the stacks node fn get_burn_block_height(&self) -> Result<u64, ClientError> { - self.get_peer_info().map(|info| info.burn_block_height) + let peer_info = self.get_peer_info()?; + Ok(peer_info.burn_block_height) } - /// Get the current reward cycle info from the stacks node - pub fn get_current_reward_cycle_info(&self) -> Result<RewardCycleInfo, ClientError> { + /// Get the current reward cycle from the stacks node + pub fn get_current_reward_cycle(&self) -> Result<u64, ClientError> { let pox_data = self.get_pox_data()?; let blocks_mined = pox_data .current_burnchain_block_height .saturating_sub(pox_data.first_burnchain_block_height); let reward_cycle_length = pox_data .reward_phase_block_length .saturating_add(pox_data.prepare_phase_block_length); - let reward_cycle = blocks_mined / reward_cycle_length; - Ok(RewardCycleInfo { - reward_cycle, - reward_cycle_length, - prepare_phase_block_length: pox_data.prepare_phase_block_length, - first_burnchain_block_height: pox_data.first_burnchain_block_height, - last_burnchain_block_height: pox_data.current_burnchain_block_height, - }) + Ok(blocks_mined / reward_cycle_length) }
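Concretely, the replacement above reduces the cycle id to one saturating integer computation over the PoX data; a worked sketch with assumed example heights and phase lengths:

    // Assumed example values, mirroring the fields used above.
    let first_burnchain_block_height = 100u64;
    let current_burnchain_block_height = 2_300u64;
    let blocks_mined = current_burnchain_block_height.saturating_sub(first_burnchain_block_height); // 2_200
    let reward_cycle_length = 1_000u64.saturating_add(100); // reward phase + prepare phase = 1_100
    assert_eq!(blocks_mined / reward_cycle_length, 2); // current reward cycle id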
/// Helper function to retrieve the account info from the stacks node for a specific address @@ -500,12 +418,13 @@ } /// Helper function to create a stacks transaction for a modifying contract call - pub fn build_unsigned_vote_for_aggregate_public_key( + pub fn build_vote_for_aggregate_public_key( &self, signer_index: u32, round: u64, dkg_public_key: Point, reward_cycle: u64, + tx_fee: Option<u64>, nonce: u64, ) -> Result<StacksTransaction, ClientError> { debug!("Building {SIGNERS_VOTING_FUNCTION_NAME} transaction..."); @@ -518,8 +437,9 @@ ClarityValue::UInt(round as u128), ClarityValue::UInt(reward_cycle as u128), ]; + let tx_fee = tx_fee.unwrap_or(0); - let unsigned_tx = Self::build_unsigned_contract_call_transaction( + Self::build_signed_contract_call_transaction( &contract_address, contract_name, function_name, @@ -528,8 +448,8 @@ self.tx_version, self.chain_id, nonce, - )?; - Ok(unsigned_tx) + tx_fee, + ) } /// Helper function to submit a transaction to the Stacks mempool @@ -578,9 +498,9 @@ let path = self.read_only_path(contract_addr, contract_name, function_name); let response = self .stacks_node_client - .post(path) + .post(path.clone()) .header("Content-Type", "application/json") - .body(body) + .body(body.clone()) .send()?; if !response.status().is_success() { return Err(ClientError::RequestFailure(response.status())); } @@ -591,7 +511,7 @@ "{function_name}: {}", call_read_only_response .cause - .unwrap_or_else(|| "unknown".to_string()) + .unwrap_or("unknown".to_string()) ))); } let hex = call_read_only_response.result.unwrap_or_default(); @@ -635,13 +555,9 @@ format!("{}/v2/stacker_set/{reward_cycle}", self.http_origin) } - fn fees_transaction_path(&self) -> String { - format!("{}/v2/fees/transaction", self.http_origin) - } - /// Helper function to create a stacks transaction for a modifying contract call #[allow(clippy::too_many_arguments)] - pub fn build_unsigned_contract_call_transaction( + pub fn build_signed_contract_call_transaction( contract_addr: &StacksAddress, contract_name: ContractName, function_name: ClarityName, @@ -650,6 +566,7 @@ tx_version: TransactionVersion, chain_id: u32, nonce: u64, + tx_fee: u64, ) -> Result<StacksTransaction, ClientError> { let tx_payload = TransactionPayload::ContractCall(TransactionContractCall { address: *contract_addr, @@ let mut unsigned_tx = StacksTransaction::new(tx_version, tx_auth, tx_payload); + + unsigned_tx.set_tx_fee(tx_fee); unsigned_tx.set_origin_nonce(nonce); unsigned_tx.anchor_mode = TransactionAnchorMode::Any; unsigned_tx.post_condition_mode = TransactionPostConditionMode::Allow; unsigned_tx.chain_id = chain_id; - Ok(unsigned_tx) - } - /// Sign an unsigned transaction - pub fn sign_transaction( - &self, - unsigned_tx: StacksTransaction, - ) -> Result<StacksTransaction, ClientError> { let mut tx_signer = StacksTransactionSigner::new(&unsigned_tx); tx_signer - .sign_origin(&self.stacks_private_key) + .sign_origin(stacks_private_key) .map_err(|e| ClientError::TransactionGenerationFailure(e.to_string()))?; tx_signer @@ -717,10 +629,9 @@ mod tests { use super::*; use crate::client::tests::{ build_account_nonce_response, build_get_approved_aggregate_key_response, - build_get_last_round_response, build_get_medium_estimated_fee_ustx_response, - build_get_peer_info_response, build_get_pox_data_response, build_get_round_info_response, - build_get_vote_for_aggregate_key_response, build_get_weight_threshold_response, - build_read_only_response, write_response, MockServerClient, + build_get_last_round_response, build_get_peer_info_response, build_get_pox_data_response, + build_get_vote_for_aggregate_key_response, build_read_only_response, write_response, + MockServerClient, }; #[test] @@ -824,9 +735,9 @@ fn valid_reward_cycle_should_succeed() { let mock = MockServerClient::new(); let (pox_data_response, pox_data) = build_get_pox_data_response(None, None, None, None); - let h = spawn(move || mock.client.get_current_reward_cycle_info()); + let h = spawn(move || mock.client.get_current_reward_cycle());
write_response(mock.server, pox_data_response.as_bytes()); - let current_cycle_info = h.join().unwrap().unwrap(); + let current_cycle_id = h.join().unwrap().unwrap(); let blocks_mined = pox_data .current_burnchain_block_height .saturating_sub(pox_data.first_burnchain_block_height); @@ -834,13 +745,13 @@ mod tests { .reward_phase_block_length .saturating_add(pox_data.prepare_phase_block_length); let id = blocks_mined / reward_cycle_length; - assert_eq!(current_cycle_info.reward_cycle, id); + assert_eq!(current_cycle_id, id); } #[test] fn invalid_reward_cycle_should_fail() { let mock = MockServerClient::new(); - let h = spawn(move || mock.client.get_current_reward_cycle_info()); + let h = spawn(move || mock.client.get_current_reward_cycle()); write_response( mock.server, b"HTTP/1.1 200 Ok\n\n{\"current_cycle\":{\"id\":\"fake id\", \"is_pox_active\":false}}", @@ -888,11 +799,12 @@ mod tests { assert!(result.is_err()) } + #[ignore] #[test] fn transaction_contract_call_should_send_bytes_to_node() { let mock = MockServerClient::new(); let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( + let tx = StacksClient::build_signed_contract_call_transaction( &mock.client.stacks_address, ContractName::from("contract-name"), ClarityName::from("function-name"), @@ -901,11 +813,10 @@ mod tests { TransactionVersion::Testnet, CHAIN_ID_TESTNET, 0, + 10_000, ) .unwrap(); - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - let mut tx_bytes = [0u8; 1024]; { let mut tx_bytes_writer = BufWriter::new(&mut tx_bytes[..]); @@ -940,6 +851,7 @@ mod tests { ); } + #[ignore] #[test] fn build_vote_for_aggregate_public_key_should_succeed() { let mock = MockServerClient::new(); @@ -950,17 +862,19 @@ mod tests { let reward_cycle = thread_rng().next_u64(); let h = spawn(move || { - mock.client.build_unsigned_vote_for_aggregate_public_key( + mock.client.build_vote_for_aggregate_public_key( signer_index, round, point, reward_cycle, + None, nonce, ) }); assert!(h.join().unwrap().is_ok()); } + #[ignore] #[test] fn broadcast_vote_for_aggregate_public_key_should_succeed() { let mock = MockServerClient::new(); @@ -969,27 +883,28 @@ mod tests { let signer_index = thread_rng().next_u32(); let round = thread_rng().next_u64(); let reward_cycle = thread_rng().next_u64(); - let unsigned_tx = mock - .client - .build_unsigned_vote_for_aggregate_public_key( - signer_index, - round, - point, - reward_cycle, - nonce, - ) - .unwrap(); - let tx = mock.client.sign_transaction(unsigned_tx).unwrap(); - let tx_clone = tx.clone(); - let h = spawn(move || mock.client.submit_transaction(&tx_clone)); + let h = spawn(move || { + let tx = mock + .client + .clone() + .build_vote_for_aggregate_public_key( + signer_index, + round, + point, + reward_cycle, + None, + nonce, + ) + .unwrap(); + mock.client.submit_transaction(&tx) + }); + let mock = MockServerClient::from_config(mock.config); write_response( mock.server, - format!("HTTP/1.1 200 OK\n\n{}", tx.txid()).as_bytes(), + b"HTTP/1.1 200 OK\n\n4e99f99bc4a05437abb8c7d0c306618f45b203196498e2ebe287f10497124958", ); - let returned_txid = h.join().unwrap().unwrap(); - - assert_eq!(returned_txid, tx.txid()); + assert!(h.join().unwrap().is_ok()); } #[test] @@ -1040,13 +955,13 @@ mod tests { fn parse_valid_signer_slots_should_succeed() { let mock = MockServerClient::new(); let clarity_value_hex = - 
"0x070b000000050c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a8195196a9a7cf9c37cb13e1ed69a7bc047a84e050c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a6505471146dcf722f0580911183f28bef30a8a890c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a1d7f8e3936e5da5f32982cc47f31d7df9fb1b38a0c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a126d1a814313c952e34c7840acec9211e1727fb80c00000002096e756d2d736c6f7473010000000000000000000000000000000d067369676e6572051a7374ea6bb39f2e8d3d334d62b9f302a977de339a"; + "0x070b000000050c00000002096e756d2d736c6f7473010000000000000000000000000000000c067369676e6572051a8195196a9a7cf9c37cb13e1ed69a7bc047a84e050c00000002096e756d2d736c6f7473010000000000000000000000000000000c067369676e6572051a6505471146dcf722f0580911183f28bef30a8a890c00000002096e756d2d736c6f7473010000000000000000000000000000000c067369676e6572051a1d7f8e3936e5da5f32982cc47f31d7df9fb1b38a0c00000002096e756d2d736c6f7473010000000000000000000000000000000c067369676e6572051a126d1a814313c952e34c7840acec9211e1727fb80c00000002096e756d2d736c6f7473010000000000000000000000000000000c067369676e6572051a7374ea6bb39f2e8d3d334d62b9f302a977de339a"; let value = ClarityValue::try_deserialize_hex_untyped(clarity_value_hex).unwrap(); let signer_slots = mock.client.parse_signer_slots(value).unwrap(); assert_eq!(signer_slots.len(), 5); signer_slots .into_iter() - .for_each(|(_address, slots)| assert_eq!(slots, SIGNER_SLOTS_PER_USER as u128)); + .for_each(|(_address, slots)| assert!(slots == SIGNER_SLOTS_PER_USER as u128)); } #[test] @@ -1228,7 +1143,6 @@ mod tests { stacked_amt: rand::thread_rng().next_u64() as u128, weight: 1, }]), - pox_ustx_threshold: None, }; let stackers_response = GetStackersResponse { stacker_set: stacker_set.clone(), @@ -1265,54 +1179,4 @@ mod tests { write_response(mock.server, key_response.as_bytes()); assert_eq!(h.join().unwrap().unwrap(), None); } - - #[test] - fn get_round_vote_weight_should_succeed() { - let mock = MockServerClient::new(); - let vote_count = rand::thread_rng().next_u64(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_round_info_response(Some((vote_count, weight))); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), Some(weight as u128)); - - let mock = MockServerClient::new(); - let round_response = build_get_round_info_response(None); - let h = spawn(move || mock.client.get_round_vote_weight(0, 0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), None); - } - - #[test] - fn get_vote_threshold_weight_should_succeed() { - let mock = MockServerClient::new(); - let weight = rand::thread_rng().next_u64(); - let round_response = build_get_weight_threshold_response(weight); - let h = spawn(move || mock.client.get_vote_threshold_weight(0)); - write_response(mock.server, round_response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), weight as u128); - } - - #[test] - fn get_medium_estimated_fee_ustx_should_succeed() { - let mock = MockServerClient::new(); - let private_key = StacksPrivateKey::new(); - let unsigned_tx = StacksClient::build_unsigned_contract_call_transaction( - &mock.client.stacks_address, - ContractName::from("contract-name"), - ClarityName::from("function-name"), - &[], - &private_key, - TransactionVersion::Testnet, - CHAIN_ID_TESTNET, - 0, - ) - 
.unwrap(); - - let estimate = thread_rng().next_u64(); - let response = build_get_medium_estimated_fee_ustx_response(estimate).0; - let h = spawn(move || mock.client.get_medium_estimated_fee_ustx(&unsigned_tx)); - write_response(mock.server, response.as_bytes()); - assert_eq!(h.join().unwrap().unwrap(), estimate); - } }
diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index a7aed7f6cd5..ed0ca10e774 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -20,20 +20,22 @@ use std::path::PathBuf; use std::time::Duration; use blockstack_lib::chainstate::stacks::TransactionVersion; -use libsigner::SignerEntries; use serde::Deserialize; use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use stacks_common::consts::{CHAIN_ID_MAINNET, CHAIN_ID_TESTNET}; use stacks_common::types::chainstate::{StacksAddress, StacksPrivateKey, StacksPublicKey}; -use stacks_common::types::PrivateKey; +use stacks_common::types::{PrivateKey, StacksHashMap as HashMap, StacksHashSet as HashSet}; +use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; +use wsts::state_machine::PublicKeys; use crate::signer::SignerSlotID; const EVENT_TIMEOUT_MS: u64 = 5000; -// Default transaction fee to use in microstacks (if unspecified in the config file) +// Default transaction fee in microstacks (if unspecified in the config file) +// TODO: Use the fee estimation endpoint to get the default fee. const TX_FEE_USTX: u64 = 10_000; #[derive(thiserror::Error, Debug)] @@ -53,7 +55,7 @@ pub enum ConfigError { UnsupportedAddressVersion, } -#[derive(serde::Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(serde::Deserialize, Debug, Clone, PartialEq)] #[serde(rename_all = "lowercase")] /// The Stacks network to use. pub enum Network { @@ -77,7 +79,7 @@ impl std::fmt::Display for Network { impl Network { /// Converts a Network enum variant to a corresponding chain id - pub const fn to_chain_id(&self) -> u32 { + pub fn to_chain_id(&self) -> u32 { match self { Self::Mainnet => CHAIN_ID_MAINNET, Self::Testnet | Self::Mocknet => CHAIN_ID_TESTNET, @@ -85,7 +87,7 @@ } /// Convert a Network enum variant to a corresponding address version - pub const fn to_address_version(&self) -> u8 { + pub fn to_address_version(&self) -> u8 { match self { Self::Mainnet => C32_ADDRESS_VERSION_MAINNET_SINGLESIG, Self::Testnet | Self::Mocknet => C32_ADDRESS_VERSION_TESTNET_SINGLESIG, @@ -93,7 +95,7 @@ } /// Convert a Network enum variant to a Transaction Version - pub const fn to_transaction_version(&self) -> TransactionVersion { + pub fn to_transaction_version(&self) -> TransactionVersion { match self { Self::Mainnet => TransactionVersion::Mainnet, Self::Testnet | Self::Mocknet => TransactionVersion::Testnet, @@ -101,7 +103,7 @@ } /// Check if the network is Mainnet or not - pub const fn is_mainnet(&self) -> bool { + pub fn is_mainnet(&self) -> bool { match self { Self::Mainnet => true, Self::Testnet | Self::Mocknet => false, @@ -109,6 +111,22 @@ } } +/// Parsed Reward Set +#[derive(Debug, Clone)] +pub struct ParsedSignerEntries { + /// The signer addresses mapped to signer id + pub signer_ids: HashMap<StacksAddress, u32>, + /// The signer ids mapped to public key and key ids mapped to public keys + pub public_keys: PublicKeys, + /// The signer ids mapped to key ids + pub signer_key_ids: HashMap<u32, Vec<u32>>, + /// The signer ids mapped to wsts public keys + pub signer_public_keys: HashMap<u32, Point>, + /// The signer ids mapped to a hash set of key ids + /// The wsts coordinator uses a hash set for each signer since it needs to do lots of lookups + pub coordinator_key_ids: HashMap<u32, HashSet<u32>>, +}
+ /// the STX tx fee to use in uSTX pub tx_fee_ustx: u64, - /// the max STX tx fee to use in uSTX when estimating fees - pub max_tx_fee_ustx: Option, /// the authorization password for the block proposal endpoint pub auth_password: String, - /// The path to the signer's database file - pub db_path: PathBuf, } /// Internal struct for loading up the config file @@ -211,21 +221,16 @@ struct RawConfigFile { pub nonce_timeout_ms: Option, /// timeout in (millisecs) to gather signature shares pub sign_timeout_ms: Option, - /// the STX tx fee to use in uSTX. If not set, will default to TX_FEE_USTX + /// the STX tx fee to use in uSTX pub tx_fee_ustx: Option, - /// the max STX tx fee to use in uSTX when estimating fees. - /// If not set, will use tx_fee_ustx. - pub max_tx_fee_ustx: Option, /// The authorization password for the block proposal endpoint pub auth_password: String, - /// The path to the signer's database file or :memory: for an in-memory database - pub db_path: String, } impl RawConfigFile { /// load the config from a string pub fn load_from_str(data: &str) -> Result { - let config: Self = + let config: RawConfigFile = toml::from_str(data).map_err(|e| ConfigError::ParseError(format!("{e:?}")))?; Ok(config) } @@ -240,7 +245,7 @@ impl TryFrom<&PathBuf> for RawConfigFile { type Error = ConfigError; fn try_from(path: &PathBuf) -> Result { - Self::load_from_str(&fs::read_to_string(path).map_err(|e| { + RawConfigFile::load_from_str(&fs::read_to_string(path).map_err(|e| { ConfigError::InvalidConfig(format!("failed to read config file: {e:?}")) })?) } @@ -261,9 +266,10 @@ impl TryFrom for GlobalConfig { .to_socket_addrs() .map_err(|_| ConfigError::BadField("endpoint".to_string(), raw_data.endpoint.clone()))? .next() - .ok_or_else(|| { - ConfigError::BadField("endpoint".to_string(), raw_data.endpoint.clone()) - })?; + .ok_or(ConfigError::BadField( + "endpoint".to_string(), + raw_data.endpoint.clone(), + ))?; let stacks_private_key = StacksPrivateKey::from_hex(&raw_data.stacks_private_key).map_err(|_| { @@ -295,8 +301,6 @@ impl TryFrom for GlobalConfig { let dkg_private_timeout = raw_data.dkg_private_timeout_ms.map(Duration::from_millis); let nonce_timeout = raw_data.nonce_timeout_ms.map(Duration::from_millis); let sign_timeout = raw_data.sign_timeout_ms.map(Duration::from_millis); - let db_path = raw_data.db_path.into(); - Ok(Self { node_host: raw_data.node_host, endpoint, @@ -311,9 +315,7 @@ impl TryFrom for GlobalConfig { nonce_timeout, sign_timeout, tx_fee_ustx: raw_data.tx_fee_ustx.unwrap_or(TX_FEE_USTX), - max_tx_fee_ustx: raw_data.max_tx_fee_ustx, auth_password: raw_data.auth_password, - db_path, }) } } @@ -339,31 +341,19 @@ impl GlobalConfig { } /// Helper function for building a signer config for each provided signer private key -#[allow(clippy::too_many_arguments)] pub fn build_signer_config_tomls( stacks_private_keys: &[StacksPrivateKey], node_host: &str, timeout: Option, network: &Network, password: &str, - run_stamp: u16, - mut port_start: usize, - max_tx_fee_ustx: Option, - tx_fee_ustx: Option, ) -> Vec { let mut signer_config_tomls = vec![]; + let mut port = 30000; for stacks_private_key in stacks_private_keys { - let endpoint = format!("localhost:{}", port_start); - port_start += 1; - - let stacks_public_key = StacksPublicKey::from_private(stacks_private_key).to_hex(); - let db_dir = format!( - "/tmp/stacks-node-tests/integrations-signers/{run_stamp}/signer_{stacks_public_key}" - ); - let db_path = format!("{db_dir}/signerdb.sqlite"); - fs::create_dir_all(&db_dir).unwrap(); - + 
let endpoint = format!("localhost:{}", port); + port += 1; let stacks_private_key = stacks_private_key.to_hex(); let mut signer_config_toml = format!( r#" @@ -372,7 +362,6 @@ node_host = "{node_host}" endpoint = "{endpoint}" network = "{network}" auth_password = "{password}" -db_path = "{db_path}" "# ); @@ -381,25 +370,7 @@ db_path = "{db_path}" signer_config_toml = format!( r#" {signer_config_toml} -event_timeout = {event_timeout_ms} -"# - ) - } - - if let Some(max_tx_fee_ustx) = max_tx_fee_ustx { - signer_config_toml = format!( - r#" -{signer_config_toml} -max_tx_fee_ustx = {max_tx_fee_ustx} -"# - ) - } - - if let Some(tx_fee_ustx) = tx_fee_ustx { - signer_config_toml = format!( - r#" -{signer_config_toml} -tx_fee_ustx = {tx_fee_ustx} +event_timeout = {event_timeout_ms} "# ) } @@ -409,143 +380,3 @@ tx_fee_ustx = {tx_fee_ustx} signer_config_tomls } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn build_signer_config_tomls_should_produce_deserializable_strings() { - let pk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let node_host = "localhost"; - let network = Network::Testnet; - let password = "melon"; - - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.auth_password, "melon"); - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); - } - - #[test] - fn fee_options_should_deserialize_correctly() { - let pk = StacksPrivateKey::from_hex( - "eb05c83546fdd2c79f10f5ad5434a90dd28f7e3acb7c092157aa1bc3656b012c01", - ) - .unwrap(); - - let node_host = "localhost"; - let network = Network::Testnet; - let password = "melon"; - - // Test both max_tx_fee_ustx and tx_fee_ustx are unspecified - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test both max_tx_fee_ustx and tx_fee_ustx are specified - let max_tx_fee_ustx = Some(1000); - let tx_fee_ustx = Some(2000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - tx_fee_ustx, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - // Test only max_tx_fee_ustx is specified - let max_tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - max_tx_fee_ustx, - None, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert_eq!(config.max_tx_fee_ustx, max_tx_fee_ustx); - assert!(config.tx_fee_ustx.is_none()); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert_eq!(config.max_tx_fee_ustx, 
max_tx_fee_ustx); - assert_eq!(config.tx_fee_ustx, TX_FEE_USTX); - - // Test only tx_fee_ustx is specified - let tx_fee_ustx = Some(1000); - let config_tomls = build_signer_config_tomls( - &[pk], - node_host, - None, - &network, - password, - rand::random(), - 3000, - None, - tx_fee_ustx, - ); - - let config = - RawConfigFile::load_from_str(&config_tomls[0]).expect("Failed to parse config file"); - - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(config.tx_fee_ustx, tx_fee_ustx); - - let config = GlobalConfig::try_from(config).expect("Failed to parse config"); - assert!(config.max_tx_fee_ustx.is_none()); - assert_eq!(Some(config.tx_fee_ustx), tx_fee_ustx); - } -} diff --git a/stacks-signer/src/coordinator.rs b/stacks-signer/src/coordinator.rs index 7fc2d238c48..234d1ade842 100644 --- a/stacks-signer/src/coordinator.rs +++ b/stacks-signer/src/coordinator.rs @@ -51,7 +51,7 @@ impl From for CoordinatorSelector { /// Create a new Coordinator selector from the given list of public keys fn from(public_keys: PublicKeys) -> Self { let coordinator_ids = - Self::calculate_coordinator_ids(&public_keys, &ConsensusHash::empty()); + CoordinatorSelector::calculate_coordinator_ids(&public_keys, &ConsensusHash::empty()); let coordinator_id = *coordinator_ids .first() .expect("FATAL: No registered signers"); @@ -69,9 +69,6 @@ impl From for CoordinatorSelector { } } -/// Whether or not to rotate to new coordinators in `update_coordinator` -const ROTATE_COORDINATORS: bool = false; - impl CoordinatorSelector { /// Update the coordinator id fn update_coordinator(&mut self, new_coordinator_ids: Vec) { @@ -84,17 +81,20 @@ impl CoordinatorSelector { .coordinator_ids .first() .expect("FATAL: No registered signers"); - if ROTATE_COORDINATORS && new_coordinator_id == self.coordinator_id { + if new_coordinator_id == self.coordinator_id { // If the newly selected coordinator is the same as the current and we have more than one available, advance immediately to the next if self.coordinator_ids.len() > 1 { new_index = new_index.saturating_add(1); } } new_index - } else if ROTATE_COORDINATORS { - self.coordinator_index.saturating_add(1) % self.coordinator_ids.len() } else { - self.coordinator_index + let mut new_index = self.coordinator_index.saturating_add(1); + if new_index == self.coordinator_ids.len() { + // We have exhausted all potential coordinators. 
Go back to the start + new_index = 0; + } + new_index }; self.coordinator_id = *self .coordinator_ids @@ -136,7 +136,7 @@ impl CoordinatorSelector { ) } - /// Calculate the ordered list of coordinator ids by comparing the provided public keys + /// Calculate the ordered list of coordinator ids by comparing the provided public keys against the pox consensus hash pub fn calculate_coordinator_ids( public_keys: &PublicKeys, pox_consensus_hash: &ConsensusHash, diff --git a/stacks-signer/src/lib.rs b/stacks-signer/src/lib.rs index 9dcd0a069f9..f3438e8bbc7 100644 --- a/stacks-signer/src/lib.rs +++ b/stacks-signer/src/lib.rs @@ -32,5 +32,3 @@ pub mod coordinator; pub mod runloop; /// The signer module for processing events pub mod signer; -/// The state module for the signer -pub mod signerdb; diff --git a/stacks-signer/src/main.rs b/stacks-signer/src/main.rs index 171bc5d43ea..e9c0af22f2e 100644 --- a/stacks-signer/src/main.rs +++ b/stacks-signer/src/main.rs @@ -46,7 +46,7 @@ use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::{debug, error}; use stacks_signer::cli::{ Cli, Command, GenerateFilesArgs, GenerateStackingSignatureArgs, GetChunkArgs, - GetLatestChunkArgs, PutChunkArgs, RunDkgArgs, RunSignerArgs, SignArgs, StackerDBArgs, + GetLatestChunkArgs, PutChunkArgs, RunDkgArgs, SignArgs, StackerDBArgs, }; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig}; use stacks_signer::runloop::{RunLoop, RunLoopCommand}; @@ -104,8 +104,8 @@ fn process_dkg_result(dkg_res: &[OperationResult]) { assert!(dkg_res.len() == 1, "Received unexpected number of results"); let dkg = dkg_res.first().unwrap(); match dkg { - OperationResult::Dkg(aggregate_key) => { - println!("Received aggregate group key: {aggregate_key}"); + OperationResult::Dkg(point) => { + println!("Received aggregate group key: {point}"); } OperationResult::Sign(signature) => { panic!( @@ -133,8 +133,8 @@ fn process_sign_result(sign_res: &[OperationResult]) { assert!(sign_res.len() == 1, "Received unexpected number of results"); let sign = sign_res.first().unwrap(); match sign { - OperationResult::Dkg(aggregate_key) => { - panic!("Received unexpected aggregate group key: {aggregate_key}"); + OperationResult::Dkg(point) => { + panic!("Received unexpected aggregate group key: {point}"); } OperationResult::Sign(signature) => { panic!( @@ -252,7 +252,7 @@ fn handle_dkg_sign(args: SignArgs) { spawned_signer.running_signer.stop(); } -fn handle_run(args: RunSignerArgs) { +fn handle_run(args: RunDkgArgs) { debug!("Running signer..."); let spawned_signer = spawn_running_signer(&args.config); println!("Signer spawned successfully. 
Waiting for messages to process..."); @@ -292,10 +292,6 @@ fn handle_generate_files(args: GenerateFilesArgs) { args.timeout.map(Duration::from_millis), &args.network, &args.password, - rand::random(), - 3000, - None, - None, ); debug!("Built {:?} signer config tomls.", signer_config_tomls.len()); for (i, file_contents) in signer_config_tomls.iter().enumerate() { @@ -324,28 +320,12 @@ fn handle_generate_stacking_signature( ) .expect("Failed to generate signature"); - let output_str = if args.json { - serde_json::to_string(&serde_json::json!({ - "signerKey": to_hex(&public_key.to_bytes_compressed()), - "signerSignature": to_hex(signature.to_rsv().as_slice()), - "authId": format!("{}", args.auth_id), - "rewardCycle": args.reward_cycle, - "maxAmount": format!("{}", args.max_amount), - "period": args.period, - "poxAddress": args.pox_address.to_b58(), - "method": args.method.topic().to_string(), - })) - .expect("Failed to serialize JSON") - } else { - format!( - "Signer Public Key: 0x{}\nSigner Key Signature: 0x{}\n\n", + if do_print { + println!( + "\nSigner Public Key: 0x{}\nSigner Key Signature: 0x{}\n\n", to_hex(&public_key.to_bytes_compressed()), to_hex(signature.to_rsv().as_slice()) // RSV is needed for Clarity - ) - }; - - if do_print { - println!("{}", output_str); + ); } signature @@ -418,7 +398,6 @@ pub mod tests { use super::{handle_generate_stacking_signature, *}; use crate::{GenerateStackingSignatureArgs, GlobalConfig}; - #[allow(clippy::too_many_arguments)] fn call_verify_signer_sig( pox_addr: &PoxAddress, reward_cycle: u128, @@ -467,7 +446,6 @@ pub mod tests { period: 12, max_amount: u128::MAX, auth_id: 1, - json: false, }; let signature = handle_generate_stacking_signature(args.clone(), false); @@ -522,7 +500,6 @@ pub mod tests { period: 12, max_amount: u128::MAX, auth_id: 1, - json: false, }; let signature = handle_generate_stacking_signature(args.clone(), false); diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 17b74c2fc90..46228149ab3 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -17,19 +17,22 @@ use std::collections::VecDeque; use std::sync::mpsc::Sender; use std::time::Duration; -use blockstack_lib::burnchains::PoxConstants; -use blockstack_lib::chainstate::stacks::boot::SIGNERS_NAME; +use blockstack_lib::chainstate::burn::ConsensusHashExtensions; +use blockstack_lib::chainstate::stacks::boot::{NakamotoSignerEntry, SIGNERS_NAME}; use blockstack_lib::util_lib::boot::boot_code_id; -use hashbrown::HashMap; -use libsigner::{SignerEntries, SignerEvent, SignerRunLoop}; +use libsigner::{SignerEvent, SignerRunLoop}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; -use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::chainstate::{ConsensusHash, StacksAddress, StacksPublicKey}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::{debug, error, info, warn}; -use wsts::state_machine::OperationResult; +use wsts::curve::ecdsa; +use wsts::curve::point::{Compressed, Point}; +use wsts::state_machine::coordinator::State as CoordinatorState; +use wsts::state_machine::{OperationResult, PublicKeys}; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; -use crate::config::{GlobalConfig, SignerConfig}; -use crate::signer::{Command as SignerCommand, Signer, SignerSlotID}; +use crate::config::{GlobalConfig, ParsedSignerEntries, SignerConfig}; +use crate::signer::{Command as SignerCommand, Signer, SignerSlotID, State as 
SignerState}; /// Which operation to perform #[derive(PartialEq, Clone, Debug)] @@ -41,48 +44,12 @@ pub struct RunLoopCommand { } /// The runloop state -#[derive(PartialEq, Eq, Debug, Clone, Copy)] +#[derive(PartialEq, Debug)] pub enum State { /// The runloop is uninitialized Uninitialized, - /// The runloop has no registered signers - NoRegisteredSigners, - /// The runloop has registered signers - RegisteredSigners, -} - -/// The current reward cycle info -#[derive(PartialEq, Eq, Debug, Clone, Copy)] -pub struct RewardCycleInfo { - /// The current reward cycle - pub reward_cycle: u64, - /// The total reward cycle length - pub reward_cycle_length: u64, - /// The prepare phase length - pub prepare_phase_block_length: u64, - /// The first burn block height - pub first_burnchain_block_height: u64, - /// The burnchain block height of the last query - pub last_burnchain_block_height: u64, -} - -impl RewardCycleInfo { - /// Check if the provided burnchain block height is part of the reward cycle - pub const fn is_in_reward_cycle(&self, burnchain_block_height: u64) -> bool { - let blocks_mined = burnchain_block_height.saturating_sub(self.first_burnchain_block_height); - let reward_cycle = blocks_mined / self.reward_cycle_length; - self.reward_cycle == reward_cycle - } - - /// Check if the provided burnchain block height is in the prepare phase - pub fn is_in_prepare_phase(&self, burnchain_block_height: u64) -> bool { - PoxConstants::static_is_in_prepare_phase( - self.first_burnchain_block_height, - self.reward_cycle_length, - self.prepare_phase_block_length, - burnchain_block_height, - ) - } + /// The runloop is initialized + Initialized, } /// The runloop for the stacks signer @@ -98,32 +65,80 @@ pub struct RunLoop { pub state: State, /// The commands received thus far pub commands: VecDeque, - /// The current reward cycle info. 
Only None if the runloop is uninitialized - pub current_reward_cycle_info: Option, } impl From for RunLoop { /// Creates new runloop from a config fn from(config: GlobalConfig) -> Self { let stacks_client = StacksClient::from(&config); - Self { + RunLoop { config, stacks_client, stacks_signers: HashMap::with_capacity(2), state: State::Uninitialized, commands: VecDeque::new(), - current_reward_cycle_info: None, } } } impl RunLoop { + /// Parse Nakamoto signer entries into relevant signer information + pub fn parse_nakamoto_signer_entries( + signers: &[NakamotoSignerEntry], + is_mainnet: bool, + ) -> ParsedSignerEntries { + let mut weight_end = 1; + let mut coordinator_key_ids = HashMap::with_capacity(4000); + let mut signer_key_ids = HashMap::with_capacity(signers.len()); + let mut signer_ids = HashMap::with_capacity(signers.len()); + let mut public_keys = PublicKeys { + signers: hashbrown::HashMap::with_capacity(signers.len()), + key_ids: hashbrown::HashMap::with_capacity(4000), + }; + let mut signer_public_keys = HashMap::with_capacity(signers.len()); + for (i, entry) in signers.iter().enumerate() { + // TODO: track these signer ids as non participating if any of the conversions fail + let signer_id = u32::try_from(i).expect("FATAL: number of signers exceeds u32::MAX"); + let ecdsa_public_key = ecdsa::PublicKey::try_from(entry.signing_key.as_slice()) + .expect("FATAL: corrupted signing key"); + let signer_public_key = Point::try_from(&Compressed::from(ecdsa_public_key.to_bytes())) + .expect("FATAL: corrupted signing key"); + let stacks_public_key = StacksPublicKey::from_slice(entry.signing_key.as_slice()) + .expect("FATAL: Corrupted signing key"); + + let stacks_address = StacksAddress::p2pkh(is_mainnet, &stacks_public_key); + signer_ids.insert(stacks_address, signer_id); + signer_public_keys.insert(signer_id, signer_public_key); + let weight_start = weight_end; + weight_end = weight_start + entry.weight; + for key_id in weight_start..weight_end { + public_keys.key_ids.insert(key_id, ecdsa_public_key); + public_keys.signers.insert(signer_id, ecdsa_public_key); + coordinator_key_ids + .entry(signer_id) + .or_insert(HashSet::with_capacity(entry.weight as usize)) + .insert(key_id); + signer_key_ids + .entry(signer_id) + .or_insert(Vec::with_capacity(entry.weight as usize)) + .push(key_id); + } + } + ParsedSignerEntries { + signer_ids, + public_keys, + signer_key_ids, + signer_public_keys, + coordinator_key_ids, + } + } + /// Get the registered signers for a specific reward cycle /// Returns None if no signers are registered or its not Nakamoto cycle pub fn get_parsed_reward_set( &self, reward_cycle: u64, - ) -> Result, ClientError> { + ) -> Result, ClientError> { debug!("Getting registered signers for reward cycle {reward_cycle}..."); let Some(signers) = self.stacks_client.get_reward_set_signers(reward_cycle)? 
else { warn!("No reward set signers found for reward cycle {reward_cycle}."); @@ -133,8 +148,10 @@ impl RunLoop { warn!("No registered signers found for reward cycle {reward_cycle}."); return Ok(None); } - let entries = SignerEntries::parse(self.config.network.is_mainnet(), &signers).unwrap(); - Ok(Some(entries)) + Ok(Some(Self::parse_nakamoto_signer_entries( + &signers, + self.config.network.is_mainnet(), + ))) } /// Get the stackerdb signer slots for a specific reward cycle @@ -207,24 +224,34 @@ impl RunLoop { nonce_timeout: self.config.nonce_timeout, sign_timeout: self.config.sign_timeout, tx_fee_ustx: self.config.tx_fee_ustx, - max_tx_fee_ustx: self.config.max_tx_fee_ustx, - db_path: self.config.db_path.clone(), }) } /// Refresh signer configuration for a specific reward cycle fn refresh_signer_config(&mut self, reward_cycle: u64) { let reward_index = reward_cycle % 2; - if let Some(new_signer_config) = self.get_signer_config(reward_cycle) { - let signer_id = new_signer_config.signer_id; - debug!("Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initializing signer state."); - if reward_cycle != 0 { + let mut needs_refresh = false; + if let Some(signer) = self.stacks_signers.get_mut(&reward_index) { + let old_reward_cycle = signer.reward_cycle; + if old_reward_cycle == reward_cycle { + //If the signer is already registered for the reward cycle, we don't need to do anything further here + debug!("Signer is configured for reward cycle {reward_cycle}.") + } else { + needs_refresh = true; + } + } else { + needs_refresh = true; + }; + if needs_refresh { + if let Some(new_signer_config) = self.get_signer_config(reward_cycle) { + let signer_id = new_signer_config.signer_id; + debug!("Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initializing signer state."); let prior_reward_cycle = reward_cycle.saturating_sub(1); let prior_reward_set = prior_reward_cycle % 2; if let Some(signer) = self.stacks_signers.get_mut(&prior_reward_set) { if signer.reward_cycle == prior_reward_cycle { // The signers have been calculated for the next reward cycle. Update the current one - debug!("{signer}: Next reward cycle ({reward_cycle}) signer set calculated. Reconfiguring current reward cycle signer."); + debug!("Signer #{}: Next reward cycle ({reward_cycle}) signer set calculated. Updating current reward cycle ({prior_reward_cycle}) signer.", signer.signer_id); signer.next_signer_addresses = new_signer_config .signer_entries .signer_ids @@ -234,87 +261,55 @@ impl RunLoop { signer.next_signer_slot_ids = new_signer_config.signer_slot_ids.clone(); } } + self.stacks_signers + .insert(reward_index, Signer::from(new_signer_config)); + debug!("Signer #{signer_id} for reward cycle {reward_cycle} initialized. Initialized {} signers", self.stacks_signers.len()); + } else { + warn!("Signer is not registered for reward cycle {reward_cycle}. Waiting for confirmed registration..."); } - let new_signer = Signer::from(new_signer_config); - info!("{new_signer} initialized."); - self.stacks_signers.insert(reward_index, new_signer); - } else { - warn!("Signer is not registered for reward cycle {reward_cycle}. 
Waiting for confirmed registration..."); } } - fn initialize_runloop(&mut self) -> Result<(), ClientError> { - debug!("Initializing signer runloop..."); - let reward_cycle_info = retry_with_exponential_backoff(|| { - self.stacks_client - .get_current_reward_cycle_info() - .map_err(backoff::Error::transient) - })?; - let current_reward_cycle = reward_cycle_info.reward_cycle; + /// Refresh the signer configuration by retrieving the necessary information from the stacks node + /// Note: this will trigger DKG if required + fn refresh_signers(&mut self, current_reward_cycle: u64) -> Result<(), ClientError> { + let next_reward_cycle = current_reward_cycle.saturating_add(1); self.refresh_signer_config(current_reward_cycle); - // We should only attempt to initialize the next reward cycle signer if we are in the prepare phase of the next reward cycle - if reward_cycle_info.is_in_prepare_phase(reward_cycle_info.last_burnchain_block_height) { - self.refresh_signer_config(current_reward_cycle.saturating_add(1)); - } - self.current_reward_cycle_info = Some(reward_cycle_info); - if self.stacks_signers.is_empty() { - self.state = State::NoRegisteredSigners; - } else { - self.state = State::RegisteredSigners; - } - Ok(()) - } - - fn refresh_runloop(&mut self, current_burn_block_height: u64) -> Result<(), ClientError> { - let reward_cycle_info = self - .current_reward_cycle_info - .as_mut() - .expect("FATAL: cannot be an initialized signer with no reward cycle info."); - // First ensure we refresh our view of the current reward cycle information - if !reward_cycle_info.is_in_reward_cycle(current_burn_block_height) { - let new_reward_cycle_info = retry_with_exponential_backoff(|| { - self.stacks_client - .get_current_reward_cycle_info() - .map_err(backoff::Error::transient) - })?; - *reward_cycle_info = new_reward_cycle_info; - } - let current_reward_cycle = reward_cycle_info.reward_cycle; - // We should only attempt to refresh the signer if we are not configured for the next reward cycle yet and we received a new burn block for its prepare phase - if reward_cycle_info.is_in_prepare_phase(current_burn_block_height) { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if self - .stacks_signers - .get(&(next_reward_cycle % 2)) - .map(|signer| signer.reward_cycle != next_reward_cycle) - .unwrap_or(true) - { - info!("Received a new burnchain block height ({current_burn_block_height}) in the prepare phase of the next reward cycle ({next_reward_cycle}). Checking for signer registration..."); - self.refresh_signer_config(next_reward_cycle); + self.refresh_signer_config(next_reward_cycle); + // TODO: do not use an empty consensus hash + let pox_consensus_hash = ConsensusHash::empty(); + for signer in self.stacks_signers.values_mut() { + let old_coordinator_id = signer.coordinator_selector.get_coordinator().0; + let updated_coordinator_id = signer + .coordinator_selector + .refresh_coordinator(&pox_consensus_hash); + if old_coordinator_id != updated_coordinator_id { + debug!( + "Signer #{}: Coordinator updated. 
Resetting state to Idle.", signer.signer_id; + "old_coordinator_id" => {old_coordinator_id}, + "updated_coordinator_id" => {updated_coordinator_id}, + "pox_consensus_hash" => %pox_consensus_hash + ); + signer.coordinator.state = CoordinatorState::Idle; + signer.state = SignerState::Idle; + } + if signer.approved_aggregate_public_key.is_none() { + retry_with_exponential_backoff(|| { + signer + .update_dkg(&self.stacks_client) + .map_err(backoff::Error::transient) + })?; } } - self.cleanup_stale_signers(current_reward_cycle); if self.stacks_signers.is_empty() { - self.state = State::NoRegisteredSigners; - } else { - self.state = State::RegisteredSigners; + info!("Signer is not registered for the current {current_reward_cycle} or next {next_reward_cycle} reward cycles. Waiting for confirmed registration..."); + self.state = State::Uninitialized; + return Err(ClientError::NotRegistered); } + self.state = State::Initialized; + info!("Runloop successfully initialized!"); Ok(()) } - - fn cleanup_stale_signers(&mut self, current_reward_cycle: u64) { - let mut to_delete = Vec::new(); - for (idx, signer) in &mut self.stacks_signers { - if signer.reward_cycle < current_reward_cycle { - debug!("{signer}: Signer's tenure has completed."); - to_delete.push(*idx); - continue; - } - } - for idx in to_delete { - self.stacks_signers.remove(&idx); - } - } } impl SignerRunLoop, RunLoopCommand> for RunLoop { @@ -339,76 +334,49 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { if let Some(cmd) = cmd { self.commands.push_back(cmd); } - if self.state == State::Uninitialized { - if let Err(e) = self.initialize_runloop() { - error!("Failed to initialize signer runloop: {e}."); - if let Some(event) = event { - warn!("Ignoring event: {event:?}"); - } - return None; - } - } else if let Some(SignerEvent::NewBurnBlock(current_burn_block_height)) = event { - if let Err(e) = self.refresh_runloop(current_burn_block_height) { - error!("Failed to refresh signer runloop: {e}."); - warn!("Signer may have an outdated view of the network."); - } - } - let current_reward_cycle = self - .current_reward_cycle_info - .as_ref() - .expect("FATAL: cannot be an initialized signer with no reward cycle info.") - .reward_cycle; - if self.state == State::NoRegisteredSigners { - let next_reward_cycle = current_reward_cycle.saturating_add(1); - if let Some(event) = event { - info!("Signer is not registered for the current reward cycle ({current_reward_cycle}) or next reward cycle ({next_reward_cycle}). Waiting for confirmed registration..."); + // TODO: queue events and process them potentially after initialization success (similar to commands)? + let Ok(current_reward_cycle) = retry_with_exponential_backoff(|| { + self.stacks_client + .get_current_reward_cycle() + .map_err(backoff::Error::transient) + }) else { + error!("Failed to retrieve current reward cycle"); + warn!("Ignoring event: {event:?}"); + return None; + }; + if let Err(e) = self.refresh_signers(current_reward_cycle) { + if self.state == State::Uninitialized { + // If we were never actually initialized, we cannot process anything. Just return. + warn!("Failed to initialize signers. Are you sure this signer is correctly registered for the current or next reward cycle?"); warn!("Ignoring event: {event:?}"); + return None; } - return None; + error!("Failed to refresh signers: {e}. Signer may have an outdated view of the network. 
Attempting to process event anyway."); } for signer in self.stacks_signers.values_mut() { - let event_parity = match event { - Some(SignerEvent::BlockValidationResponse(_)) => Some(current_reward_cycle % 2), - // Block proposal events do have reward cycles, but each proposal has its own cycle, - // and the vec could be heterogenous, so, don't differentiate. - Some(SignerEvent::MinerMessages(..)) - | Some(SignerEvent::NewBurnBlock(_)) - | Some(SignerEvent::StatusCheck) - | None => None, - Some(SignerEvent::SignerMessages(msg_parity, ..)) => { - Some(u64::from(msg_parity) % 2) - } - }; - let other_signer_parity = (signer.reward_cycle + 1) % 2; - if event_parity == Some(other_signer_parity) { - continue; - } - - if signer.approved_aggregate_public_key.is_none() { - if let Err(e) = signer.update_dkg(&self.stacks_client) { - error!("{signer}: failed to update DKG: {e}"); - } - } - signer.refresh_coordinator(); if let Err(e) = signer.process_event( &self.stacks_client, event.as_ref(), res.clone(), current_reward_cycle, ) { - error!("{signer}: errored processing event: {e}"); + error!( + "Signer #{} for reward cycle {} errored processing event: {e}", + signer.signer_id, signer.reward_cycle + ); } if let Some(command) = self.commands.pop_front() { let reward_cycle = command.reward_cycle; if signer.reward_cycle != reward_cycle { warn!( - "{signer}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}" + "Signer #{}: not registered for reward cycle {reward_cycle}. Ignoring command: {command:?}", signer.signer_id ); } else { info!( - "{signer}: Queuing an external runloop command ({:?}): {command:?}", + "Signer #{}: Queuing an external runloop command ({:?}): {command:?}", + signer.signer_id, signer - .state_machine + .signing_round .public_keys .signers .get(&signer.signer_id) @@ -417,7 +385,7 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { } } // After processing event, run the next command for each signer - signer.process_next_command(&self.stacks_client, current_reward_cycle); + signer.process_next_command(&self.stacks_client); } None } @@ -425,11 +393,9 @@ impl SignerRunLoop, RunLoopCommand> for RunLoop { #[cfg(test)] mod tests { use blockstack_lib::chainstate::stacks::boot::NakamotoSignerEntry; - use libsigner::SignerEntries; - use rand::{thread_rng, Rng, RngCore}; use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; - use super::RewardCycleInfo; + use super::RunLoop; #[test] fn parse_nakamoto_signer_entries_test() { @@ -447,7 +413,7 @@ mod tests { }); } - let parsed_entries = SignerEntries::parse(false, &signer_entries).unwrap(); + let parsed_entries = RunLoop::parse_nakamoto_signer_entries(&signer_entries, false); assert_eq!(parsed_entries.signer_ids.len(), nmb_signers); let mut signer_ids = parsed_entries.signer_ids.into_values().collect::>(); signer_ids.sort(); @@ -456,79 +422,4 @@ mod tests { (0..nmb_signers).map(|id| id as u32).collect::>() ); } - - #[test] - fn is_in_reward_cycle_info() { - let rand_byte: u8 = std::cmp::max(1, thread_rng().gen()); - let prepare_phase_block_length = rand_byte as u64; - // Ensure the reward cycle is not close to u64 Max to prevent overflow when adding prepare phase len - let reward_cycle_length = (std::cmp::max( - prepare_phase_block_length.wrapping_add(1), - thread_rng().next_u32() as u64, - )) - .wrapping_add(prepare_phase_block_length); - let reward_cycle_phase_block_length = - reward_cycle_length.wrapping_sub(prepare_phase_block_length); - let first_burnchain_block_height = std::cmp::max(1u8, 
thread_rng().gen()) as u64; - let last_burnchain_block_height = thread_rng().gen_range( - first_burnchain_block_height - ..first_burnchain_block_height - .wrapping_add(reward_cycle_length) - .wrapping_sub(prepare_phase_block_length), - ); - let blocks_mined = last_burnchain_block_height.wrapping_sub(first_burnchain_block_height); - let reward_cycle = blocks_mined / reward_cycle_length; - - let reward_cycle_info = RewardCycleInfo { - reward_cycle, - reward_cycle_length, - prepare_phase_block_length, - first_burnchain_block_height, - last_burnchain_block_height, - }; - assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height)); - assert!(!reward_cycle_info.is_in_prepare_phase(first_burnchain_block_height)); - - assert!(reward_cycle_info.is_in_reward_cycle(last_burnchain_block_height)); - assert!(!reward_cycle_info.is_in_prepare_phase(last_burnchain_block_height)); - - assert!(!reward_cycle_info - .is_in_reward_cycle(first_burnchain_block_height.wrapping_add(reward_cycle_length))); - assert!(!reward_cycle_info - .is_in_prepare_phase(!first_burnchain_block_height.wrapping_add(reward_cycle_length))); - - assert!(reward_cycle_info.is_in_reward_cycle( - first_burnchain_block_height - .wrapping_add(reward_cycle_length) - .wrapping_sub(1) - )); - assert!(reward_cycle_info.is_in_prepare_phase( - first_burnchain_block_height - .wrapping_add(reward_cycle_length) - .wrapping_sub(1) - )); - - assert!(reward_cycle_info.is_in_reward_cycle( - first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) - )); - assert!(!reward_cycle_info.is_in_prepare_phase( - first_burnchain_block_height.wrapping_add(reward_cycle_phase_block_length) - )); - - assert!(reward_cycle_info.is_in_reward_cycle(first_burnchain_block_height.wrapping_add(1))); - assert!( - !reward_cycle_info.is_in_prepare_phase(first_burnchain_block_height.wrapping_add(1)) - ); - - assert!(reward_cycle_info.is_in_reward_cycle( - first_burnchain_block_height - .wrapping_add(reward_cycle_phase_block_length) - .wrapping_add(1) - )); - assert!(reward_cycle_info.is_in_prepare_phase( - first_burnchain_block_height - .wrapping_add(reward_cycle_phase_block_length) - .wrapping_add(1) - )); - } } diff --git a/stacks-signer/src/signer.rs b/stacks-signer/src/signer.rs index 3da523eeaf1..9bab0ab6ca5 100644 --- a/stacks-signer/src/signer.rs +++ b/stacks-signer/src/signer.rs @@ -14,25 +14,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
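
// Illustrative sketch, not part of the patch: how `parse_nakamoto_signer_entries`
// in runloop.rs above derives WSTS key ids from signer weights. Key ids start at 1,
// and each signer receives a contiguous range whose length equals its weight, so a
// signer's voting power is proportional to the number of key ids it holds.
// `weights` is a hypothetical stand-in for the `NakamotoSignerEntry::weight` values
// read by the real code.
fn key_id_ranges(weights: &[u32]) -> Vec<(u32, std::ops::Range<u32>)> {
    let mut weight_end = 1;
    weights
        .iter()
        .enumerate()
        .map(|(signer_id, weight)| {
            // Hand this signer the next `weight` key ids, starting where the
            // previous signer's range ended.
            let weight_start = weight_end;
            weight_end = weight_start + weight;
            (signer_id as u32, weight_start..weight_end)
        })
        .collect()
}
// For example, weights [2, 3] yield signer 0 -> key ids 1 and 2, and signer 1 ->
// key ids 3, 4, and 5, matching the `weight_start..weight_end` loop in the patch.
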
use std::collections::VecDeque; -use std::path::PathBuf; use std::sync::mpsc::Sender; use std::time::Instant; -use blockstack_lib::chainstate::burn::ConsensusHashExtensions; use blockstack_lib::chainstate::nakamoto::signer_set::NakamotoSigners; use blockstack_lib::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote}; +use blockstack_lib::chainstate::stacks::boot::SIGNERS_VOTING_FUNCTION_NAME; use blockstack_lib::chainstate::stacks::StacksTransaction; use blockstack_lib::net::api::postblock_proposal::BlockValidateResponse; -use hashbrown::HashSet; -use libsigner::{ - BlockProposalSigners, BlockRejection, BlockResponse, MessageSlotID, RejectCode, SignerEvent, - SignerMessage, -}; -use serde_derive::{Deserialize, Serialize}; +use libsigner::{BlockRejection, BlockResponse, RejectCode, SignerEvent, SignerMessage}; use slog::{slog_debug, slog_error, slog_info, slog_warn}; use stacks_common::codec::{read_next, StacksMessageCodec}; -use stacks_common::types::chainstate::{ConsensusHash, StacksAddress}; -use stacks_common::types::StacksEpochId; +use stacks_common::types::chainstate::StacksAddress; +use stacks_common::types::{StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::{debug, error, info, warn}; use wsts::common::{MerkleRoot, Signature}; @@ -43,15 +37,13 @@ use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; use wsts::state_machine::coordinator::{ Config as CoordinatorConfig, Coordinator, State as CoordinatorState, }; -use wsts::state_machine::signer::Signer as SignerStateMachine; +use wsts::state_machine::signer::Signer as WSTSSigner; use wsts::state_machine::{OperationResult, SignError}; -use wsts::traits::Signer as _; use wsts::v2; -use crate::client::{ClientError, StackerDB, StacksClient}; +use crate::client::{retry_with_exponential_backoff, ClientError, StackerDB, StacksClient}; use crate::config::SignerConfig; use crate::coordinator::CoordinatorSelector; -use crate::signerdb::SignerDb; /// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID #[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] @@ -64,23 +56,22 @@ impl std::fmt::Display for SignerSlotID { } /// Additional Info about a proposed block -#[derive(Serialize, Deserialize, Debug, PartialEq)] pub struct BlockInfo { /// The block we are considering - pub block: NakamotoBlock, + block: NakamotoBlock, /// Our vote on the block if we have one yet - pub vote: Option, + vote: Option, /// Whether the block contents are valid valid: Option, /// The associated packet nonce request if we have one nonce_request: Option, /// Whether this block is already being signed over - pub signed_over: bool, + signed_over: bool, } impl BlockInfo { /// Create a new BlockInfo - pub const fn new(block: NakamotoBlock) -> Self { + pub fn new(block: NakamotoBlock) -> Self { Self { block, vote: None, @@ -91,7 +82,7 @@ impl BlockInfo { } /// Create a new BlockInfo with an associated nonce request packet - pub const fn new_with_request(block: NakamotoBlock, nonce_request: NonceRequest) -> Self { + pub fn new_with_request(block: NakamotoBlock, nonce_request: NonceRequest) -> Self { Self { block, vote: None, @@ -100,11 +91,6 @@ impl BlockInfo { signed_over: true, } } - - /// Return the block's signer signature hash - pub fn signer_signature_hash(&self) -> Sha512Trunc256Sum { - self.block.header.signer_signature_hash() - } } /// Which signer operation to perform @@ -124,7 +110,7 @@ pub enum Command { } 
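
// Illustrative sketch, not part of the patch: the lifecycle of the `valid` and
// `signed_over` fields on `BlockInfo` above, using a hypothetical stand-in type.
// `valid` starts as None and is set once the stacks node answers the validation
// request; `signed_over` flips to true at most once, mirroring the coordinator's
// auto-trigger check in `process_block_validate_response` below.
struct BlockLifecycle {
    valid: Option<bool>,
    signed_over: bool,
}

impl BlockLifecycle {
    fn new() -> Self {
        Self { valid: None, signed_over: false }
    }

    /// Record the stacks node's validation verdict for this block.
    fn on_validate_response(&mut self, is_valid: bool) {
        self.valid = Some(is_valid);
    }

    /// Returns true only the first time a validated block may be handed to the
    /// coordinator for signing; repeated sign attempts are ignored.
    fn try_start_signing(&mut self) -> bool {
        if self.valid == Some(true) && !self.signed_over {
            self.signed_over = true;
            return true;
        }
        false
    }
}
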
/// The Signer state -#[derive(PartialEq, Eq, Debug, Clone)] +#[derive(PartialEq, Debug, Clone)] pub enum State { /// The signer is idle, waiting for messages and commands Idle, @@ -137,9 +123,12 @@ pub struct Signer { /// The coordinator for inbound messages for a specific reward cycle pub coordinator: FireCoordinator, /// The signing round used to sign messages for a specific reward cycle - pub state_machine: SignerStateMachine, + pub signing_round: WSTSSigner, /// the state of the signer pub state: State, + /// Observed blocks that we have seen so far + // TODO: cleanup storage and garbage collect this stuff + pub blocks: HashMap, /// Received Commands that need to be processed pub commands: VecDeque, /// The stackerdb client @@ -158,85 +147,24 @@ pub struct Signer { pub next_signer_addresses: Vec, /// The reward cycle this signer belongs to pub reward_cycle: u64, - /// The default tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0). + /// The tx fee in uSTX to use if the epoch is pre Nakamoto (Epoch 3.0) pub tx_fee_ustx: u64, - /// If estimating the tx fee, the max tx fee in uSTX to use when the epoch is pre Nakamoto (Epoch 3.0) - /// If None, will not cap the fee. - pub max_tx_fee_ustx: Option, /// The coordinator info for the signer pub coordinator_selector: CoordinatorSelector, /// The approved key registered to the contract pub approved_aggregate_public_key: Option, - /// The current active miner's key (if we know it!) - pub miner_key: Option, - /// Signer DB path - pub db_path: PathBuf, - /// SignerDB for state management - pub signer_db: SignerDb, -} - -impl std::fmt::Display for Signer { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "Cycle #{} Signer #{}(C:{})", - self.reward_cycle, - self.signer_id, - self.coordinator_selector.get_coordinator().0, - ) - } -} - -impl Signer { - /// Return the current coordinator. - /// If the current reward cycle is the active reward cycle, this is the miner, - /// so the first element of the tuple will be None (because the miner does not have a signer index). - /// Otherwise, the coordinator is the signer with the index returned by the coordinator selector. - fn get_coordinator_sign(&self, current_reward_cycle: u64) -> (Option, PublicKey) { - if self.reward_cycle == current_reward_cycle { - let Some(ref cur_miner) = self.miner_key else { - error!( - "Signer #{}: Could not lookup current miner while in active reward cycle", - self.signer_id - ); - let selected = self.coordinator_selector.get_coordinator(); - return (Some(selected.0), selected.1); - }; - // coordinator is the current miner. 
- (None, *cur_miner) - } else { - let selected = self.coordinator_selector.get_coordinator(); - (Some(selected.0), selected.1) - } - } - - /// Get the current coordinator for executing DKG - /// This will always use the coordinator selector to determine the coordinator - fn get_coordinator_dkg(&self) -> (u32, PublicKey) { - self.coordinator_selector.get_coordinator() - } } impl From for Signer { fn from(signer_config: SignerConfig) -> Self { let stackerdb = StackerDB::from(&signer_config); - let num_signers = signer_config - .signer_entries - .count_signers() + let num_signers = u32::try_from(signer_config.signer_entries.public_keys.signers.len()) .expect("FATAL: Too many registered signers to fit in a u32"); - let num_keys = signer_config - .signer_entries - .count_keys() - .expect("FATAL: Too many key ids to fit in a u32"); - let threshold = signer_config - .signer_entries - .get_signing_threshold() - .expect("FATAL: Too many key ids to fit in a u32"); - let dkg_threshold = signer_config - .signer_entries - .get_dkg_threshold() + let num_keys = u32::try_from(signer_config.signer_entries.public_keys.key_ids.len()) .expect("FATAL: Too many key ids to fit in a u32"); + let threshold = (num_keys as f64 * 7_f64 / 10_f64).ceil() as u32; + let dkg_threshold = (num_keys as f64 * 9_f64 / 10_f64).ceil() as u32; let coordinator_config = CoordinatorConfig { threshold, @@ -249,48 +177,34 @@ impl From for Signer { dkg_end_timeout: signer_config.dkg_end_timeout, nonce_timeout: signer_config.nonce_timeout, sign_timeout: signer_config.sign_timeout, - signer_key_ids: signer_config.signer_entries.coordinator_key_ids, - signer_public_keys: signer_config.signer_entries.signer_public_keys, + signer_key_ids: signer_config.signer_entries.coordinator_key_ids.into(), + signer_public_keys: signer_config.signer_entries.signer_public_keys.into(), }; let coordinator = FireCoordinator::new(coordinator_config); - let coordinator_selector = - CoordinatorSelector::from(signer_config.signer_entries.public_keys.clone()); - - debug!( - "Reward cycle #{} Signer #{}: initial coordinator is signer {}", - signer_config.reward_cycle, - signer_config.signer_id, - coordinator_selector.get_coordinator().0 - ); - let signer_db = - SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); - - let mut state_machine = SignerStateMachine::new( + let signing_round = WSTSSigner::new( threshold, num_signers, num_keys, signer_config.signer_id, signer_config.key_ids, signer_config.ecdsa_private_key, - signer_config.signer_entries.public_keys, + signer_config.signer_entries.public_keys.clone(), ); + let coordinator_selector = + CoordinatorSelector::from(signer_config.signer_entries.public_keys); - if let Some(state) = signer_db - .get_signer_state(signer_config.reward_cycle) - .expect("Failed to load signer state") - { - debug!( - "Reward cycle #{} Signer #{}: Loading signer", - signer_config.reward_cycle, signer_config.signer_id - ); - state_machine.signer = v2::Signer::load(&state); - } + debug!( + "Signer #{}: initial coordinator is signer {}", + signer_config.signer_id, + coordinator_selector.get_coordinator().0 + ); Self { coordinator, - state_machine, + signing_round, state: State::Idle, + blocks: HashMap::new(), commands: VecDeque::new(), stackerdb, mainnet: signer_config.mainnet, @@ -305,37 +219,13 @@ impl From for Signer { next_signer_addresses: vec![], reward_cycle: signer_config.reward_cycle, tx_fee_ustx: signer_config.tx_fee_ustx, - max_tx_fee_ustx: signer_config.max_tx_fee_ustx, coordinator_selector, 
approved_aggregate_public_key: None, - miner_key: None, - db_path: signer_config.db_path, - signer_db, } } } impl Signer { - /// Refresh the coordinator selector - pub fn refresh_coordinator(&mut self) { - // TODO: do not use an empty consensus hash - let pox_consensus_hash = ConsensusHash::empty(); - let old_coordinator_id = self.coordinator_selector.get_coordinator().0; - let updated_coordinator_id = self - .coordinator_selector - .refresh_coordinator(&pox_consensus_hash); - if old_coordinator_id != updated_coordinator_id { - debug!( - "{self}: Coordinator updated. Resetting state to Idle."; - "old_coordinator_id" => {old_coordinator_id}, - "updated_coordinator_id" => {updated_coordinator_id}, - "pox_consensus_hash" => %pox_consensus_hash - ); - self.coordinator.state = CoordinatorState::Idle; - self.state = State::Idle; - } - } - /// Finish an operation and update the coordinator selector accordingly fn finish_operation(&mut self) { self.state = State::Idle; @@ -353,30 +243,35 @@ impl Signer { match command { Command::Dkg => { if self.approved_aggregate_public_key.is_some() { - debug!("Reward cycle #{} Signer #{}: Already have an aggregate key. Ignoring DKG command.", self.reward_cycle, self.signer_id); + debug!("Signer #{}: Already have an aggregate key for reward cycle {}. Ignoring DKG command.", self.signer_id, self.reward_cycle); return; } - let vote_round = match stacks_client.get_last_round(self.reward_cycle) { + let vote_round = match retry_with_exponential_backoff(|| { + stacks_client + .get_last_round(self.reward_cycle) + .map_err(backoff::Error::transient) + }) { Ok(last_round) => last_round, Err(e) => { - error!("{self}: Unable to perform DKG. Failed to get last round from stacks node: {e:?}"); + error!("Signer #{}: Unable to perform DKG. Failed to get last round from stacks node: {e:?}", self.signer_id); return; } }; // The dkg id will increment internally following "start_dkg_round" so do not increment it here self.coordinator.current_dkg_id = vote_round.unwrap_or(0); info!( - "{self}: Starting DKG vote"; + "Signer #{}: Starting DKG vote", + self.signer_id; "round" => self.coordinator.current_dkg_id.wrapping_add(1), "cycle" => self.reward_cycle, ); match self.coordinator.start_dkg_round() { Ok(msg) => { let ack = self.stackerdb.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); + debug!("Signer #{}: ACK: {ack:?}", self.signer_id); } Err(e) => { - error!("{self}: Failed to start DKG: {e:?}",); + error!("Signer #{}: Failed to start DKG: {e:?}", self.signer_id); return; } } @@ -387,20 +282,19 @@ impl Signer { merkle_root, } => { if self.approved_aggregate_public_key.is_none() { - debug!("{self}: Cannot sign a block without an approved aggregate public key. Ignore it."); + debug!("Signer #{}: Cannot sign a block without an approved aggregate public key. Ignore it.", self.signer_id); return; } let signer_signature_hash = block.header.signer_signature_hash(); - let mut block_info = self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - .unwrap_or_else(|_| Some(BlockInfo::new(block.clone()))) - .unwrap_or_else(|| BlockInfo::new(block.clone())); + let block_info = self + .blocks + .entry(signer_signature_hash) + .or_insert_with(|| BlockInfo::new(block.clone())); if block_info.signed_over { - debug!("{self}: Received a sign command for a block we are already signing over. Ignore it."); + debug!("Signer #{}: Received a sign command for a block we are already signing over. 
Ignore it.", self.signer_id); return; } - info!("{self}: Signing block"; + info!("Signer #{}: Signing block", self.signer_id; "block_consensus_hash" => %block.header.consensus_hash, "block_height" => block.header.chain_length, "pre_sign_block_id" => %block.block_id(), @@ -412,16 +306,14 @@ impl Signer { ) { Ok(msg) => { let ack = self.stackerdb.send_message_with_retry(msg.into()); - debug!("{self}: ACK: {ack:?}",); + debug!("Signer #{}: ACK: {ack:?}", self.signer_id); block_info.signed_over = true; - self.signer_db - .insert_block(self.reward_cycle, &block_info) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); } Err(e) => { - error!("{self}: Failed to start signing block: {e:?}",); + error!( + "Signer #{}: Failed to start signing block: {e:?}", + self.signer_id + ); return; } } @@ -431,40 +323,31 @@ impl Signer { } /// Attempt to process the next command in the queue, and update state accordingly - pub fn process_next_command( - &mut self, - stacks_client: &StacksClient, - current_reward_cycle: u64, - ) { + pub fn process_next_command(&mut self, stacks_client: &StacksClient) { + let coordinator_id = self.coordinator_selector.get_coordinator().0; match &self.state { State::Idle => { - let Some(command) = self.commands.front() else { - debug!("{self}: Nothing to process. Waiting for command..."); + if coordinator_id != self.signer_id { + debug!( + "Signer #{}: Coordinator is {coordinator_id:?}. Will not process any commands...", + self.signer_id + ); return; - }; - let coordinator_id = if matches!(command, Command::Dkg) { - // We cannot execute a DKG command if we are not the coordinator - Some(self.get_coordinator_dkg().0) + } + if let Some(command) = self.commands.pop_front() { + self.execute_command(stacks_client, &command); } else { - self.get_coordinator_sign(current_reward_cycle).0 - }; - if coordinator_id != Some(self.signer_id) { debug!( - "{self}: Coordinator is {coordinator_id:?}. Will not process any commands...", + "Signer #{}: Nothing to process. Waiting for command...", + self.signer_id ); - return; } - let command = self - .commands - .pop_front() - .expect("BUG: Already asserted that the command queue was not empty"); - self.execute_command(stacks_client, &command); } State::OperationInProgress => { // We cannot execute the next command until the current one is finished... debug!( - "{self}: Waiting for operation to finish. Coordinator state = {:?}", - self.coordinator.state + "Signer #{}: Waiting for coordinator {coordinator_id:?} operation to finish...", + self.signer_id, ); } } @@ -476,89 +359,89 @@ impl Signer { stacks_client: &StacksClient, block_validate_response: &BlockValidateResponse, res: Sender>, - current_reward_cycle: u64, ) { - let mut block_info = match block_validate_response { + let block_info = match block_validate_response { BlockValidateResponse::Ok(block_validate_ok) => { let signer_signature_hash = block_validate_ok.signer_signature_hash; // For mutability reasons, we need to take the block_info out of the map and add it back after processing - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. 
Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}",); - return; - } + let Some(mut block_info) = self.blocks.remove(&signer_signature_hash) else { + // We have not seen this block before. Why are we getting a response for it? + debug!("Signer #{}: Received a block validate response for a block we have not seen before. Ignoring...", self.signer_id); + return; }; let is_valid = self.verify_block_transactions(stacks_client, &block_info.block); block_info.valid = Some(is_valid); - self.signer_db - .insert_block(self.reward_cycle, &block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); info!( - "{self}: Treating block validation for block {} as valid: {:?}", + "Signer #{}: Treating block validation for block {} as valid: {:?}", + self.signer_id, &block_info.block.block_id(), block_info.valid ); - block_info + // Add the block info back to the map + self.blocks + .entry(signer_signature_hash) + .or_insert(block_info) } BlockValidateResponse::Reject(block_validate_reject) => { let signer_signature_hash = block_validate_reject.signer_signature_hash; - let mut block_info = match self - .signer_db - .block_lookup(self.reward_cycle, &signer_signature_hash) - { - Ok(Some(block_info)) => block_info, - Ok(None) => { - // We have not seen this block before. Why are we getting a response for it? - debug!("{self}: Received a block validate response for a block we have not seen before. Ignoring..."); - return; - } - Err(e) => { - error!("{self}: Failed to lookup block in signer db: {e:?}"); - return; - } + let Some(block_info) = self.blocks.get_mut(&signer_signature_hash) else { + // We have not seen this block before. Why are we getting a response for it? + debug!("Signer #{}: Received a block validate response for a block we have not seen before. Ignoring...", self.signer_id); + return; }; block_info.valid = Some(false); // Submit a rejection response to the .signers contract for miners // to observe so they know to send another block and to prove signers are doing work); - warn!("{self}: Broadcasting a block rejection due to stacks node validation failure..."); + warn!("Signer #{}: Broadcasting a block rejection due to stacks node validation failure...", self.signer_id); if let Err(e) = self .stackerdb .send_message_with_retry(block_validate_reject.clone().into()) { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); + warn!( + "Signer #{}: Failed to send block rejection to stacker-db: {e:?}", + self.signer_id + ); } block_info } }; if let Some(mut nonce_request) = block_info.nonce_request.take() { - debug!("{self}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request..."); + debug!("Signer #{}: Received a block validate response from the stacks node for a block we already received a nonce request for. Responding to the nonce request...", self.signer_id); // We have received validation from the stacks node. 
Determine our vote and update the request message - self.determine_vote(&mut block_info, &mut nonce_request); + Self::determine_vote(self.signer_id, block_info, &mut nonce_request); // Send the nonce request through with our vote let packet = Packet { msg: Message::NonceRequest(nonce_request), sig: vec![], }; - self.handle_packets(stacks_client, res, &[packet], current_reward_cycle); + self.handle_packets(stacks_client, res, &[packet]); + } else { + let coordinator_id = self.coordinator_selector.get_coordinator().0; + if block_info.valid.unwrap_or(false) + && !block_info.signed_over + && coordinator_id == self.signer_id + { + // We are the coordinator. Trigger a signing round for this block + debug!( + "Signer #{}: triggering a signing round over the block {}", + self.signer_id, + block_info.block.header.block_hash() + ); + self.commands.push_back(Command::Sign { + block: block_info.block.clone(), + is_taproot: false, + merkle_root: None, + }); + } else { + debug!( + "Signer #{} ignoring block.", self.signer_id; + "block_hash" => block_info.block.header.block_hash(), + "valid" => block_info.valid, + "signed_over" => block_info.signed_over, + "coordinator_id" => coordinator_id, + ); + } } - debug!( - "{self}: Received a block validate response"; - "block_hash" => block_info.block.header.block_hash(), - "valid" => block_info.valid, - "signed_over" => block_info.signed_over, - ); - self.signer_db - .insert_block(self.reward_cycle, &block_info) - .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB")); } /// Handle signer messages submitted to signers stackerdb @@ -567,98 +450,41 @@ impl Signer { stacks_client: &StacksClient, res: Sender>, messages: &[SignerMessage], - current_reward_cycle: u64, ) { + let coordinator_pubkey = self.coordinator_selector.get_coordinator().1; let packets: Vec = messages .iter() .filter_map(|msg| match msg { - SignerMessage::DkgResults { .. } - | SignerMessage::BlockResponse(_) - | SignerMessage::Transactions(_) => None, + SignerMessage::BlockResponse(_) | SignerMessage::Transactions(_) => None, // TODO: if a signer tries to trigger DKG and we already have one set in the contract, ignore the request. 
SignerMessage::Packet(packet) => { - let coordinator_pubkey = if Self::is_dkg_message(&packet.msg) { - self.get_coordinator_dkg().1 - } else { - self.get_coordinator_sign(current_reward_cycle).1 - }; self.verify_packet(stacks_client, packet.clone(), &coordinator_pubkey) } }) .collect(); - self.handle_packets(stacks_client, res, &packets, current_reward_cycle); + self.handle_packets(stacks_client, res, &packets); } /// Handle proposed blocks submitted by the miners to stackerdb - fn handle_proposed_blocks( - &mut self, - stacks_client: &StacksClient, - proposals: &[BlockProposalSigners], - ) { - for proposal in proposals { - if proposal.reward_cycle != self.reward_cycle { - debug!( - "{self}: Received proposal for block outside of my reward cycle, ignoring."; - "proposal_reward_cycle" => proposal.reward_cycle, - "proposal_burn_height" => proposal.burn_height, - ); - continue; - } - let sig_hash = proposal.block.header.signer_signature_hash(); - match self.signer_db.block_lookup(self.reward_cycle, &sig_hash) { - Ok(Some(block)) => { - debug!( - "{self}: Received proposal for block already known, ignoring new proposal."; - "signer_sighash" => %sig_hash, - "proposal_burn_height" => proposal.burn_height, - "vote" => ?block.vote.as_ref().map(|v| { - if v.rejected { - "REJECT" - } else { - "ACCEPT" - } - }), - "signed_over" => block.signed_over, - ); - continue; - } - Ok(None) => { - // Store the block in our cache - self.signer_db - .insert_block(self.reward_cycle, &BlockInfo::new(proposal.block.clone())) - .unwrap_or_else(|e| { - error!("{self}: Failed to insert block in DB: {e:?}"); - }); - // Submit the block for validation - stacks_client - .submit_block_for_validation(proposal.block.clone()) - .unwrap_or_else(|e| { - warn!("{self}: Failed to submit block for validation: {e:?}"); - }); - } - Err(e) => { - error!( - "{self}: Failed to lookup block in DB: {e:?}. Dropping proposal request." 
+    fn handle_proposed_blocks(&mut self, stacks_client: &StacksClient, blocks: &[NakamotoBlock]) {
+        for block in blocks {
+            // Store the block in our cache
+            self.blocks.insert(
+                block.header.signer_signature_hash(),
+                BlockInfo::new(block.clone()),
+            );
+            // Submit the block for validation
+            stacks_client
+                .submit_block_for_validation(block.clone())
+                .unwrap_or_else(|e| {
+                    warn!(
+                        "Signer #{}: Failed to submit block for validation: {e:?}",
+                        self.signer_id
                     );
-                    continue;
-                }
-            }
+                });
         }
     }

-    /// Helper function for determining if the provided message is a DKG specific message
-    fn is_dkg_message(msg: &Message) -> bool {
-        matches!(
-            msg,
-            Message::DkgBegin(_)
-                | Message::DkgEnd(_)
-                | Message::DkgEndBegin(_)
-                | Message::DkgPrivateBegin(_)
-                | Message::DkgPrivateShares(_)
-                | Message::DkgPublicShares(_)
-        )
-    }
-
     /// Process inbound packets as both a signer and a coordinator
     /// Will send outbound packets and operation results as appropriate
     fn handle_packets(
@@ -666,29 +492,29 @@ impl Signer {
         stacks_client: &StacksClient,
         res: Sender<Vec<OperationResult>>,
         packets: &[Packet],
-        current_reward_cycle: u64,
     ) {
         let signer_outbound_messages = self
-            .state_machine
+            .signing_round
            .process_inbound_messages(packets)
            .unwrap_or_else(|e| {
-                error!("{self}: Failed to process inbound messages as a signer: {e:?}",);
+                error!(
+                    "Signer #{}: Failed to process inbound messages as a signer: {e:?}",
+                    self.signer_id
+                );
                vec![]
            });

         // Next process the message as the coordinator
-        let (coordinator_outbound_messages, operation_results) = if self.reward_cycle
-            != current_reward_cycle
-        {
-            self.coordinator
-                .process_inbound_messages(packets)
-                .unwrap_or_else(|e| {
-                    error!("{self}: Failed to process inbound messages as a coordinator: {e:?}");
-                    (vec![], vec![])
-                })
-        } else {
-            (vec![], vec![])
-        };
+        let (coordinator_outbound_messages, operation_results) = self
+            .coordinator
+            .process_inbound_messages(packets)
+            .unwrap_or_else(|e| {
+                error!(
+                    "Signer #{}: Failed to process inbound messages as a coordinator: {e:?}",
+                    self.signer_id
+                );
+                (vec![], vec![])
+            });

         if !operation_results.is_empty() {
             // We have finished a signing or DKG round, either successfully or due to error.
@@ -700,9 +526,6 @@ impl Signer {
             // We have received a message and are in the middle of an operation. Update our state accordingly
             self.update_operation();
         }
-
-        debug!("{self}: Saving signer state");
-        self.save_signer_state();
         self.send_outbound_messages(signer_outbound_messages);
         self.send_outbound_messages(coordinator_outbound_messages);
     }
@@ -715,22 +538,21 @@ impl Signer {
         else {
             // We currently reject anything that is not a block vote
             debug!(
-                "{self}: Received a signature share request for an unknown message stream. Reject it.",
+                "Signer #{}: Received a signature share request for an unknown message stream. Reject it.",
+                self.signer_id
             );
             return false;
         };
         match self
-            .signer_db
-            .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash)
-            .unwrap_or_else(|_| panic!("{self}: Failed to connect to DB"))
-            .map(|b| b.vote)
+            .blocks
+            .get(&block_vote.signer_signature_hash)
+            .map(|block_info| &block_info.vote)
         {
             Some(Some(vote)) => {
                 // Overwrite with our agreed upon value in case another message won majority or the coordinator is trying to cheat...
                 debug!(
-                    "{self}: Set vote (rejected = {}) to {vote:?}", block_vote.rejected;
-                    "requested_sighash" => %block_vote.signer_signature_hash,
+                    "Signer #{}: set vote for {} to {vote:?}",
+                    self.signer_id, block_vote.rejected
                 );
                 request.message = vote.serialize_to_vec();
                 true
@@ -739,20 +561,14 @@ impl Signer {
                 // We never agreed to sign this block. Reject it.
                 // This can happen if the coordinator received enough votes to sign yes
                 // or no on a block before we received validation from the stacks node.
-                debug!(
-                    "{self}: Received a signature share request for a block we never agreed to sign. Ignore it.";
-                    "requested_sighash" => %block_vote.signer_signature_hash,
-                );
+                debug!("Signer #{}: Received a signature share request for a block we never agreed to sign. Ignore it.", self.signer_id);
                 false
             }
             None => {
                 // We will only sign across block hashes or block hashes + b'n' byte for
                 // blocks we have seen a Nonce Request for (and subsequent validation)
                 // We are missing the context here necessary to make a decision. Reject the block
-                debug!(
-                    "{self}: Received a signature share request from an unknown block. Reject it.";
-                    "requested_sighash" => %block_vote.signer_signature_hash,
-                );
+                debug!("Signer #{}: Received a signature share request from an unknown block. Reject it.", self.signer_id);
                 false
             }
         }
@@ -766,42 +582,47 @@ impl Signer {
         &mut self,
         stacks_client: &StacksClient,
         nonce_request: &mut NonceRequest,
-    ) -> Option<BlockInfo> {
-        let Some(block) =
-            NakamotoBlock::consensus_deserialize(&mut nonce_request.message.as_slice()).ok()
+    ) -> bool {
+        let Some(block): Option<NakamotoBlock> = read_next(&mut &nonce_request.message[..]).ok()
         else {
             // We currently reject anything that is not a block
-            warn!("{self}: Received a nonce request for an unknown message stream. Reject it.",);
-            return None;
+            debug!(
+                "Signer #{}: Received a nonce request for an unknown message stream. Reject it.",
+                self.signer_id
+            );
+            return false;
         };
         let signer_signature_hash = block.header.signer_signature_hash();
-        let Some(mut block_info) = self
-            .signer_db
-            .block_lookup(self.reward_cycle, &signer_signature_hash)
-            .expect("Failed to connect to signer DB")
-        else {
-            debug!(
-                "{self}: We have received a block sign request for a block we have not seen before. Cache the nonce request and submit the block for validation...";
-                "signer_sighash" => %block.header.signer_signature_hash(),
+        let Some(block_info) = self.blocks.get_mut(&signer_signature_hash) else {
+            // We have not seen this block before. Cache it. Send a RPC to the stacks node to validate it.
+            debug!("Signer #{}: We have received a block sign request for a block we have not seen before. Cache the nonce request and submit the block for validation...", self.signer_id);
+            // We need to update our state to OperationInProgress so we can respond to the nonce request from this signer once we get our validation back
+            self.update_operation();
+            // Store the block in our cache
+            self.blocks.insert(
+                signer_signature_hash,
+                BlockInfo::new_with_request(block.clone(), nonce_request.clone()),
             );
-            let block_info = BlockInfo::new_with_request(block.clone(), nonce_request.clone());
             stacks_client
                 .submit_block_for_validation(block)
                 .unwrap_or_else(|e| {
-                    warn!("{self}: Failed to submit block for validation: {e:?}",);
+                    warn!(
+                        "Signer #{}: Failed to submit block for validation: {e:?}",
+                        self.signer_id
+                    );
                 });
-            return Some(block_info);
+            return false;
         };

         if block_info.valid.is_none() {
             // We have not yet received validation from the stacks node. Cache the request and wait for validation
-            debug!("{self}: We have yet to receive validation from the stacks node for a nonce request. Cache the nonce request and wait for block validation...");
+            debug!("Signer #{}: We have yet to receive validation from the stacks node for a nonce request. Cache the nonce request and wait for block validation...", self.signer_id);
             block_info.nonce_request = Some(nonce_request.clone());
-            return Some(block_info);
+            return false;
         }

-        self.determine_vote(&mut block_info, nonce_request);
-        Some(block_info)
+        Self::determine_vote(self.signer_id, block_info, nonce_request);
+        true
     }

     /// Verify the transactions in a block are as expected
@@ -810,14 +631,10 @@ impl Signer {
         stacks_client: &StacksClient,
         block: &NakamotoBlock,
     ) -> bool {
-        let next_reward_cycle = self.reward_cycle.wrapping_add(1);
-        let approved_aggregate_public_key = stacks_client
-            .get_approved_aggregate_key(next_reward_cycle)
-            .unwrap_or(None);
-        if approved_aggregate_public_key.is_some() {
-            // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set for the upcoming signers' reward cycle
-            // Otherwise it is a waste of block space and time to enforce as the desired outcome has been reached.
-            debug!("{self}: Already have an aggregate key for the next signer set's reward cycle ({}). Skipping transaction verification...", next_reward_cycle);
+        if self.approved_aggregate_public_key.is_some() {
+            // We do not enforce that a block contains any transactions except the aggregate votes when it is NOT already set
+            // TODO: should we only allow special cased transactions during the prepare phase before a key is set?
+            debug!("Signer #{}: Already have an aggregate key for reward cycle {}. Skipping transaction verification...", self.signer_id, self.reward_cycle);
             return true;
         }
         if let Ok(expected_transactions) = self.get_expected_transactions(stacks_client) {
@@ -828,17 +645,25 @@ impl Signer {
                 .into_iter()
                 .filter_map(|tx| {
                     if !block_tx_hashset.contains(&tx.txid()) {
-                        debug!("{self}: expected txid {} is in the block", &tx.txid());
+                        debug!(
+                            "Signer #{}: expected txid {} is in the block",
+                            self.signer_id,
+                            &tx.txid()
+                        );
                         Some(tx)
                     } else {
-                        debug!("{self}: missing expected txid {}", &tx.txid());
+                        debug!(
+                            "Signer #{}: missing expected txid {}",
+                            self.signer_id,
+                            &tx.txid()
+                        );
                         None
                     }
                 })
                 .collect::<Vec<_>>();
             let is_valid = missing_transactions.is_empty();
             if !is_valid {
-                debug!("{self}: Broadcasting a block rejection due to missing expected transactions...");
+                debug!("Signer #{}: Broadcasting a block rejection due to missing expected transactions...", self.signer_id);
                 let block_rejection = BlockRejection::new(
                     block.header.signer_signature_hash(),
                     RejectCode::MissingTransactions(missing_transactions),
@@ -848,13 +673,19 @@ impl Signer {
                     .stackerdb
                     .send_message_with_retry(block_rejection.into())
                 {
-                    warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",);
+                    warn!(
+                        "Signer #{}: Failed to send block rejection to stacker-db: {e:?}",
+                        self.signer_id
+                    );
                 }
             }
             is_valid
         } else {
             // Failed to connect to the stacks node to get transactions. Cannot validate the block. Reject it.
-            debug!("{self}: Broadcasting a block rejection due to signer connectivity issues...",);
+            debug!(
+                "Signer #{}: Broadcasting a block rejection due to signer connectivity issues...",
+                self.signer_id
+            );
             let block_rejection = BlockRejection::new(
                 block.header.signer_signature_hash(),
                 RejectCode::ConnectivityIssues,
@@ -864,7 +695,10 @@ impl Signer {
                 .stackerdb
                 .send_message_with_retry(block_rejection.into())
             {
-                warn!("{self}: Failed to send block submission to stacker-db: {e:?}",);
+                warn!(
+                    "Signer #{}: Failed to send block submission to stacker-db: {e:?}",
+                    self.signer_id
+                );
             }
             false
         }
@@ -873,11 +707,11 @@ impl Signer {
     /// Get transactions from stackerdb for the given addresses and account nonces, filtering out any malformed transactions
     fn get_signer_transactions(
         &mut self,
-        nonces: &std::collections::HashMap<StacksAddress, u64>,
+        nonces: &HashMap<StacksAddress, u64>,
     ) -> Result<Vec<StacksTransaction>, ClientError> {
         let transactions: Vec<_> = self
             .stackerdb
-            .get_current_transactions()?
+            .get_current_transactions_with_retry()?
             .into_iter()
             .filter_map(|tx| {
                 if !NakamotoSigners::valid_vote_transaction(nonces, &tx, self.mainnet) {
@@ -895,15 +729,18 @@ impl Signer {
         stacks_client: &StacksClient,
     ) -> Result<Vec<StacksTransaction>, ClientError> {
         if self.next_signer_slot_ids.is_empty() {
-            debug!("{self}: No next signers. Skipping transaction retrieval.",);
+            debug!(
+                "Signer #{}: No next signers. Skipping transaction retrieval.",
+                self.signer_id
+            );
             return Ok(vec![]);
         }
         // Get all the account nonces for the next signers
         let account_nonces = self.get_account_nonces(stacks_client, &self.next_signer_addresses);
         let transactions: Vec<_> = self
             .stackerdb
-            .get_next_transactions(&self.next_signer_slot_ids)?;
-        let mut filtered_transactions = std::collections::HashMap::new();
+            .get_next_transactions_with_retry(&self.next_signer_slot_ids)?;
+        let mut filtered_transactions = HashMap::new();
         NakamotoSigners::update_filtered_transactions(
             &mut filtered_transactions,
             &account_nonces,
@@ -915,12 +752,24 @@ impl Signer {
     }

     /// Determine the vote for a block and update the block info and nonce request accordingly
-    fn determine_vote(&self, block_info: &mut BlockInfo, nonce_request: &mut NonceRequest) {
+    fn determine_vote(
+        signer_id: u32,
+        block_info: &mut BlockInfo,
+        nonce_request: &mut NonceRequest,
+    ) {
         let rejected = !block_info.valid.unwrap_or(false);
         if rejected {
-            debug!("{self}: Rejecting block {}", block_info.block.block_id());
+            debug!(
+                "Signer #{}: Rejecting block {}",
+                signer_id,
+                block_info.block.block_id()
+            );
         } else {
-            debug!("{self}: Accepting block {}", block_info.block.block_id());
+            debug!(
+                "Signer #{}: Accepting block {}",
+                signer_id,
+                block_info.block.block_id()
+            );
         }
         let block_vote = NakamotoBlockVote {
             signer_signature_hash: block_info.block.header.signer_signature_hash(),
@@ -944,7 +793,7 @@ impl Signer {
         coordinator_public_key: &PublicKey,
     ) -> Option<Packet> {
         // We only care about verified wsts packets. Ignore anything else.
-        if packet.verify(&self.state_machine.public_keys, coordinator_public_key) {
+        if packet.verify(&self.signing_round.public_keys, coordinator_public_key) {
             match &mut packet.msg {
                 Message::SignatureShareRequest(request) => {
                     if !self.validate_signature_share_request(request) {
@@ -952,18 +801,7 @@ impl Signer {
                     }
                 }
                 Message::NonceRequest(request) => {
-                    let Some(updated_block_info) =
-                        self.validate_nonce_request(stacks_client, request)
-                    else {
-                        warn!("Failed to validate and parse nonce request");
-                        return None;
-                    };
-                    self.signer_db
-                        .insert_block(self.reward_cycle, &updated_block_info)
-                        .unwrap_or_else(|_| panic!("{self}: Failed to insert block in DB"));
-                    let process_request = updated_block_info.vote.is_some();
-                    if !process_request {
-                        debug!("Failed to validate nonce request");
+                    if !self.validate_nonce_request(stacks_client, request) {
                         return None;
                     }
                 }
@@ -974,8 +812,8 @@ impl Signer {
             Some(packet)
         } else {
             debug!(
-                "{self}: Failed to verify wsts packet with {}: {packet:?}",
-                coordinator_public_key
+                "Signer #{}: Failed to verify wsts packet with {}: {packet:?}",
+                self.signer_id, coordinator_public_key
             );
             None
         }
@@ -992,21 +830,21 @@ impl Signer {
         // Signers only ever trigger non-taproot signing rounds over blocks. Ignore SignTaproot results
         match operation_result {
             OperationResult::Sign(signature) => {
-                debug!("{self}: Received signature result");
+                debug!("Signer #{}: Received signature result", self.signer_id);
                 self.process_signature(signature);
             }
             OperationResult::SignTaproot(_) => {
-                debug!("{self}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature.");
+                debug!("Signer #{}: Received a signature result for a taproot signature. Nothing to broadcast as we currently sign blocks with a FROST signature.", self.signer_id);
             }
-            OperationResult::Dkg(aggregate_key) => {
-                self.process_dkg(stacks_client, aggregate_key);
+            OperationResult::Dkg(dkg_public_key) => {
+                self.process_dkg(stacks_client, dkg_public_key);
             }
             OperationResult::SignError(e) => {
-                warn!("{self}: Received a Sign error: {e:?}");
+                warn!("Signer #{}: Received a Sign error: {e:?}", self.signer_id);
                 self.process_sign_error(e);
             }
             OperationResult::DkgError(e) => {
-                warn!("{self}: Received a DKG error: {e:?}");
+                warn!("Signer #{}: Received a DKG error: {e:?}", self.signer_id);
                 // TODO: process these errors and track malicious signers to report
             }
         }
@@ -1015,42 +853,50 @@ impl Signer {

     /// Process a dkg result by broadcasting a vote to the stacks node
     fn process_dkg(&mut self, stacks_client: &StacksClient, dkg_public_key: &Point) {
-        let mut dkg_results_bytes = vec![];
-        if let Err(e) = SignerMessage::serialize_dkg_result(
-            &mut dkg_results_bytes,
-            dkg_public_key,
-            self.coordinator.party_polynomials.iter(),
-        ) {
-            error!("{}: Failed to serialize DKGResults message for StackerDB, will continue operating.", self.signer_id;
-                "error" => %e);
-        } else if let Err(e) = self
-            .stackerdb
-            .send_message_bytes_with_retry(&MessageSlotID::DkgResults, dkg_results_bytes)
-        {
-            error!("{}: Failed to send DKGResults message to StackerDB, will continue operating.", self.signer_id;
-                "error" => %e);
-        }
-
+        let epoch = retry_with_exponential_backoff(|| {
+            stacks_client
+                .get_node_epoch()
+                .map_err(backoff::Error::transient)
+        })
+        .unwrap_or(StacksEpochId::Epoch24);
+        let tx_fee = if epoch < StacksEpochId::Epoch30 {
+            debug!(
+                "Signer #{}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote.",
+                self.signer_id
+            );
+            Some(self.tx_fee_ustx)
+        } else {
+            None
+        };
         // Get our current nonce from the stacks node and compare it against what we have sitting in the stackerdb instance
         let signer_address = stacks_client.get_signer_address();
         // Retrieve ALL account nonces as we may have transactions from other signers in our stackerdb slot that we care about
         let account_nonces = self.get_account_nonces(stacks_client, &self.signer_addresses);
         let account_nonce = account_nonces.get(signer_address).unwrap_or(&0);
-        let signer_transactions = self
-            .get_signer_transactions(&account_nonces)
-            .map_err(|e| {
-                error!("{self}: Unable to get signer transactions: {e:?}.");
-            })
-            .unwrap_or_default();
+        let signer_transactions = retry_with_exponential_backoff(|| {
+            self.get_signer_transactions(&account_nonces)
+                .map_err(backoff::Error::transient)
+        })
+        .map_err(|e| {
+            warn!(
+                "Signer #{}: Unable to get signer transactions: {e:?}",
+                self.signer_id
+            );
+        })
+        .unwrap_or_default();
         // If we have a transaction in the stackerdb slot, we need to increment the nonce hence the +1, else should use the account nonce
         let next_nonce = signer_transactions
             .first()
             .map(|tx| tx.get_origin_nonce().wrapping_add(1))
             .unwrap_or(*account_nonce);
-        let epoch = stacks_client
-            .get_node_epoch()
-            .unwrap_or(StacksEpochId::Epoch24);
-        match self.build_dkg_vote(stacks_client, &epoch, next_nonce, *dkg_public_key) {
+        match stacks_client.build_vote_for_aggregate_public_key(
+            self.stackerdb.get_signer_slot_id().0,
+            self.coordinator.current_dkg_id,
+            *dkg_public_key,
+            self.reward_cycle,
+            tx_fee,
+            next_nonce,
+        ) {
             Ok(new_transaction) => {
                 if let Err(e) = self.broadcast_dkg_vote(
                     stacks_client,
@@ -1059,66 +905,37 @@ impl Signer {
                     new_transaction,
                 ) {
                     warn!(
-                        "{self}: Failed to broadcast DKG public key vote ({dkg_public_key:?}): {e:?}"
+                        "Signer #{}: Failed to broadcast DKG public key vote ({dkg_public_key:?}): {e:?}",
+                        self.signer_id
                     );
                 }
             }
             Err(e) => {
                 warn!(
-                    "{self}: Failed to build DKG public key vote ({dkg_public_key:?}) transaction: {e:?}."
+ "Signer #{}: Failed to build DKG public key vote ({dkg_public_key:?}) transaction: {e:?}.", + self.signer_id ); } } } - /// Build a signed DKG vote transaction - fn build_dkg_vote( - &mut self, - stacks_client: &StacksClient, - epoch: &StacksEpochId, - nonce: u64, - dkg_public_key: Point, - ) -> Result { - let mut unsigned_tx = stacks_client.build_unsigned_vote_for_aggregate_public_key( - self.stackerdb.get_signer_slot_id().0, - self.coordinator.current_dkg_id, - dkg_public_key, - self.reward_cycle, - nonce, - )?; - let tx_fee = if epoch < &StacksEpochId::Epoch30 { - info!("{self}: in pre Epoch 3.0 cycles, must set a transaction fee for the DKG vote."); - let fee = if let Some(max_fee) = self.max_tx_fee_ustx { - let estimated_fee = stacks_client - .get_medium_estimated_fee_ustx(&unsigned_tx) - .map_err(|e| { - warn!("{self}: unable to estimate fee for DKG vote transaction: {e:?}."); - e - }) - .unwrap_or(self.tx_fee_ustx); - std::cmp::min(estimated_fee, max_fee) - } else { - self.tx_fee_ustx - }; - debug!("{self}: Using a fee of {fee} uSTX for DKG vote transaction."); - fee - } else { - 0 - }; - unsigned_tx.set_tx_fee(tx_fee); - stacks_client.sign_transaction(unsigned_tx) - } - // Get the account nonces for the provided list of signer addresses fn get_account_nonces( &self, stacks_client: &StacksClient, signer_addresses: &[StacksAddress], - ) -> std::collections::HashMap { - let mut account_nonces = std::collections::HashMap::with_capacity(signer_addresses.len()); + ) -> HashMap { + let mut account_nonces = HashMap::with_capacity(signer_addresses.len()); for address in signer_addresses { - let Ok(account_nonce) = stacks_client.get_account_nonce(address) else { - warn!("{self}: Unable to get account nonce for address: {address}."); + let Ok(account_nonce) = retry_with_exponential_backoff(|| { + stacks_client + .get_account_nonce(address) + .map_err(backoff::Error::transient) + }) else { + warn!( + "Signer #{}: Unable to get account nonce for address: {address}.", + self.signer_id + ); continue; }; account_nonces.insert(*address, account_nonce); @@ -1138,25 +955,32 @@ impl Signer { if self.approved_aggregate_public_key.is_some() { // We do not enforce a block contain any transactions except the aggregate votes when it is NOT already set info!( - "{self}: Already has an approved aggregate key. Do not broadcast the transaction ({txid:?})." + "Signer #{}: Already has an aggregate key for reward cycle {}. Do not broadcast the transaction ({txid:?}).", + self.signer_id, self.reward_cycle ); return Ok(()); } if epoch >= StacksEpochId::Epoch30 { - debug!("{self}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB."); + debug!("Signer #{}: Received a DKG result while in epoch 3.0. Broadcast the transaction only to stackerDB.", self.signer_id); } else if epoch == StacksEpochId::Epoch25 { - debug!("{self}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool."); + debug!("Signer #{}: Received a DKG result while in epoch 2.5. Broadcast the transaction to the mempool.", self.signer_id); stacks_client.submit_transaction(&new_transaction)?; - info!("{self}: Submitted DKG vote transaction ({txid:?}) to the mempool"); + info!( + "Signer #{}: Submitted DKG vote transaction ({txid:?}) to the mempool", + self.signer_id + ); } else { - debug!("{self}: Received a DKG result, but are in an unsupported epoch. 
Do not broadcast the transaction ({}).", new_transaction.txid()); + debug!("Signer #{}: Received a DKG result, but are in an unsupported epoch. Do not broadcast the transaction ({}).", self.signer_id, new_transaction.txid()); return Ok(()); } // For all Pox-4 epochs onwards, broadcast the results also to stackerDB for other signers/miners to observe signer_transactions.push(new_transaction); let signer_message = SignerMessage::Transactions(signer_transactions); self.stackerdb.send_message_with_retry(signer_message)?; - info!("{self}: Broadcasted DKG vote transaction ({txid}) to stacker DB"); + info!( + "Signer #{}: Broadcasted DKG vote transaction ({txid}) to stacker DB", + self.signer_id, + ); Ok(()) } @@ -1166,25 +990,34 @@ impl Signer { // Deserialize the signature result and broadcast an appropriate Reject or Approval message to stackerdb let message = self.coordinator.get_message(); let Some(block_vote): Option = read_next(&mut &message[..]).ok() else { - debug!("{self}: Received a signature result for a non-block. Nothing to broadcast."); + debug!( + "Signer #{}: Received a signature result for a non-block. Nothing to broadcast.", + self.signer_id + ); return; }; + // TODO: proper garbage collection...This is currently our only cleanup of blocks + self.blocks.remove(&block_vote.signer_signature_hash); + let block_submission = if block_vote.rejected { // We signed a rejection message. Return a rejection message - BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()) + BlockResponse::rejected(block_vote.signer_signature_hash, signature.clone()).into() } else { // we agreed to sign the block hash. Return an approval message - BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()) + BlockResponse::accepted(block_vote.signer_signature_hash, signature.clone()).into() }; // Submit signature result to miners to observe - info!("{self}: Submit block response: {block_submission}"); - if let Err(e) = self - .stackerdb - .send_message_with_retry(block_submission.into()) - { - warn!("{self}: Failed to send block submission to stacker-db: {e:?}"); + debug!( + "Signer #{}: submit block response {block_submission:?}", + self.signer_id + ); + if let Err(e) = self.stackerdb.send_message_with_retry(block_submission) { + warn!( + "Signer #{}: Failed to send block submission to stacker-db: {e:?}", + self.signer_id + ); } } @@ -1196,50 +1029,35 @@ impl Signer { let block: NakamotoBlock = read_next(&mut &message[..]).ok().unwrap_or({ // This is not a block so maybe its across its hash - let Some(block_vote): Option = read_next(&mut &message[..]).ok() - else { + let Some(block_vote): Option = read_next(&mut &message[..]).ok() else { // This is not a block vote either. We cannot process this error - debug!( - "{self}: Received a signature error for a non-block. Nothing to broadcast." - ); + debug!("Signer #{}: Received a signature error for a non-block. Nothing to broadcast.", self.signer_id); return; }; - let Some(block_info) = self - .signer_db - .block_lookup(self.reward_cycle, &block_vote.signer_signature_hash) - .unwrap_or_else(|_| panic!("{self}: Failed to connect to signer DB")) - else { - debug!( - "{self}: Received a signature result for a block we have not seen before. Ignoring..." - ); + let Some(block_info) = self.blocks.remove(&block_vote.signer_signature_hash) else { + debug!("Signer #{}: Received a signature result for a block we have not seen before. 
Ignoring...", self.signer_id); return; }; block_info.block }); let block_rejection = BlockRejection::new(block.header.signer_signature_hash(), RejectCode::from(e)); - debug!("{self}: Broadcasting block rejection: {block_rejection:?}"); + debug!( + "Signer #{}: Broadcasting block rejection: {block_rejection:?}", + self.signer_id + ); // Submit signature result to miners to observe if let Err(e) = self .stackerdb .send_message_with_retry(block_rejection.into()) { - warn!("{self}: Failed to send block rejection submission to stacker-db: {e:?}"); + warn!( + "Signer #{}: Failed to send block rejection submission to stacker-db: {e:?}", + self.signer_id + ); } } - /// Persist state needed to ensure the signer can continue to perform - /// DKG and participate in signing rounds accross crashes - /// - /// # Panics - /// Panics if the insertion fails - fn save_signer_state(&self) { - let state = self.state_machine.signer.save(); - self.signer_db - .insert_signer_state(self.reward_cycle, &state) - .expect("Failed to persist signer state"); - } - /// Send any operation results across the provided channel fn send_operation_results( &mut self, @@ -1249,10 +1067,16 @@ impl Signer { let nmb_results = operation_results.len(); match res.send(operation_results) { Ok(_) => { - debug!("{self}: Successfully sent {nmb_results} operation result(s)") + debug!( + "Signer #{}: Successfully sent {} operation result(s)", + self.signer_id, nmb_results + ) } Err(e) => { - warn!("{self}: Failed to send {nmb_results} operation results: {e:?}"); + warn!( + "Signer #{}: Failed to send {nmb_results} operation results: {e:?}", + self.signer_id + ); } } } @@ -1260,131 +1084,87 @@ impl Signer { /// Sending all provided packets through stackerdb with a retry fn send_outbound_messages(&mut self, outbound_messages: Vec) { debug!( - "{self}: Sending {} messages to other stacker-db instances.", + "Signer #{}: Sending {} messages to other stacker-db instances.", + self.signer_id, outbound_messages.len() ); for msg in outbound_messages { let ack = self.stackerdb.send_message_with_retry(msg.into()); if let Ok(ack) = ack { - debug!("{self}: send outbound ACK: {ack:?}"); + debug!("Signer #{}: send outbound ACK: {ack:?}", self.signer_id); } else { - warn!("{self}: Failed to send message to stacker-db instance: {ack:?}"); - } - } - } - - /// Should DKG be queued to the current signer's command queue - pub fn should_queue_dkg(&mut self, stacks_client: &StacksClient) -> Result { - if self.state != State::Idle - || self.signer_id != self.get_coordinator_dkg().0 - || self.commands.front() == Some(&Command::Dkg) - { - // We are not the coordinator, we are in the middle of an operation, or we have already queued DKG. Do not attempt to queue DKG - return Ok(false); - } - let signer_address = stacks_client.get_signer_address(); - if let Some(aggregate_key) = stacks_client.get_vote_for_aggregate_public_key( - self.coordinator.current_dkg_id, - self.reward_cycle, - *signer_address, - )? { - let Some(round_weight) = stacks_client - .get_round_vote_weight(self.reward_cycle, self.coordinator.current_dkg_id)? - else { - // This only will happen if somehow we registered as a signer and were granted no weight which should not really ever happen. - error!("{self}: already voted for DKG, but no round vote weight found. 
We either have no voting power or the contract is corrupted."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key - ); - return Ok(false); - }; - let threshold_weight = stacks_client.get_vote_threshold_weight(self.reward_cycle)?; - if round_weight < threshold_weight { - // The threshold weight has not been met yet. We should wait for more votes to arrive. - // TODO: this should be on a timeout of some kind. We should not wait forever for the threshold to be met. - // See https://github.com/stacks-network/stacks-core/issues/4568 - debug!("{self}: Not triggering a DKG round. Weight threshold has not been met yet. Waiting for more votes to arrive."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight + warn!( + "Signer #{}: Failed to send message to stacker-db instance: {ack:?}", + self.signer_id ); - return Ok(false); - } - warn!("{self}: Vote for DKG failed."; - "voting_round" => self.coordinator.current_dkg_id, - "aggregate_key" => %aggregate_key, - "round_weight" => round_weight, - "threshold_weight" => threshold_weight - ); - } else { - // Have I already voted, but the vote is still pending in StackerDB? Check stackerdb for the same round number and reward cycle vote transaction - // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes - let account_nonce = stacks_client.get_account_nonce(signer_address).unwrap_or(0); - let old_transactions = self.stackerdb.get_current_transactions()?; - // Check if we have an existing vote transaction for the same round and reward cycle - for transaction in old_transactions.iter() { - // We should not consider other signer transactions and should ignore invalid transaction versions - if transaction.origin_address() != *signer_address - || transaction.is_mainnet() != self.mainnet - { - continue; - } - let Some(params) = - NakamotoSigners::parse_vote_for_aggregate_public_key(transaction) - else { - continue; - }; - let Some(dkg_public_key) = self.coordinator.aggregate_public_key.clone() else { - break; - }; - if params.aggregate_key == dkg_public_key - && params.voting_round == self.coordinator.current_dkg_id - && params.reward_cycle == self.reward_cycle - { - let origin_nonce = transaction.get_origin_nonce(); - if origin_nonce < account_nonce { - // We have already voted, but our vote nonce is outdated. Resubmit vote with updated transaction - warn!("{self}: DKG vote submitted with invalid nonce ({origin_nonce} < {account_nonce}). Resubmitting vote."); - self.process_dkg(stacks_client, &dkg_public_key); - } else { - debug!("{self}: Already have a pending DKG vote in StackerDB. Waiting for it to be confirmed."; - "txid" => %transaction.txid(), - "aggregate_key" => %params.aggregate_key, - "voting_round" => params.voting_round, - "reward_cycle" => params.reward_cycle, - "nonce" => origin_nonce - ); - } - return Ok(false); - } } } - Ok(true) } /// Update the DKG for the provided signer info, triggering it if required pub fn update_dkg(&mut self, stacks_client: &StacksClient) -> Result<(), ClientError> { - let old_dkg = self.approved_aggregate_public_key; + let reward_cycle = self.reward_cycle; self.approved_aggregate_public_key = - stacks_client.get_approved_aggregate_key(self.reward_cycle)?; + stacks_client.get_approved_aggregate_key(reward_cycle)?; if self.approved_aggregate_public_key.is_some() { // TODO: this will never work as is. 
We need to have stored our party shares on the side etc for this particular aggregate key. // Need to update state to store the necessary info, check against it to see if we have participated in the winning round and // then overwrite our value accordingly. Otherwise, we will be locked out of the round and should not participate. self.coordinator .set_aggregate_public_key(self.approved_aggregate_public_key); - if old_dkg != self.approved_aggregate_public_key { - warn!( - "{self}: updated DKG value to {:?}.", - self.approved_aggregate_public_key - ); - } + // We have an approved aggregate public key. Do nothing further + debug!( + "Signer #{}: Have updated DKG value to {:?}.", + self.signer_id, self.approved_aggregate_public_key + ); return Ok(()); }; - if self.should_queue_dkg(stacks_client)? { - info!("{self} is the current coordinator and must trigger DKG. Queuing DKG command..."); - self.commands.push_front(Command::Dkg); + let coordinator_id = self.coordinator_selector.get_coordinator().0; + if self.signer_id == coordinator_id && self.state == State::Idle { + debug!( + "Signer #{}: Checking if old vote transaction exists in StackerDB...", + self.signer_id + ); + // Have I already voted and have a pending transaction? Check stackerdb for the same round number and reward cycle vote transaction + // Only get the account nonce of THIS signer as we only care about our own votes, not other signer votes + let signer_address = stacks_client.get_signer_address(); + let account_nonces = self.get_account_nonces(stacks_client, &[*signer_address]); + let old_transactions = self.get_signer_transactions(&account_nonces).map_err(|e| { + warn!("Signer #{}: Failed to get old signer transactions: {e:?}. May trigger DKG unnecessarily", self.signer_id); + }).unwrap_or_default(); + // Check if we have an existing vote transaction for the same round and reward cycle + for transaction in old_transactions.iter() { + let params = + NakamotoSigners::parse_vote_for_aggregate_public_key(transaction).unwrap_or_else(|| panic!("BUG: Signer #{}: Received an invalid {SIGNERS_VOTING_FUNCTION_NAME} transaction in an already filtered list: {transaction:?}", self.signer_id)); + if Some(params.aggregate_key) == self.coordinator.aggregate_public_key + && params.voting_round == self.coordinator.current_dkg_id + && reward_cycle == self.reward_cycle + { + debug!("Signer #{}: Not triggering a DKG round. Already have a pending vote transaction.", self.signer_id; + "txid" => %transaction.txid(), + "aggregate_key" => %params.aggregate_key, + "voting_round" => params.voting_round + ); + return Ok(()); + } + } + if stacks_client + .get_vote_for_aggregate_public_key( + self.coordinator.current_dkg_id, + self.reward_cycle, + *stacks_client.get_signer_address(), + )? + .is_some() + { + // TODO Check if the vote failed and we need to retrigger the DKG round not just if we have already voted... + // TODO need logic to trigger another DKG round if a certain amount of time passes and we still have no confirmed DKG vote + debug!("Signer #{}: Not triggering a DKG round. Already voted and we may need to wait for more votes to arrive.", self.signer_id); + return Ok(()); + } + if self.commands.front() != Some(&Command::Dkg) { + info!("Signer #{} is the current coordinator for {reward_cycle} and must trigger DKG. 
Queuing DKG command...", self.signer_id); + self.commands.push_front(Command::Dkg); + } } Ok(()) } @@ -1397,57 +1177,46 @@ impl Signer { res: Sender>, current_reward_cycle: u64, ) -> Result<(), ClientError> { - debug!("{self}: Processing event: {event:?}"); + debug!("Signer #{}: Processing event: {event:?}", self.signer_id); match event { Some(SignerEvent::BlockValidationResponse(block_validate_response)) => { - debug!("{self}: Received a block proposal result from the stacks node..."); - self.handle_block_validate_response( - stacks_client, - block_validate_response, - res, - current_reward_cycle, - ) + debug!( + "Signer #{}: Received a block proposal result from the stacks node...", + self.signer_id + ); + self.handle_block_validate_response(stacks_client, block_validate_response, res) } Some(SignerEvent::SignerMessages(signer_set, messages)) => { if *signer_set != self.stackerdb.get_signer_set() { - debug!("{self}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring..."); + debug!("Signer #{}: Received a signer message for a reward cycle that does not belong to this signer. Ignoring...", self.signer_id); return Ok(()); } debug!( - "{self}: Received {} messages from the other signers...", + "Signer #{}: Received {} messages from the other signers...", + self.signer_id, messages.len() ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); + self.handle_signer_messages(stacks_client, res, messages); } - Some(SignerEvent::MinerMessages(blocks, messages, miner_key)) => { - if let Some(miner_key) = miner_key { - let miner_key = PublicKey::try_from(miner_key.to_bytes_compressed().as_slice()) - .expect("FATAL: could not convert from StacksPublicKey to PublicKey"); - self.miner_key = Some(miner_key); - }; + Some(SignerEvent::ProposedBlocks(blocks)) => { if current_reward_cycle != self.reward_cycle { // There is not point in processing blocks if we are not the current reward cycle (we can never actually contribute to signing these blocks) - debug!("{self}: Received a proposed block, but this signer's reward cycle is not the current one ({current_reward_cycle}). Ignoring..."); + debug!("Signer #{}: Received a proposed block, but this signer's reward cycle ({}) is not the current one ({}). Ignoring...", self.signer_id, self.reward_cycle, current_reward_cycle); return Ok(()); } debug!( - "{self}: Received {} block proposals and {} messages from the miner", - blocks.len(), - messages.len(); - "miner_key" => ?miner_key, + "Signer #{}: Received {} block proposals from the miners...", + self.signer_id, + blocks.len() ); - self.handle_signer_messages(stacks_client, res, messages, current_reward_cycle); self.handle_proposed_blocks(stacks_client, blocks); } Some(SignerEvent::StatusCheck) => { - debug!("{self}: Received a status check event.") - } - Some(SignerEvent::NewBurnBlock(height)) => { - debug!("{self}: Receved a new burn block event for block height {height}") + debug!("Signer #{}: Received a status check event.", self.signer_id) } None => { // No event. Do nothing. 
- debug!("{self}: No event received") + debug!("Signer #{}: No event received", self.signer_id) } } Ok(()) diff --git a/stacks-signer/src/signerdb.rs b/stacks-signer/src/signerdb.rs deleted file mode 100644 index 1f568ff37f1..00000000000 --- a/stacks-signer/src/signerdb.rs +++ /dev/null @@ -1,375 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::path::Path; - -use blockstack_lib::util_lib::db::{ - query_row, sqlite_open, table_exists, u64_to_sql, Error as DBError, -}; -use rusqlite::{params, Connection, Error as SqliteError, OpenFlags, NO_PARAMS}; -use slog::slog_debug; -use stacks_common::debug; -use stacks_common::util::hash::Sha512Trunc256Sum; -use wsts::traits::SignerState; - -use crate::signer::BlockInfo; - -/// This struct manages a SQLite database connection -/// for the signer. -#[derive(Debug)] -pub struct SignerDb { - /// Connection to the SQLite database - db: Connection, -} - -const CREATE_BLOCKS_TABLE: &str = " -CREATE TABLE IF NOT EXISTS blocks ( - reward_cycle INTEGER NOT NULL, - signer_signature_hash TEXT NOT NULL, - block_info TEXT NOT NULL, - PRIMARY KEY (reward_cycle, signer_signature_hash) -)"; - -const CREATE_SIGNER_STATE_TABLE: &str = " -CREATE TABLE IF NOT EXISTS signer_states ( - reward_cycle INTEGER PRIMARY KEY, - state TEXT NOT NULL -)"; - -impl SignerDb { - /// Create a new `SignerState` instance. - /// This will create a new SQLite database at the given path - /// or an in-memory database if the path is ":memory:" - pub fn new(db_path: impl AsRef) -> Result { - let connection = Self::connect(db_path)?; - - let signer_db = Self { db: connection }; - - signer_db.instantiate_db()?; - - Ok(signer_db) - } - - fn instantiate_db(&self) -> Result<(), DBError> { - if !table_exists(&self.db, "blocks")? { - self.db.execute(CREATE_BLOCKS_TABLE, NO_PARAMS)?; - } - - if !table_exists(&self.db, "signer_states")? 
{ - self.db.execute(CREATE_SIGNER_STATE_TABLE, NO_PARAMS)?; - } - - Ok(()) - } - - fn connect(db_path: impl AsRef) -> Result { - sqlite_open( - db_path, - OpenFlags::SQLITE_OPEN_READ_WRITE | OpenFlags::SQLITE_OPEN_CREATE, - false, - ) - } - - /// Get the signer state for the provided reward cycle if it exists in the database - pub fn get_signer_state(&self, reward_cycle: u64) -> Result, DBError> { - let result: Option = query_row( - &self.db, - "SELECT state FROM signer_states WHERE reward_cycle = ?", - [u64_to_sql(reward_cycle)?], - )?; - - try_deserialize(result) - } - - /// Insert the given state in the `signer_states` table for the given reward cycle - pub fn insert_signer_state( - &self, - reward_cycle: u64, - signer_state: &SignerState, - ) -> Result<(), DBError> { - let serialized_state = serde_json::to_string(signer_state)?; - self.db.execute( - "INSERT OR REPLACE INTO signer_states (reward_cycle, state) VALUES (?1, ?2)", - params![&u64_to_sql(reward_cycle)?, &serialized_state], - )?; - Ok(()) - } - - /// Fetch a block from the database using the block's - /// `signer_signature_hash` - pub fn block_lookup( - &self, - reward_cycle: u64, - hash: &Sha512Trunc256Sum, - ) -> Result, DBError> { - let result: Option = query_row( - &self.db, - "SELECT block_info FROM blocks WHERE reward_cycle = ? AND signer_signature_hash = ?", - params![&u64_to_sql(reward_cycle)?, hash.to_string()], - )?; - - try_deserialize(result) - } - - /// Insert a block into the database. - /// `hash` is the `signer_signature_hash` of the block. - pub fn insert_block( - &mut self, - reward_cycle: u64, - block_info: &BlockInfo, - ) -> Result<(), DBError> { - let block_json = - serde_json::to_string(&block_info).expect("Unable to serialize block info"); - let hash = &block_info.signer_signature_hash(); - let block_id = &block_info.block.block_id(); - let signed_over = &block_info.signed_over; - debug!( - "Inserting block_info: reward_cycle = {reward_cycle}, sighash = {hash}, block_id = {block_id}, signed = {signed_over} vote = {:?}", - block_info.vote.as_ref().map(|v| { - if v.rejected { - "REJECT" - } else { - "ACCEPT" - } - }) - ); - self.db - .execute( - "INSERT OR REPLACE INTO blocks (reward_cycle, signer_signature_hash, block_info) VALUES (?1, ?2, ?3)", - params![&u64_to_sql(reward_cycle)?, hash.to_string(), &block_json], - )?; - - Ok(()) - } -} - -fn try_deserialize(s: Option) -> Result, DBError> -where - T: serde::de::DeserializeOwned, -{ - s.as_deref() - .map(serde_json::from_str) - .transpose() - .map_err(DBError::SerializationError) -} - -#[cfg(test)] -pub fn test_signer_db(db_path: &str) -> SignerDb { - use std::fs; - - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - SignerDb::new(db_path).expect("Failed to create signer db") -} - -#[cfg(test)] -mod tests { - use std::fs; - use std::path::PathBuf; - - use blockstack_lib::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote, - }; - use blockstack_lib::chainstate::stacks::ThresholdSignature; - use num_traits::identities::Zero; - use polynomial::Polynomial; - use stacks_common::bitvec::BitVec; - use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId, TrieHash}; - use stacks_common::util::secp256k1::MessageSignature; - use wsts::common::Nonce; - use wsts::curve::point::Point; - use wsts::curve::scalar::Scalar; - use wsts::traits::PartyState; - - use super::*; - - fn _wipe_db(db_path: &PathBuf) { - if fs::metadata(db_path).is_ok() { - fs::remove_file(db_path).unwrap(); - } - } - - fn 
create_block_override( - overrides: impl FnOnce(&mut NakamotoBlock), - ) -> (BlockInfo, NakamotoBlock) { - let header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - let mut block = NakamotoBlock { - header, - txs: vec![], - }; - overrides(&mut block); - (BlockInfo::new(block.clone()), block) - } - - fn create_signer_state(id: u32) -> SignerState { - let ps1 = PartyState { - polynomial: Some(Polynomial::new(vec![1.into(), 2.into(), 3.into()])), - private_keys: vec![(1, 45.into()), (2, 56.into())], - nonce: Nonce::zero(), - }; - - let ps2 = PartyState { - polynomial: Some(Polynomial::new(vec![1.into(), 2.into(), 3.into()])), - private_keys: vec![(1, 45.into()), (2, 56.into())], - nonce: Nonce::zero(), - }; - - SignerState { - id, - key_ids: vec![2, 4], - num_keys: 12, - num_parties: 10, - threshold: 7, - group_key: Point::from(Scalar::from(42)), - parties: vec![(2, ps1), (4, ps2)], - } - } - - fn create_block() -> (BlockInfo, NakamotoBlock) { - create_block_override(|_| {}) - } - - fn tmp_db_path() -> PathBuf { - std::env::temp_dir().join(format!( - "stacks-signer-test-{}.sqlite", - rand::random::() - )) - } - - fn test_basic_signer_db_with_path(db_path: impl AsRef) { - let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let reward_cycle = 1; - let (block_info, block) = create_block(); - db.insert_block(reward_cycle, &block_info) - .expect("Unable to insert block into db"); - - let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) - .unwrap() - .expect("Unable to get block from db"); - - assert_eq!(BlockInfo::new(block.clone()), block_info); - - // Test looking up a block from a different reward cycle - let block_info = db - .block_lookup(reward_cycle + 1, &block.header.signer_signature_hash()) - .unwrap(); - assert!(block_info.is_none()); - } - - #[test] - fn test_basic_signer_db() { - let db_path = tmp_db_path(); - test_basic_signer_db_with_path(db_path) - } - - #[test] - fn test_basic_signer_db_in_memory() { - test_basic_signer_db_with_path(":memory:") - } - - #[test] - fn test_update_block() { - let db_path = tmp_db_path(); - let mut db = SignerDb::new(db_path).expect("Failed to create signer db"); - let reward_cycle = 42; - let (block_info, block) = create_block(); - db.insert_block(reward_cycle, &block_info) - .expect("Unable to insert block into db"); - - let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) - .unwrap() - .expect("Unable to get block from db"); - - assert_eq!(BlockInfo::new(block.clone()), block_info); - - let old_block_info = block_info; - let old_block = block; - - let (mut block_info, block) = create_block_override(|b| { - b.header.signer_signature = old_block.header.signer_signature.clone(); - }); - assert_eq!( - block_info.signer_signature_hash(), - old_block_info.signer_signature_hash() - ); - let vote = NakamotoBlockVote { - signer_signature_hash: Sha512Trunc256Sum([0x01; 32]), - rejected: false, - }; - block_info.vote = Some(vote.clone()); - db.insert_block(reward_cycle, &block_info) - .expect("Unable to insert block into db"); - - let block_info = db - .block_lookup(reward_cycle, &block.header.signer_signature_hash()) 
- .unwrap() - .expect("Unable to get block from db"); - - assert_ne!(old_block_info, block_info); - assert_eq!(block_info.vote, Some(vote)); - } - - #[test] - fn test_write_signer_state() { - let db_path = tmp_db_path(); - let db = SignerDb::new(db_path).expect("Failed to create signer db"); - let state_0 = create_signer_state(0); - let state_1 = create_signer_state(1); - - db.insert_signer_state(10, &state_0) - .expect("Failed to insert signer state"); - - db.insert_signer_state(11, &state_1) - .expect("Failed to insert signer state"); - - assert_eq!( - db.get_signer_state(10) - .expect("Failed to get signer state") - .unwrap() - .id, - state_0.id - ); - assert_eq!( - db.get_signer_state(11) - .expect("Failed to get signer state") - .unwrap() - .id, - state_1.id - ); - assert!(db - .get_signer_state(12) - .expect("Failed to get signer state") - .is_none()); - assert!(db - .get_signer_state(9) - .expect("Failed to get signer state") - .is_none()); - } -} diff --git a/stacks-signer/src/tests/conf/signer-0.toml b/stacks-signer/src/tests/conf/signer-0.toml index 32183e0e797..449392c2e3d 100644 --- a/stacks-signer/src/tests/conf/signer-0.toml +++ b/stacks-signer/src/tests/conf/signer-0.toml @@ -3,4 +3,3 @@ node_host = "127.0.0.1:20443" endpoint = "localhost:30000" network = "testnet" auth_password = "12345" -db_path = ":memory:" diff --git a/stacks-signer/src/tests/conf/signer-1.toml b/stacks-signer/src/tests/conf/signer-1.toml index 7bade0e39bc..3d293af6408 100644 --- a/stacks-signer/src/tests/conf/signer-1.toml +++ b/stacks-signer/src/tests/conf/signer-1.toml @@ -3,4 +3,3 @@ node_host = "127.0.0.1:20444" endpoint = "localhost:30001" network = "testnet" auth_password = "12345" -db_path = ":memory:" diff --git a/stacks-signer/src/tests/conf/signer-4.toml b/stacks-signer/src/tests/conf/signer-4.toml new file mode 100644 index 00000000000..0e80a1aa6f9 --- /dev/null +++ b/stacks-signer/src/tests/conf/signer-4.toml @@ -0,0 +1,6 @@ + +stacks_private_key = "e427196ae29197b1db6d5495ff26bf0675f48a4f07b200c0814b95734ecda60f01" +node_host = "127.0.0.1:20443" +endpoint = "localhost:30004" +network = "testnet" +auth_password = "12345" diff --git a/stackslib/Cargo.toml b/stackslib/Cargo.toml index be753371153..d95492e2c35 100644 --- a/stackslib/Cargo.toml +++ b/stackslib/Cargo.toml @@ -57,9 +57,9 @@ pox-locking = { path = "../pox-locking" } libstackerdb = { path = "../libstackerdb" } siphasher = "0.3.7" wsts = { workspace = true } -hashbrown = { workspace = true } +proptest = { workspace = true, optional = true } -[target.'cfg(not(any(target_os = "macos",target_os="windows", target_arch = "arm" )))'.dependencies] +[target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemallocator = {workspace = true} [target.'cfg(unix)'.dependencies] @@ -95,6 +95,7 @@ version = "0.2.23" features = ["std"] [dev-dependencies] +stackslib = { path = ".", features = ["testing"] } assert-json-diff = "1.0.0" criterion = "0.3.5" stdext = "0.3.1" @@ -112,10 +113,10 @@ disable-costs = [] developer-mode = ["clarity/developer-mode"] monitoring_prom = ["prometheus"] slog_json = ["slog-json", "stacks-common/slog_json", "clarity/slog_json", "pox-locking/slog_json"] -testing = [] +testing = ["dep:proptest", "stacks-common/testing", "clarity/testing"] -[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(any(target_os="windows"))))'.dependencies] +[target.'cfg(all(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64"), not(target_env = "msvc")))'.dependencies] sha2 = { 
version = "0.10", features = ["asm"] } -[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), any(target_os="windows")))'.dependencies] +[target.'cfg(any(not(any(target_arch = "x86_64", target_arch = "x86", target_arch = "aarch64")), target_env = "msvc"))'.dependencies] sha2 = { version = "0.10" } diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs index afeaefc0dc3..2fb1f8a4930 100644 --- a/stackslib/src/burnchains/bitcoin/bits.rs +++ b/stackslib/src/burnchains/bitcoin/bits.rs @@ -47,7 +47,7 @@ pub fn parse_script<'a>(script: &'a Script) -> Vec> { impl BitcoinTxInputStructured { /// Parse a script instruction stream encoding a p2pkh scritpsig into a BitcoinTxInput - pub fn from_bitcoin_p2pkh_script_sig( + fn from_bitcoin_p2pkh_script_sig( instructions: &Vec, input_txid: (Txid, u32), ) -> Option { diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 40cabd86d30..51de78a53f0 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -845,11 +845,7 @@ impl BitcoinIndexer { } } else { // ignore the reorg - test_debug!( - "Reorg chain does not overtake original Bitcoin chain ({} >= {})", - orig_total_work, - reorg_total_work - ); + test_debug!("Reorg chain does not overtake original Bitcoin chain"); new_tip = orig_spv_client.get_headers_height()?; } } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index d85631c147f..e3947bd5a8a 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -53,7 +53,7 @@ use crate::chainstate::burn::distribution::BurnSamplePoint; use crate::chainstate::burn::operations::leader_block_commit::MissedBlockCommit; use crate::chainstate::burn::operations::{ BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, PreStxOp, - StackStxOp, TransferStxOp, VoteForAggregateKeyOp, + StackStxOp, TransferStxOp, }; use crate::chainstate::burn::{BlockSnapshot, Opcodes}; use crate::chainstate::coordinator::comm::CoordinatorChannels; @@ -133,9 +133,6 @@ impl BurnchainStateTransition { all_block_commits.insert(op.txid.clone(), op.clone()); block_commits.push(op.clone()); } - BlockstackOperationType::VoteForAggregateKey(_) => { - accepted_ops.push(block_ops[i].clone()); - } }; } @@ -854,35 +851,6 @@ impl Burnchain { None } } - x if x == Opcodes::VoteForAggregateKey as u8 => { - let pre_stx_txid = VoteForAggregateKeyOp::get_sender_txid(burn_tx).ok()?; - let pre_stx_tx = match pre_stx_op_map.get(&pre_stx_txid) { - Some(tx_ref) => Some(BlockstackOperationType::PreStx(tx_ref.clone())), - None => burnchain_db.find_burnchain_op(indexer, pre_stx_txid), - }; - if let Some(BlockstackOperationType::PreStx(pre_stx)) = pre_stx_tx { - let sender = &pre_stx.output; - match VoteForAggregateKeyOp::from_tx(block_header, burn_tx, sender) { - Ok(op) => Some(BlockstackOperationType::VoteForAggregateKey(op)), - Err(e) => { - warn!( - "Failed to parse vote-for-aggregate-key tx"; - "txid" => %burn_tx.txid(), - "data" => %to_hex(&burn_tx.data()), - "error" => ?e, - ); - None - } - } - } else { - warn!( - "Failed to find corresponding input to VoteForAggregateKeyOp"; - "txid" => %burn_tx.txid().to_string(), - "pre_stx_txid" => %pre_stx_txid.to_string() - ); - None - } - } _ => None, } diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 3454924b559..67c0f24a3c9 100644 --- 
a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -37,7 +37,7 @@ use crate::util_lib::db::{ }; pub struct BurnchainDB { - pub(crate) conn: Connection, + conn: Connection, } pub struct BurnchainDBTransaction<'a> { @@ -140,7 +140,7 @@ impl FromRow<BlockCommitMetadata> for BlockCommitMetadata { /// Apply safety checks on extracted blockstack transactions /// - put them in order by vtxindex /// - make sure there are no vtxindex duplicates -pub(crate) fn apply_blockstack_txs_safety_checks( +fn apply_blockstack_txs_safety_checks( block_height: u64, blockstack_txs: &mut Vec<BlockstackOperationType>, ) -> () { @@ -309,11 +309,11 @@ const BURNCHAIN_DB_INDEXES: &'static [&'static str] = &[ impl<'a> BurnchainDBTransaction<'a> { /// Store a burnchain block header into the burnchain database. /// Returns the row ID on success. - pub(crate) fn store_burnchain_db_entry( + fn store_burnchain_db_entry( &self, header: &BurnchainBlockHeader, - ) -> Result<(), BurnchainError> { - let sql = "INSERT OR IGNORE INTO burnchain_db_block_headers + ) -> Result<i64, BurnchainError> { + let sql = "INSERT INTO burnchain_db_block_headers (block_height, block_hash, parent_block_hash, num_txs, timestamp) VALUES (?, ?, ?, ?, ?)"; let args: &[&dyn ToSql] = &[ @@ -323,15 +323,10 @@ impl<'a> BurnchainDBTransaction<'a> { &u64_to_sql(header.num_txs)?, &u64_to_sql(header.timestamp)?, ]; - let affected_rows = self.sql_tx.execute(sql, args)?; - if affected_rows == 0 { - // This means a duplicate entry was found and the insert operation was ignored - debug!( - "Duplicate entry for block_hash: {}, insert operation ignored.", - header.block_hash - ); + match self.sql_tx.execute(sql, args) { + Ok(_) => Ok(self.sql_tx.last_insert_rowid()), + Err(e) => Err(e.into()), } - Ok(()) } /// Add an affirmation map into the database. Returns the affirmation map ID.
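For context on the `store_burnchain_db_entry` hunk above: replacing `INSERT OR IGNORE` with a plain `INSERT` turns a duplicate header from a logged no-op into a hard constraint error, and it makes `last_insert_rowid()` meaningful as the returned row ID. A minimal, self-contained rusqlite sketch of the two behaviors; the `headers` table here is a made-up stand-in, not the real `burnchain_db_block_headers` schema:

use rusqlite::{params, Connection, Result};

fn main() -> Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE headers (block_hash TEXT PRIMARY KEY, block_height INTEGER NOT NULL);",
    )?;

    // Plain INSERT: succeeds once, and the fresh row ID is immediately
    // available -- the value the new code returns to its caller.
    conn.execute(
        "INSERT INTO headers (block_hash, block_height) VALUES (?1, ?2)",
        params!["aa", 1],
    )?;
    println!("inserted row id = {}", conn.last_insert_rowid());

    // INSERT OR IGNORE: a duplicate key is skipped silently and execute()
    // reports 0 affected rows -- the condition the removed code detected
    // and logged instead of treating as an error.
    let affected = conn.execute(
        "INSERT OR IGNORE INTO headers (block_hash, block_height) VALUES (?1, ?2)",
        params!["aa", 2],
    )?;
    assert_eq!(affected, 0, "duplicate was ignored, not re-inserted");

    // A second plain INSERT of the same key instead fails with a
    // SQLITE_CONSTRAINT error, which the caller must handle.
    assert!(conn
        .execute(
            "INSERT INTO headers (block_hash, block_height) VALUES (?1, ?2)",
            params!["aa", 3],
        )
        .is_err());

    Ok(())
}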
@@ -884,7 +879,7 @@ impl<'a> BurnchainDBTransaction<'a> { Ok(()) } - pub(crate) fn store_blockstack_ops( + fn store_blockstack_ops( &self, burnchain: &Burnchain, indexer: &B, @@ -948,8 +943,7 @@ impl<'a> BurnchainDBTransaction<'a> { affirmation_map: AffirmationMap, ) -> Result<(), DBError> { assert_eq!((affirmation_map.len() as u64) + 1, reward_cycle); - let qry = - "INSERT OR REPLACE INTO overrides (reward_cycle, affirmation_map) VALUES (?1, ?2)"; + let qry = "INSERT INTO overrides (reward_cycle, affirmation_map) VALUES (?1, ?2)"; let args: &[&dyn ToSql] = &[&u64_to_sql(reward_cycle)?, &affirmation_map.encode()]; let mut stmt = self.sql_tx.prepare(qry)?; @@ -1107,6 +1101,13 @@ impl BurnchainDB { BurnchainDB::inner_get_canonical_chain_tip(&self.conn) } + #[cfg(test)] + pub fn get_first_header(&self) -> Result { + let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height ASC, block_hash DESC LIMIT 1"; + let opt = query_row(&self.conn, qry, NO_PARAMS)?; + opt.ok_or(BurnchainError::MissingParentBlock) + } + pub fn has_burnchain_block_at_height( conn: &DBConn, height: u64, @@ -1415,6 +1416,34 @@ impl BurnchainDB { Ok(blockstack_ops) } + #[cfg(test)] + pub fn raw_store_burnchain_block( + &mut self, + burnchain: &Burnchain, + indexer: &B, + header: BurnchainBlockHeader, + mut blockstack_ops: Vec, + ) -> Result<(), BurnchainError> { + apply_blockstack_txs_safety_checks(header.block_height, &mut blockstack_ops); + + let db_tx = self.tx_begin()?; + + test_debug!( + "Store raw block {},{} (parent {}) with {} ops", + &header.block_hash, + header.block_height, + &header.parent_block_hash, + blockstack_ops.len() + ); + + db_tx.store_burnchain_db_entry(&header)?; + db_tx.store_blockstack_ops(burnchain, indexer, &header, &blockstack_ops)?; + + db_tx.commit()?; + + Ok(()) + } + pub fn get_block_commit( conn: &DBConn, burn_header_hash: &BurnchainHeaderHash, diff --git a/stackslib/src/burnchains/mod.rs b/stackslib/src/burnchains/mod.rs index 26511e152cc..aa3c8332370 100644 --- a/stackslib/src/burnchains/mod.rs +++ b/stackslib/src/burnchains/mod.rs @@ -469,8 +469,8 @@ impl PoxConstants { pub fn regtest_default() -> PoxConstants { PoxConstants::new( 5, - 3, - 2, + 1, + 1, 3333333333333333, 1, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_START, @@ -534,11 +534,6 @@ impl PoxConstants { first_block_height + reward_cycle * u64::from(self.reward_cycle_length) + 1 } - pub fn reward_cycle_index(&self, first_block_height: u64, burn_height: u64) -> Option { - let effective_height = burn_height.checked_sub(first_block_height)?; - Some(effective_height % u64::from(self.reward_cycle_length)) - } - pub fn block_height_to_reward_cycle( &self, first_block_height: u64, diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 9c3b5ee4770..7b2a87be4cd 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -16,7 +16,6 @@ use std::cmp; -use rusqlite::{ToSql, NO_PARAMS}; use stacks_common::address::AddressHashMode; use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction as BtcTx; use stacks_common::deps_common::bitcoin::network::serialize::deserialize; @@ -28,8 +27,7 @@ use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::bitcoin::address::*; use crate::burnchains::bitcoin::blocks::*; use crate::burnchains::bitcoin::*; -use crate::burnchains::db::apply_blockstack_txs_safety_checks; -use crate::burnchains::{Error as BurnchainError, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; +use 
crate::burnchains::{PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; use crate::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; use crate::chainstate::burn::*; use crate::chainstate::coordinator::tests::next_txid; @@ -39,55 +37,6 @@ use crate::chainstate::stacks::*; use crate::core::{StacksEpochId, BITCOIN_REGTEST_FIRST_BLOCK_HASH}; use crate::util_lib::db::Error as DBError; -impl BurnchainDB { - pub fn get_first_header(&self) -> Result { - let qry = "SELECT * FROM burnchain_db_block_headers ORDER BY block_height ASC, block_hash DESC LIMIT 1"; - let opt = query_row(&self.conn, qry, NO_PARAMS)?; - opt.ok_or(BurnchainError::MissingParentBlock) - } - - /// Get back all of the parsed burnchain operations for a given block. - /// Used in testing to replay burnchain data. - #[cfg(test)] - pub fn get_burnchain_block_ops( - &self, - block_hash: &BurnchainHeaderHash, - ) -> Result, BurnchainError> { - let sql = "SELECT op FROM burnchain_db_block_ops WHERE block_hash = ?1"; - let args: &[&dyn ToSql] = &[block_hash]; - let mut ops: Vec = query_rows(&self.conn, sql, args)?; - ops.sort_by(|a, b| a.vtxindex().cmp(&b.vtxindex())); - Ok(ops) - } - - pub fn raw_store_burnchain_block( - &mut self, - burnchain: &Burnchain, - indexer: &B, - header: BurnchainBlockHeader, - mut blockstack_ops: Vec, - ) -> Result<(), BurnchainError> { - apply_blockstack_txs_safety_checks(header.block_height, &mut blockstack_ops); - - let db_tx = self.tx_begin()?; - - test_debug!( - "Store raw block {},{} (parent {}) with {} ops", - &header.block_hash, - header.block_height, - &header.parent_block_hash, - blockstack_ops.len() - ); - - db_tx.store_burnchain_db_entry(&header)?; - db_tx.store_blockstack_ops(burnchain, indexer, &header, &blockstack_ops)?; - - db_tx.commit()?; - - Ok(()) - } -} - impl BurnchainHeaderReader for Vec { fn read_burnchain_headers( &self, diff --git a/stackslib/src/chainstate/burn/db/processing.rs b/stackslib/src/chainstate/burn/db/processing.rs index 760188829c2..dadfcdba718 100644 --- a/stackslib/src/chainstate/burn/db/processing.rs +++ b/stackslib/src/chainstate/burn/db/processing.rs @@ -93,13 +93,6 @@ impl<'a> SortitionHandleTx<'a> { ); BurnchainError::OpError(e) }), - BlockstackOperationType::VoteForAggregateKey(ref op) => op.check().map_err(|e| { - warn!( - "REJECTED({}) vote for aggregate key op {} at {},{}: {:?}", - op.block_height, &op.txid, op.block_height, op.vtxindex, &e - ); - BurnchainError::OpError(e) - }), } } diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index a021f128440..d027f6ffd91 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -38,7 +38,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockId, TrieHash, VRFSeed, }; -use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; use stacks_common::util::vrf::*; @@ -59,7 +58,7 @@ use crate::chainstate::burn::operations::leader_block_commit::{ }; use crate::chainstate::burn::operations::{ BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, PreStxOp, - StackStxOp, TransferStxOp, VoteForAggregateKeyOp, + StackStxOp, TransferStxOp, }; use crate::chainstate::burn::{ BlockSnapshot, ConsensusHash, ConsensusHashExtensions, Opcodes, OpsHash, SortitionHash, @@ -319,19 
+318,6 @@ impl FromRow for StackStxOp { let stacked_ustx = u128::from_str_radix(&stacked_ustx_str, 10) .expect("CORRUPTION: bad u128 written to sortdb"); let num_cycles = row.get_unwrap("num_cycles"); - let signing_key_str_opt: Option = row.get("signer_key")?; - let signer_key = match signing_key_str_opt { - Some(key_str) => serde_json::from_str(&key_str).ok(), - None => None, - }; - let max_amount_str_opt: Option = row.get("max_amount")?; - let max_amount = match max_amount_str_opt { - Some(max_amount_str) => u128::from_str_radix(&max_amount_str, 10) - .map_err(|_| db_error::ParseError) - .ok(), - None => None, - }; - let auth_id = row.get("auth_id")?; Ok(StackStxOp { txid, @@ -342,9 +328,6 @@ impl FromRow for StackStxOp { reward_addr, stacked_ustx, num_cycles, - signer_key, - max_amount, - auth_id, }) } } @@ -409,39 +392,6 @@ impl FromRow for TransferStxOp { } } -impl FromRow for VoteForAggregateKeyOp { - fn from_row<'a>(row: &'a Row) -> Result { - let txid = Txid::from_column(row, "txid")?; - let vtxindex: u32 = row.get_unwrap("vtxindex"); - let block_height = u64::from_column(row, "block_height")?; - let burn_header_hash = BurnchainHeaderHash::from_column(row, "burn_header_hash")?; - - let sender = StacksAddress::from_column(row, "sender_addr")?; - let aggregate_key_str: String = row.get_unwrap("aggregate_key"); - let aggregate_key: StacksPublicKeyBuffer = serde_json::from_str(&aggregate_key_str) - .expect("CORRUPTION: DB stored bad transition ops"); - let round: u32 = row.get_unwrap("round"); - let reward_cycle = u64::from_column(row, "reward_cycle")?; - let signer_index: u16 = row.get_unwrap("signer_index"); - let signer_key_str: String = row.get_unwrap("signer_key"); - let signer_key: StacksPublicKeyBuffer = serde_json::from_str(&signer_key_str) - .expect("CORRUPTION: DB stored bad transition ops"); - - Ok(VoteForAggregateKeyOp { - txid, - vtxindex, - block_height, - burn_header_hash, - sender, - aggregate_key, - round, - reward_cycle, - signer_index, - signer_key, - }) - } -} - impl FromColumn for ASTRules { fn from_column<'a>(row: &'a Row, column_name: &str) -> Result { let x: u8 = row.get_unwrap(column_name); @@ -700,7 +650,7 @@ const SORTITION_DB_SCHEMA_6: &'static [&'static str] = &[r#" const SORTITION_DB_SCHEMA_7: &'static [&'static str] = &[r#" DELETE FROM epochs;"#]; -const LAST_SORTITION_DB_INDEX: &'static str = "index_vote_for_aggregate_key_burn_header_hash"; +const LAST_SORTITION_DB_INDEX: &'static str = "index_delegate_stx_burn_header_hash"; const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ r#"ALTER TABLE snapshots ADD miner_pk_hash TEXT DEFAULT NULL"#, r#" @@ -718,26 +668,6 @@ const SORTITION_DB_SCHEMA_8: &'static [&'static str] = &[ block_hash TEXT NOT NULL, block_height INTEGER NOT NULL );"#, - r#"ALTER TABLE stack_stx ADD signer_key TEXT DEFAULT NULL;"#, - r#"ALTER TABLE stack_stx ADD max_amount TEXT DEFAULT NULL;"#, - r#"ALTER TABLE stack_stx ADD auth_id INTEGER DEFAULT NULL;"#, - r#" - -- table definition for `vote-for-aggregate-key` burn op - CREATE TABLE vote_for_aggregate_key ( - txid TEXT NOT NULL, - vtxindex INTEGER NOT NULL, - block_height INTEGER NOT NULL, - burn_header_hash TEXT NOT NULL, - - sender_addr TEXT NOT NULL, - aggregate_key TEXT NOT NULL, - round INTEGER NOT NULL, - reward_cycle INTEGER NOT NULL, - signer_index INTEGER NOT NULL, - signer_key TEXT NOT NULL, - - PRIMARY KEY(txid,burn_header_Hash) - );"#, ]; const SORTITION_DB_INDEXES: &'static [&'static str] = &[ @@ -760,7 +690,6 @@ const SORTITION_DB_INDEXES: &'static [&'static str] = &[ 
"CREATE INDEX IF NOT EXISTS index_pox_payouts ON snapshots(pox_payouts);", "CREATE INDEX IF NOT EXISTS index_burn_header_hash_pox_valid ON snapshots(burn_header_hash,pox_valid);", "CREATE INDEX IF NOT EXISTS index_delegate_stx_burn_header_hash ON delegate_stx(burn_header_hash);", - "CREATE INDEX IF NOT EXISTS index_vote_for_aggregate_key_burn_header_hash ON vote_for_aggregate_key(burn_header_hash);", ]; pub struct SortitionDB { @@ -1488,6 +1417,9 @@ impl<'a> SortitionHandleTx<'a> { stacks_block_hash: &BlockHeaderHash, stacks_block_height: u64, ) -> Result<(), db_error> { + // NOTE: chain_tip here is the tip of the PoX fork on the canonical burn chain fork. + // consensus_hash refers to the consensus hash of the tip of the canonical Stacks fork + // we're updating. let chain_tip = SortitionDB::get_block_snapshot(self, &self.context.chain_tip)?.expect( "FAIL: Setting stacks block accepted in canonical chain tip which cannot be found", ); @@ -1730,6 +1662,23 @@ impl<'a> SortitionHandleTx<'a> { Ok(()) } + /// Update the canonical Stacks tip (testing only) + #[cfg(test)] + pub fn test_update_canonical_stacks_tip( + &mut self, + sort_id: &SortitionId, + consensus_hash: &ConsensusHash, + stacks_block_hash: &BlockHeaderHash, + stacks_block_height: u64, + ) -> Result<(), db_error> { + self.update_canonical_stacks_tip( + sort_id, + consensus_hash, + stacks_block_hash, + stacks_block_height, + ) + } + /// Mark an existing snapshot's stacks block as accepted at a particular burn chain tip within a PoX fork (identified by the consensus hash), /// and calculate and store its arrival index. /// If this Stacks block extends the canonical stacks chain tip, then also update the memoized canonical @@ -1831,7 +1780,7 @@ impl<'a> SortitionHandleConn<'a> { /// Does the sortition db expect to receive blocks /// signed by this signer set? /// - /// This only works if `consensus_hash` is within two reward cycles (4200 blocks) of the + /// This only works if `consensus_hash` is within one reward cycle (2100 blocks) of the /// sortition pointed to by this handle's sortiton tip. If it isn't, then this /// method returns Ok(false). 
This is to prevent a DDoS vector whereby compromised stale /// Signer keys can be used to blast out lots of Nakamoto blocks that will be accepted @@ -1858,14 +1807,7 @@ impl<'a> SortitionHandleConn<'a> { e })?; - if ch_sn.block_height - + u64::from( - self.context - .pox_constants - .reward_cycle_length - .saturating_mul(1), - ) - + u64::from(self.context.pox_constants.prepare_length) + if ch_sn.block_height + u64::from(self.context.pox_constants.reward_cycle_length) < sn.block_height { // too far in the past @@ -2734,6 +2676,113 @@ impl SortitionDB { Ok(db) } + /// Open a burn database at random tmp dir (used for testing) + #[cfg(test)] + pub fn connect_test( + first_block_height: u64, + first_burn_hash: &BurnchainHeaderHash, + ) -> Result { + use crate::core::StacksEpochExtension; + SortitionDB::connect_test_with_epochs( + first_block_height, + first_burn_hash, + StacksEpoch::unit_test(StacksEpochId::Epoch20, first_block_height), + ) + } + + /// Open a burn database at random tmp dir (used for testing) + /// But, take a particular epoch configuration + #[cfg(test)] + pub fn connect_test_with_epochs( + first_block_height: u64, + first_burn_hash: &BurnchainHeaderHash, + epochs: Vec, + ) -> Result { + let mut rng = rand::thread_rng(); + let mut buf = [0u8; 32]; + rng.fill_bytes(&mut buf); + let db_path_dir = format!( + "/tmp/stacks-node-tests/unit-tests-sortdb/db-{}", + to_hex(&buf) + ); + + SortitionDB::connect( + &db_path_dir, + first_block_height, + first_burn_hash, + get_epoch_time_secs(), + &epochs, + PoxConstants::test_default(), + true, + ) + } + + #[cfg(test)] + pub fn connect_v1( + path: &str, + first_block_height: u64, + first_burn_hash: &BurnchainHeaderHash, + first_burn_header_timestamp: u64, + readwrite: bool, + ) -> Result { + let create_flag = match fs::metadata(path) { + Err(e) => { + if e.kind() == ErrorKind::NotFound { + // need to create + if readwrite { + true + } else { + return Err(db_error::NoDBError); + } + } else { + return Err(db_error::IOError(e)); + } + } + Ok(_md) => false, + }; + + let index_path = db_mkdirs(path)?; + debug!( + "Connect/Open {} sortdb '{}' as '{}'", + if create_flag { "(create)" } else { "" }, + index_path, + if readwrite { "readwrite" } else { "readonly" } + ); + + let marf = SortitionDB::open_index(&index_path)?; + + let mut db = SortitionDB { + path: path.to_string(), + marf, + readwrite, + first_block_height, + first_burn_header_hash: first_burn_hash.clone(), + pox_constants: PoxConstants::test_default(), + }; + + if create_flag { + // instantiate! 
+ db.instantiate_v1( + first_block_height, + first_burn_hash, + first_burn_header_timestamp, + )?; + } else { + // validate -- must contain the given first block and first block hash + let snapshot = SortitionDB::get_first_block_snapshot(db.conn())?; + if !snapshot.is_initial() + || snapshot.block_height != first_block_height + || snapshot.burn_header_hash != *first_burn_hash + { + error!("Invalid genesis snapshot: sn.is_initial = {}, sn.block_height = {}, sn.burn_hash = {}, expect.block_height = {}, expect.burn_hash = {}", + snapshot.is_initial(), snapshot.block_height, &snapshot.burn_header_hash, first_block_height, first_burn_hash); + return Err(db_error::Corruption); + } + } + + Ok(db) + } + fn instantiate( &mut self, first_block_height: u64, @@ -2819,6 +2868,98 @@ impl SortitionDB { Ok(()) } + #[cfg(test)] + fn instantiate_v1( + &mut self, + first_block_height: u64, + first_burn_header_hash: &BurnchainHeaderHash, + first_burn_header_timestamp: u64, + ) -> Result<(), db_error> { + debug!("Instantiate SortDB"); + + sql_pragma(self.conn(), "journal_mode", &"WAL")?; + sql_pragma(self.conn(), "foreign_keys", &true)?; + + let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; + + // create first (sentinel) snapshot + debug!("Make first snapshot"); + let mut first_snapshot = BlockSnapshot::initial( + first_block_height, + first_burn_header_hash, + first_burn_header_timestamp, + ); + + assert!(first_snapshot.parent_burn_header_hash != first_snapshot.burn_header_hash); + assert_eq!( + first_snapshot.parent_burn_header_hash, + BurnchainHeaderHash::sentinel() + ); + + for row_text in SORTITION_DB_INITIAL_SCHEMA { + db_tx.execute_batch(row_text)?; + } + + db_tx.execute( + "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", + &[&"1"], + )?; + + db_tx.instantiate_index()?; + + let mut first_sn = first_snapshot.clone(); + first_sn.sortition_id = SortitionId::sentinel(); + let (index_root, pox_payout) = + db_tx.index_add_fork_info(&mut first_sn, &first_snapshot, &vec![], None, None, None)?; + first_snapshot.index_root = index_root; + + // manually insert the first block snapshot in instantiate_v1 testing code, because + // SCHEMA_9 adds a new column + let pox_payouts_json = serde_json::to_string(&pox_payout) + .expect("FATAL: could not encode `total_pox_payouts` as JSON"); + + let args = rusqlite::params![ + &u64_to_sql(first_snapshot.block_height)?, + &first_snapshot.burn_header_hash, + &u64_to_sql(first_snapshot.burn_header_timestamp)?, + &first_snapshot.parent_burn_header_hash, + &first_snapshot.consensus_hash, + &first_snapshot.ops_hash, + &first_snapshot.total_burn.to_string(), + &first_snapshot.sortition, + &first_snapshot.sortition_hash, + &first_snapshot.winning_block_txid, + &first_snapshot.winning_stacks_block_hash, + &first_snapshot.index_root, + &u64_to_sql(first_snapshot.num_sortitions)?, + &first_snapshot.stacks_block_accepted, + &u64_to_sql(first_snapshot.stacks_block_height)?, + &u64_to_sql(first_snapshot.arrival_index)?, + &u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, + &first_snapshot.canonical_stacks_tip_hash, + &first_snapshot.canonical_stacks_tip_consensus_hash, + &first_snapshot.sortition_id, + &first_snapshot.parent_sortition_id, + &first_snapshot.pox_valid, + &first_snapshot.accumulated_coinbase_ustx.to_string(), + &pox_payouts_json, + ]; + + db_tx.execute("INSERT INTO snapshots \ + (block_height, burn_header_hash, burn_header_timestamp, parent_burn_header_hash, consensus_hash, ops_hash, total_burn, sortition, sortition_hash, 
winning_block_txid, winning_stacks_block_hash, index_root, num_sortitions, \ + stacks_block_accepted, stacks_block_height, arrival_index, canonical_stacks_tip_height, canonical_stacks_tip_hash, canonical_stacks_tip_consensus_hash, sortition_id, parent_sortition_id, pox_valid, accumulated_coinbase_ustx, \ + pox_payouts) \ + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", args)?; + + db_tx.store_transition_ops( + &first_snapshot.sortition_id, + &BurnchainStateTransition::noop(), + )?; + + db_tx.commit()?; + Ok(()) + } + /// Get a block commit by its content-addressed location in a specific sortition. pub fn get_block_commit( conn: &Connection, @@ -3275,54 +3416,6 @@ impl SortitionDB { Ok(()) } - /// Figure out the reward cycle for `tip` and lookup the preprocessed - /// reward set (if it exists) for the active reward cycle during `tip` - pub fn get_preprocessed_reward_set_of( - &self, - tip: &SortitionId, - ) -> Result<Option<RewardCycleInfo>, db_error> { - let tip_sn = SortitionDB::get_block_snapshot(self.conn(), tip)?.ok_or_else(|| { - error!( - "Could not find snapshot for sortition while fetching reward set"; - "tip_sortition_id" => %tip, - ); - db_error::NotFoundError - })?; - - let reward_cycle_id = self - .pox_constants - .block_height_to_reward_cycle(self.first_block_height, tip_sn.block_height) - .expect("FATAL: stored snapshot with block height < first_block_height"); - - let prepare_phase_start = self - .pox_constants - .reward_cycle_to_block_height(self.first_block_height, reward_cycle_id) - .saturating_sub(self.pox_constants.prepare_length.into()); - - let first_sortition = get_ancestor_sort_id( - &self.index_conn(), - prepare_phase_start, - &tip_sn.sortition_id, - )? - .ok_or_else(|| { - error!( - "Could not find prepare phase start ancestor while fetching reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_height" => prepare_phase_start - ); - db_error::NotFoundError - })?; - - info!("Fetching preprocessed reward set"; - "tip_sortition_id" => %tip, - "reward_cycle_id" => reward_cycle_id, - "prepare_phase_start_sortition_id" => %first_sortition, - ); - - Self::get_preprocessed_reward_set(self.conn(), &first_sortition) - } - /// Get a pre-processed reward set. /// `sortition_id` is the first sortition ID of the prepare phase. pub fn get_preprocessed_reward_set( @@ -3937,6 +4030,16 @@ impl SortitionDB { Ok((new_snapshot.0, new_snapshot.1)) } + #[cfg(test)] + pub fn test_get_next_block_recipients( + &mut self, + burnchain: &Burnchain, + next_pox_info: Option<&RewardCycleInfo>, + ) -> Result<Option<RewardSetInfo>, BurnchainError> { + let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; + self.get_next_block_recipients(burnchain, &parent_snapshot, next_pox_info) + } + pub fn get_next_block_recipients( &mut self, burnchain: &Burnchain, @@ -4067,13 +4170,26 @@ impl SortitionDB { .unwrap_or(&burnchain.first_block_hash) .clone(); + let rc = burnchain + .block_height_to_reward_cycle(chain_tip.block_height) + .expect("FATAL: block height does not have a reward cycle"); + + let rc_height = burnchain.reward_cycle_to_block_height(rc); + let rc_consensus_hash = SortitionDB::get_ancestor_snapshot( + conn, + cmp::min(chain_tip.block_height, rc_height), + &chain_tip.sortition_id, + )?
+ .map(|sn| sn.consensus_hash) + .ok_or(db_error::NotFoundError)?; + test_debug!( "Chain view: {},{}-{},{},{}", chain_tip.block_height, chain_tip.burn_header_hash, stable_block_height, &burn_stable_block_hash, - &chain_tip.canonical_stacks_tip_consensus_hash, + &rc_consensus_hash, ); Ok(BurnchainView { burn_block_height: chain_tip.block_height, @@ -4081,7 +4197,7 @@ impl SortitionDB { burn_stable_block_height: stable_block_height, burn_stable_block_hash: burn_stable_block_hash, last_burn_block_hashes: last_burn_block_hashes, - rc_consensus_hash: chain_tip.canonical_stacks_tip_consensus_hash, + rc_consensus_hash, }) } } @@ -4171,20 +4287,6 @@ impl SortitionDB { ) } - /// Get the list of `vote-for-aggregate-key` operations processed in a given burnchain block. - /// This will be the same list in each PoX fork; it's up to the Stacks block-processing logic - /// to reject them. - pub fn get_vote_for_aggregate_key_ops( - conn: &Connection, - burn_header_hash: &BurnchainHeaderHash, - ) -> Result, db_error> { - query_rows( - conn, - "SELECT * FROM vote_for_aggregate_key WHERE burn_header_hash = ? ORDER BY vtxindex", - &[burn_header_hash], - ) - } - /// Get the list of Transfer-STX operations processed in a given burnchain block. /// This will be the same list in each PoX fork; it's up to the Stacks block-processing logic /// to reject them. @@ -4740,6 +4842,18 @@ impl SortitionDB { } } + /// Given the last_tenure_id (e.g. in a block-commit in Nakamoto), find its sortition in the + /// given sortition fork. + #[cfg(test)] + pub fn get_block_snapshot_for_winning_nakamoto_tenure( + ic: &SortitionDBConn, + tip: &SortitionId, + last_tenure_id: &StacksBlockId, + ) -> Result, db_error> { + let block_hash = BlockHeaderHash(last_tenure_id.0.clone()); + Self::get_block_snapshot_for_winning_stacks_block(ic, tip, &block_hash) + } + /// Merge the result of get_stacks_header_hashes() into a BlockHeaderCache pub fn merge_block_header_cache( cache: &mut BlockHeaderCache, @@ -4767,6 +4881,38 @@ impl SortitionDB { debug!("Block header cache has {} items", cache.len()); } + /// Get a blockstack burnchain operation by txid + #[cfg(test)] + pub fn get_burnchain_transaction( + conn: &Connection, + txid: &Txid, + ) -> Result, db_error> { + // leader key? + let leader_key_sql = "SELECT * FROM leader_keys WHERE txid = ?1 LIMIT 1"; + let args = [&txid]; + + let leader_key_res = query_row_panic(conn, &leader_key_sql, &args, || { + "Multiple leader keys with same txid".to_string() + })?; + if let Some(leader_key) = leader_key_res { + return Ok(Some(BlockstackOperationType::LeaderKeyRegister(leader_key))); + } + + // block commit? + let block_commit_sql = "SELECT * FROM block_commits WHERE txid = ?1 LIMIT 1"; + + let block_commit_res = query_row_panic(conn, &block_commit_sql, &args, || { + "Multiple block commits with same txid".to_string() + })?; + if let Some(block_commit) = block_commit_res { + return Ok(Some(BlockstackOperationType::LeaderBlockCommit( + block_commit, + ))); + } + + Ok(None) + } + /// Get the StacksEpoch for a given burn block height pub fn get_stacks_epoch( conn: &DBConn, @@ -4809,18 +4955,6 @@ impl SortitionDB { query_row(conn, sql, args) } - /// Are microblocks disabled by Epoch 2.5 at the height specified - /// in `at_burn_height`? - pub fn are_microblocks_disabled(conn: &DBConn, at_burn_height: u64) -> Result { - match Self::get_stacks_epoch_by_epoch_id(conn, &StacksEpochId::Epoch25)? 
{ - Some(epoch_25) => Ok(at_burn_height >= epoch_25.start_height), - None => { - // Epoch 2.5 is not defined, so it cannot disable microblocks - Ok(false) - } - } - } - /// Get the last reward cycle in epoch 2.05 pub fn get_last_epoch_2_05_reward_cycle(&self) -> Result { Self::static_get_last_epoch_2_05_reward_cycle( @@ -4959,10 +5093,8 @@ impl<'a> SortitionHandleTx<'a> { canonical_stacks_tip_block_hash, canonical_stacks_tip_height, ) = res?; - debug!( - "Setting stacks_chain_tips values"; - "sortition_id" => %sn.sortition_id, - "parent_sortition_id" => %parent_snapshot.sortition_id, + info!( + "Setting initial stacks_chain_tips values"; "stacks_tip_height" => canonical_stacks_tip_height, "stacks_tip_hash" => %canonical_stacks_tip_block_hash, "stacks_tip_consensus" => %canonical_stacks_tip_consensus_hash @@ -5126,13 +5258,6 @@ impl<'a> SortitionHandleTx<'a> { ); self.insert_delegate_stx(op) } - BlockstackOperationType::VoteForAggregateKey(ref op) => { - info!( - "ACCEPTED({}) vote for aggregate key {} at {},{}", - op.block_height, &op.txid, op.block_height, op.vtxindex - ); - self.insert_vote_for_aggregate_key(op) - } } } @@ -5174,12 +5299,9 @@ impl<'a> SortitionHandleTx<'a> { &op.reward_addr.to_db_string(), &op.stacked_ustx.to_string(), &op.num_cycles, - &serde_json::to_string(&op.signer_key).unwrap(), - &serde_json::to_string(&op.max_amount).unwrap(), - &op.auth_id, ]; - self.execute("REPLACE INTO stack_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, reward_addr, stacked_ustx, num_cycles, signer_key, max_amount, auth_id) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)", args)?; + self.execute("REPLACE INTO stack_stx (txid, vtxindex, block_height, burn_header_hash, sender_addr, reward_addr, stacked_ustx, num_cycles) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", args)?; Ok(()) } @@ -5203,29 +5325,6 @@ impl<'a> SortitionHandleTx<'a> { Ok(()) } - /// Insert a vote-for-aggregate-key op - fn insert_vote_for_aggregate_key( - &mut self, - op: &VoteForAggregateKeyOp, - ) -> Result<(), db_error> { - let args: &[&dyn ToSql] = &[ - &op.txid, - &op.vtxindex, - &u64_to_sql(op.block_height)?, - &op.burn_header_hash, - &op.sender.to_string(), - &serde_json::to_string(&op.aggregate_key).unwrap(), - &op.round, - &u64_to_sql(op.reward_cycle)?, - &op.signer_index, - &serde_json::to_string(&op.signer_key).unwrap(), - ]; - - self.execute("REPLACE INTO vote_for_aggregate_key (txid, vtxindex, block_height, burn_header_hash, sender_addr, aggregate_key, round, reward_cycle, signer_index, signer_key) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10)", args)?; - - Ok(()) - } - /// Insert a transfer-stx op fn insert_transfer_stx(&mut self, op: &TransferStxOp) -> Result<(), db_error> { let args: &[&dyn ToSql] = &[ @@ -6104,292 +6203,6 @@ pub mod tests { use crate::core::{StacksEpochExtension, *}; use crate::util_lib::db::Error as db_error; - impl<'a> SortitionHandleTx<'a> { - /// Update the canonical Stacks tip (testing only) - pub fn test_update_canonical_stacks_tip( - &mut self, - sort_id: &SortitionId, - consensus_hash: &ConsensusHash, - stacks_block_hash: &BlockHeaderHash, - stacks_block_height: u64, - ) -> Result<(), db_error> { - self.update_canonical_stacks_tip( - sort_id, - consensus_hash, - stacks_block_hash, - stacks_block_height, - ) - } - } - - impl SortitionDB { - /// Open a burn database at random tmp dir (used for testing) - pub fn connect_test( - first_block_height: u64, - first_burn_hash: &BurnchainHeaderHash, - ) -> Result { - use crate::core::StacksEpochExtension; - 
SortitionDB::connect_test_with_epochs( - first_block_height, - first_burn_hash, - StacksEpoch::unit_test(StacksEpochId::Epoch20, first_block_height), - ) - } - - /// Open a burn database at random tmp dir (used for testing) - /// But, take a particular epoch configuration - pub fn connect_test_with_epochs( - first_block_height: u64, - first_burn_hash: &BurnchainHeaderHash, - epochs: Vec, - ) -> Result { - let mut rng = rand::thread_rng(); - let mut buf = [0u8; 32]; - rng.fill_bytes(&mut buf); - let db_path_dir = format!( - "/tmp/stacks-node-tests/unit-tests-sortdb/db-{}", - to_hex(&buf) - ); - - SortitionDB::connect( - &db_path_dir, - first_block_height, - first_burn_hash, - get_epoch_time_secs(), - &epochs, - PoxConstants::test_default(), - true, - ) - } - - pub fn connect_v1( - path: &str, - first_block_height: u64, - first_burn_hash: &BurnchainHeaderHash, - first_burn_header_timestamp: u64, - readwrite: bool, - ) -> Result { - let create_flag = match fs::metadata(path) { - Err(e) => { - if e.kind() == ErrorKind::NotFound { - // need to create - if readwrite { - true - } else { - return Err(db_error::NoDBError); - } - } else { - return Err(db_error::IOError(e)); - } - } - Ok(_md) => false, - }; - - let index_path = db_mkdirs(path)?; - debug!( - "Connect/Open {} sortdb '{}' as '{}'", - if create_flag { "(create)" } else { "" }, - index_path, - if readwrite { "readwrite" } else { "readonly" } - ); - - let marf = SortitionDB::open_index(&index_path)?; - - let mut db = SortitionDB { - path: path.to_string(), - marf, - readwrite, - first_block_height, - first_burn_header_hash: first_burn_hash.clone(), - pox_constants: PoxConstants::test_default(), - }; - - if create_flag { - // instantiate! - db.instantiate_v1( - first_block_height, - first_burn_hash, - first_burn_header_timestamp, - )?; - } else { - // validate -- must contain the given first block and first block hash - let snapshot = SortitionDB::get_first_block_snapshot(db.conn())?; - if !snapshot.is_initial() - || snapshot.block_height != first_block_height - || snapshot.burn_header_hash != *first_burn_hash - { - error!("Invalid genesis snapshot: sn.is_initial = {}, sn.block_height = {}, sn.burn_hash = {}, expect.block_height = {}, expect.burn_hash = {}", - snapshot.is_initial(), snapshot.block_height, &snapshot.burn_header_hash, first_block_height, first_burn_hash); - return Err(db_error::Corruption); - } - } - - Ok(db) - } - - fn instantiate_v1( - &mut self, - first_block_height: u64, - first_burn_header_hash: &BurnchainHeaderHash, - first_burn_header_timestamp: u64, - ) -> Result<(), db_error> { - debug!("Instantiate SortDB"); - - sql_pragma(self.conn(), "journal_mode", &"WAL")?; - sql_pragma(self.conn(), "foreign_keys", &true)?; - - let mut db_tx = SortitionHandleTx::begin(self, &SortitionId::sentinel())?; - - // create first (sentinel) snapshot - debug!("Make first snapshot"); - let mut first_snapshot = BlockSnapshot::initial( - first_block_height, - first_burn_header_hash, - first_burn_header_timestamp, - ); - - assert!(first_snapshot.parent_burn_header_hash != first_snapshot.burn_header_hash); - assert_eq!( - first_snapshot.parent_burn_header_hash, - BurnchainHeaderHash::sentinel() - ); - - for row_text in SORTITION_DB_INITIAL_SCHEMA { - db_tx.execute_batch(row_text)?; - } - - db_tx.execute( - "INSERT OR REPLACE INTO db_config (version) VALUES (?1)", - &[&"1"], - )?; - - db_tx.instantiate_index()?; - - let mut first_sn = first_snapshot.clone(); - first_sn.sortition_id = SortitionId::sentinel(); - let (index_root, pox_payout) = 
db_tx.index_add_fork_info( - &mut first_sn, - &first_snapshot, - &vec![], - None, - None, - None, - )?; - first_snapshot.index_root = index_root; - - // manually insert the first block snapshot in instantiate_v1 testing code, because - // SCHEMA_9 adds a new column - let pox_payouts_json = serde_json::to_string(&pox_payout) - .expect("FATAL: could not encode `total_pox_payouts` as JSON"); - - let args = rusqlite::params![ - &u64_to_sql(first_snapshot.block_height)?, - &first_snapshot.burn_header_hash, - &u64_to_sql(first_snapshot.burn_header_timestamp)?, - &first_snapshot.parent_burn_header_hash, - &first_snapshot.consensus_hash, - &first_snapshot.ops_hash, - &first_snapshot.total_burn.to_string(), - &first_snapshot.sortition, - &first_snapshot.sortition_hash, - &first_snapshot.winning_block_txid, - &first_snapshot.winning_stacks_block_hash, - &first_snapshot.index_root, - &u64_to_sql(first_snapshot.num_sortitions)?, - &first_snapshot.stacks_block_accepted, - &u64_to_sql(first_snapshot.stacks_block_height)?, - &u64_to_sql(first_snapshot.arrival_index)?, - &u64_to_sql(first_snapshot.canonical_stacks_tip_height)?, - &first_snapshot.canonical_stacks_tip_hash, - &first_snapshot.canonical_stacks_tip_consensus_hash, - &first_snapshot.sortition_id, - &first_snapshot.parent_sortition_id, - &first_snapshot.pox_valid, - &first_snapshot.accumulated_coinbase_ustx.to_string(), - &pox_payouts_json, - ]; - - db_tx.execute("INSERT INTO snapshots \ - (block_height, burn_header_hash, burn_header_timestamp, parent_burn_header_hash, consensus_hash, ops_hash, total_burn, sortition, sortition_hash, winning_block_txid, winning_stacks_block_hash, index_root, num_sortitions, \ - stacks_block_accepted, stacks_block_height, arrival_index, canonical_stacks_tip_height, canonical_stacks_tip_hash, canonical_stacks_tip_consensus_hash, sortition_id, parent_sortition_id, pox_valid, accumulated_coinbase_ustx, \ - pox_payouts) \ - VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13, ?14, ?15, ?16, ?17, ?18, ?19, ?20, ?21, ?22, ?23, ?24)", args)?; - - db_tx.store_transition_ops( - &first_snapshot.sortition_id, - &BurnchainStateTransition::noop(), - )?; - - db_tx.commit()?; - Ok(()) - } - - pub fn test_get_next_block_recipients( - &mut self, - burnchain: &Burnchain, - next_pox_info: Option<&RewardCycleInfo>, - ) -> Result, BurnchainError> { - let parent_snapshot = SortitionDB::get_canonical_burn_chain_tip(self.conn())?; - self.get_next_block_recipients(burnchain, &parent_snapshot, next_pox_info) - } - - pub fn set_canonical_stacks_chain_tip( - conn: &Connection, - ch: &ConsensusHash, - bhh: &BlockHeaderHash, - height: u64, - ) -> Result<(), db_error> { - let tip = SortitionDB::get_canonical_burn_chain_tip(conn)?; - let args: &[&dyn ToSql] = &[ch, bhh, &u64_to_sql(height)?, &tip.sortition_id]; - conn.execute("UPDATE snapshots SET canonical_stacks_tip_consensus_hash = ?1, canonical_stacks_tip_hash = ?2, canonical_stacks_tip_height = ?3 - WHERE sortition_id = ?4", args) - .map_err(db_error::SqliteError)?; - Ok(()) - } - - /// Given the last_tenure_id (e.g. in a block-commit in Nakamoto), find its sortition in the - /// given sortition fork. 
- pub fn get_block_snapshot_for_winning_nakamoto_tenure( - ic: &SortitionDBConn, - tip: &SortitionId, - last_tenure_id: &StacksBlockId, - ) -> Result, db_error> { - let block_hash = BlockHeaderHash(last_tenure_id.0.clone()); - Self::get_block_snapshot_for_winning_stacks_block(ic, tip, &block_hash) - } - - /// Get a blockstack burnchain operation by txid - pub fn get_burnchain_transaction( - conn: &Connection, - txid: &Txid, - ) -> Result, db_error> { - // leader key? - let leader_key_sql = "SELECT * FROM leader_keys WHERE txid = ?1 LIMIT 1"; - let args = [&txid]; - - let leader_key_res = query_row_panic(conn, &leader_key_sql, &args, || { - "Multiple leader keys with same txid".to_string() - })?; - if let Some(leader_key) = leader_key_res { - return Ok(Some(BlockstackOperationType::LeaderKeyRegister(leader_key))); - } - - // block commit? - let block_commit_sql = "SELECT * FROM block_commits WHERE txid = ?1 LIMIT 1"; - - let block_commit_res = query_row_panic(conn, &block_commit_sql, &args, || { - "Multiple block commits with same txid".to_string() - })?; - if let Some(block_commit) = block_commit_res { - return Ok(Some(BlockstackOperationType::LeaderBlockCommit( - block_commit, - ))); - } - - Ok(None) - } - } - #[test] fn test_instantiate() { let first_burn_hash = BurnchainHeaderHash::from_hex( @@ -10153,11 +9966,6 @@ pub mod tests { ) .unwrap(); let mut db = SortitionDB::connect_test(block_height, &first_burn_hash).unwrap(); - let vote_pubkey = StacksPublicKey::from_hex( - "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", - ) - .unwrap(); - let vote_key: StacksPublicKeyBuffer = vote_pubkey.to_bytes_compressed().as_slice().into(); let good_ops = vec![ BlockstackOperationType::TransferStx(TransferStxOp { @@ -10176,9 +9984,6 @@ pub mod tests { reward_addr: PoxAddress::Standard(StacksAddress::new(4, Hash160([4u8; 20])), None), stacked_ustx: 456, num_cycles: 6, - signer_key: Some(StacksPublicKeyBuffer([0x02; 33])), - max_amount: Some(u128::MAX), - auth_id: Some(0u32), txid: Txid([0x02; 32]), vtxindex: 2, @@ -10203,19 +10008,6 @@ pub mod tests { block_height, burn_header_hash: first_burn_hash.clone(), }), - BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { - sender: StacksAddress::new(6, Hash160([6u8; 20])), - aggregate_key: vote_key, - signer_key: vote_key, - round: 1, - reward_cycle: 2, - signer_index: 3, - - txid: Txid([0x05; 32]), - vtxindex: 4, - block_height, - burn_header_hash: first_burn_hash.clone(), - }), ]; let mut tx = db.tx_begin_at_tip(); @@ -10246,13 +10038,6 @@ pub mod tests { good_ops[2] ); - let ops = SortitionDB::get_vote_for_aggregate_key_ops(db.conn(), &first_burn_hash).unwrap(); - assert_eq!(ops.len(), 1); - assert_eq!( - BlockstackOperationType::VoteForAggregateKey(ops[0].clone()), - good_ops[3] - ); - // if the same ops get mined in a different burnchain block, they will still be available let good_ops_2 = vec![ BlockstackOperationType::TransferStx(TransferStxOp { @@ -10271,9 +10056,6 @@ pub mod tests { reward_addr: PoxAddress::Standard(StacksAddress::new(4, Hash160([4u8; 20])), None), stacked_ustx: 456, num_cycles: 6, - signer_key: None, - max_amount: None, - auth_id: None, txid: Txid([0x02; 32]), vtxindex: 2, @@ -10295,19 +10077,6 @@ pub mod tests { block_height, burn_header_hash: fork_burn_hash.clone(), }), - BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { - sender: StacksAddress::new(6, Hash160([6u8; 20])), - aggregate_key: StacksPublicKeyBuffer([0x01; 33]), - signer_key: StacksPublicKeyBuffer([0x02; 33]), - 
round: 1, - reward_cycle: 2, - signer_index: 3, - - txid: Txid([0x05; 32]), - vtxindex: 4, - block_height, - burn_header_hash: fork_burn_hash.clone(), - }), ]; let mut tx = db.tx_begin_at_tip(); @@ -10339,13 +10108,6 @@ pub mod tests { good_ops[2] ); - let ops = SortitionDB::get_vote_for_aggregate_key_ops(db.conn(), &first_burn_hash).unwrap(); - assert_eq!(ops.len(), 1); - assert_eq!( - BlockstackOperationType::VoteForAggregateKey(ops[0].clone()), - good_ops[3] - ); - // and so are the new ones let ops = SortitionDB::get_transfer_stx_ops(db.conn(), &fork_burn_hash).unwrap(); assert_eq!(ops.len(), 1); @@ -10367,12 +10129,5 @@ pub mod tests { BlockstackOperationType::DelegateStx(ops[0].clone()), good_ops_2[2] ); - - let ops = SortitionDB::get_vote_for_aggregate_key_ops(db.conn(), &fork_burn_hash).unwrap(); - assert_eq!(ops.len(), 1); - assert_eq!( - BlockstackOperationType::VoteForAggregateKey(ops[0].clone()), - good_ops_2[3] - ); } } diff --git a/stackslib/src/chainstate/burn/mod.rs b/stackslib/src/chainstate/burn/mod.rs index b764344eb64..13f290d93b2 100644 --- a/stackslib/src/chainstate/burn/mod.rs +++ b/stackslib/src/chainstate/burn/mod.rs @@ -67,7 +67,6 @@ pub enum Opcodes { PreStx = 'p' as u8, TransferStx = '$' as u8, DelegateStx = '#' as u8, - VoteForAggregateKey = 'v' as u8, } // a burnchain block snapshot @@ -192,7 +191,6 @@ impl Opcodes { const HTTP_PEG_IN: &'static str = "peg_in"; const HTTP_PEG_OUT_REQUEST: &'static str = "peg_out_request"; const HTTP_PEG_OUT_FULFILL: &'static str = "peg_out_fulfill"; - const HTTP_VOTE_FOR_AGGREGATE_KEY: &'static str = "vote_for_aggregate_key"; pub fn to_http_str(&self) -> &'static str { match self { @@ -202,7 +200,6 @@ impl Opcodes { Opcodes::PreStx => Self::HTTP_PRE_STX, Opcodes::TransferStx => Self::HTTP_TRANSFER_STX, Opcodes::DelegateStx => Self::HTTP_DELEGATE_STX, - Opcodes::VoteForAggregateKey => Self::HTTP_VOTE_FOR_AGGREGATE_KEY, } } @@ -214,7 +211,6 @@ impl Opcodes { Self::HTTP_PRE_STX => Opcodes::PreStx, Self::HTTP_TRANSFER_STX => Opcodes::TransferStx, Self::HTTP_DELEGATE_STX => Opcodes::DelegateStx, - Self::HTTP_VOTE_FOR_AGGREGATE_KEY => Opcodes::VoteForAggregateKey, _ => return None, }; diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index dd609f40205..43d11691c00 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -1299,8 +1299,6 @@ mod tests { }); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = 16843019; burnchain.pox_constants.sunset_end = 16843020; @@ -1353,8 +1351,6 @@ mod tests { }); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = 16843019; burnchain.pox_constants.sunset_end = 16843020; @@ -1430,8 +1426,6 @@ mod tests { }); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = 16843019; burnchain.pox_constants.sunset_end = 16843020; @@ -1474,8 +1468,6 @@ mod tests { }); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = 
16843019; burnchain.pox_constants.sunset_end = 16843020; @@ -1527,8 +1519,6 @@ mod tests { }); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = 16843019; burnchain.pox_constants.sunset_end = 16843020; @@ -1604,8 +1594,6 @@ mod tests { }); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = 16843019; burnchain.pox_constants.sunset_end = 16843020; @@ -1723,8 +1711,6 @@ mod tests { ); let mut burnchain = Burnchain::regtest("nope"); - burnchain.pox_constants.prepare_length = 1; - burnchain.pox_constants.anchor_threshold = 1; burnchain.pox_constants.sunset_start = block_height; burnchain.pox_constants.sunset_end = block_height + 1; diff --git a/stackslib/src/chainstate/burn/operations/mod.rs b/stackslib/src/chainstate/burn/operations/mod.rs index 90f7f792911..e51a20f630d 100644 --- a/stackslib/src/chainstate/burn/operations/mod.rs +++ b/stackslib/src/chainstate/burn/operations/mod.rs @@ -22,7 +22,6 @@ use serde_json::json; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, TrieHash, VRFSeed, }; -use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{hex_bytes, to_hex, Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::VRFPublicKey; @@ -45,7 +44,6 @@ pub mod leader_block_commit; pub mod leader_key_register; pub mod stack_stx; pub mod transfer_stx; -pub mod vote_for_aggregate_key; #[cfg(test)] mod test; @@ -82,16 +80,12 @@ pub enum Error { // stack stx related errors StackStxMustBePositive, StackStxInvalidCycles, - StackStxInvalidKey, // errors associated with delegate stx DelegateStxMustBePositive, // sBTC errors AmountMustBePositive, - - // vote-for-aggregate-public-key errors - VoteForAggregateKeyInvalidKey, } impl fmt::Display for Error { @@ -143,11 +137,7 @@ impl fmt::Display for Error { f, "Stack STX must set num cycles between 1 and max num cycles" ), - Error::StackStxInvalidKey => write!(f, "Signer key is invalid"), Error::DelegateStxMustBePositive => write!(f, "Delegate STX must be positive amount"), - Error::VoteForAggregateKeyInvalidKey => { - write!(f, "Aggregate key is invalid") - } Self::AmountMustBePositive => write!(f, "Peg in amount must be positive"), } } @@ -192,9 +182,6 @@ pub struct StackStxOp { /// how many ustx this transaction locks pub stacked_ustx: u128, pub num_cycles: u8, - pub signer_key: Option, - pub max_amount: Option, - pub auth_id: Option, // common to all transactions pub txid: Txid, // transaction ID @@ -283,22 +270,6 @@ pub struct DelegateStxOp { pub burn_header_hash: BurnchainHeaderHash, // hash of the burn chain block header } -#[derive(Debug, PartialEq, Clone, Eq, Serialize, Deserialize)] -pub struct VoteForAggregateKeyOp { - pub sender: StacksAddress, - pub aggregate_key: StacksPublicKeyBuffer, - pub round: u32, - pub reward_cycle: u64, - pub signer_index: u16, - pub signer_key: StacksPublicKeyBuffer, - - // common to all transactions - pub txid: Txid, // transaction ID - pub vtxindex: u32, // index in the block where this tx occurs - pub block_height: u64, // block height at which this tx occurs - pub burn_header_hash: BurnchainHeaderHash, // hash of the burn chain block header -} - fn hex_ser_memo(bytes: &[u8], s: S) -> Result { let inst = to_hex(bytes); 
s.serialize_str(inst.as_str()) @@ -341,7 +312,6 @@ pub enum BlockstackOperationType { StackStx(StackStxOp), TransferStx(TransferStxOp), DelegateStx(DelegateStxOp), - VoteForAggregateKey(VoteForAggregateKeyOp), } // serialization helpers for blockstack_op_to_json function @@ -368,7 +338,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(_) => Opcodes::PreStx, BlockstackOperationType::TransferStx(_) => Opcodes::TransferStx, BlockstackOperationType::DelegateStx(_) => Opcodes::DelegateStx, - BlockstackOperationType::VoteForAggregateKey(_) => Opcodes::VoteForAggregateKey, } } @@ -384,7 +353,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(ref data) => &data.txid, BlockstackOperationType::TransferStx(ref data) => &data.txid, BlockstackOperationType::DelegateStx(ref data) => &data.txid, - BlockstackOperationType::VoteForAggregateKey(ref data) => &data.txid, } } @@ -396,7 +364,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(ref data) => data.vtxindex, BlockstackOperationType::TransferStx(ref data) => data.vtxindex, BlockstackOperationType::DelegateStx(ref data) => data.vtxindex, - BlockstackOperationType::VoteForAggregateKey(ref data) => data.vtxindex, } } @@ -408,7 +375,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(ref data) => data.block_height, BlockstackOperationType::TransferStx(ref data) => data.block_height, BlockstackOperationType::DelegateStx(ref data) => data.block_height, - BlockstackOperationType::VoteForAggregateKey(ref data) => data.block_height, } } @@ -420,7 +386,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(ref data) => data.burn_header_hash.clone(), BlockstackOperationType::TransferStx(ref data) => data.burn_header_hash.clone(), BlockstackOperationType::DelegateStx(ref data) => data.burn_header_hash.clone(), - BlockstackOperationType::VoteForAggregateKey(ref data) => data.burn_header_hash.clone(), } } @@ -435,9 +400,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(ref mut data) => data.block_height = height, BlockstackOperationType::TransferStx(ref mut data) => data.block_height = height, BlockstackOperationType::DelegateStx(ref mut data) => data.block_height = height, - BlockstackOperationType::VoteForAggregateKey(ref mut data) => { - data.block_height = height - } }; } @@ -454,9 +416,6 @@ impl BlockstackOperationType { BlockstackOperationType::PreStx(ref mut data) => data.burn_header_hash = hash, BlockstackOperationType::TransferStx(ref mut data) => data.burn_header_hash = hash, BlockstackOperationType::DelegateStx(ref mut data) => data.burn_header_hash = hash, - BlockstackOperationType::VoteForAggregateKey(ref mut data) => { - data.burn_header_hash = hash - } }; } @@ -483,9 +442,6 @@ impl BlockstackOperationType { "stacked_ustx": op.stacked_ustx, "burn_txid": op.txid, "vtxindex": op.vtxindex, - "signer_key": op.signer_key.as_ref().map(|k| serde_json::Value::String(k.to_hex())).unwrap_or(serde_json::Value::Null), - "max_amount": op.max_amount.map_or(serde_json::Value::Null, |amount| serde_json::Value::Number(serde_json::Number::from(amount))), - "auth_id": op.auth_id.map_or(serde_json::Value::Null, |id| serde_json::Value::Number(serde_json::Number::from(id))), } }) } @@ -522,23 +478,6 @@ impl BlockstackOperationType { }) } - pub fn vote_for_aggregate_key_to_json(op: &VoteForAggregateKeyOp) -> serde_json::Value { - json!({ - "vote_for_aggregate_key": { - "burn_block_height": op.block_height, - "burn_header_hash": &op.burn_header_hash.to_hex(), - "aggregate_key": 
op.aggregate_key.to_hex(), - "reward_cycle": op.reward_cycle, - "round": op.round, - "sender": stacks_addr_serialize(&op.sender), - "signer_index": op.signer_index, - "signer_key": op.signer_key.to_hex(), - "burn_txid": op.txid, - "vtxindex": op.vtxindex, - } - }) - } - // An explicit JSON serialization function is used (instead of using the default serialization // function) for the Blockstack ops. This is because (a) we wanted the serialization to be // more readable, and (b) the serialization used to display PoxAddress as a string is lossy, @@ -550,12 +489,9 @@ impl BlockstackOperationType { BlockstackOperationType::StackStx(op) => Self::stack_stx_to_json(op), BlockstackOperationType::TransferStx(op) => Self::transfer_stx_to_json(op), BlockstackOperationType::DelegateStx(op) => Self::delegate_stx_to_json(op), - BlockstackOperationType::VoteForAggregateKey(op) => { - Self::vote_for_aggregate_key_to_json(op) - } // json serialization for the remaining op types is not implemented for now. This function // is currently only used to json-ify burnchain ops executed as Stacks transactions (so, - // stack_stx, transfer_stx, delegate_stx, and vote_for_aggregate_key). + // stack_stx, transfer_stx, and delegate_stx). _ => json!(null), } } @@ -570,7 +506,6 @@ impl fmt::Display for BlockstackOperationType { BlockstackOperationType::LeaderBlockCommit(ref op) => write!(f, "{:?}", op), BlockstackOperationType::TransferStx(ref op) => write!(f, "{:?}", op), BlockstackOperationType::DelegateStx(ref op) => write!(f, "{:?}", op), - BlockstackOperationType::VoteForAggregateKey(ref op) => write!(f, "{:?}", op), } } } diff --git a/stackslib/src/chainstate/burn/operations/stack_stx.rs b/stackslib/src/chainstate/burn/operations/stack_stx.rs index 20dca3187a7..786c3ad1587 100644 --- a/stackslib/src/chainstate/burn/operations/stack_stx.rs +++ b/stackslib/src/chainstate/burn/operations/stack_stx.rs @@ -18,25 +18,19 @@ use std::io::{Read, Write}; use stacks_common::address::AddressHashMode; use stacks_common::codec::{write_next, Error as codec_error, StacksMessageCodec}; -use stacks_common::deps_common::bitcoin::blockdata::script::Builder; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, TrieHash, VRFSeed, }; -use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::to_hex; use stacks_common::util::log; -use stacks_common::util::secp256k1::Secp256k1PublicKey; use stacks_common::util::vrf::{VRFPrivateKey, VRFPublicKey, VRF}; -use crate::burnchains::bitcoin::bits::parse_script; -use crate::burnchains::bitcoin::{BitcoinTxInput, BitcoinTxInputStructured}; use crate::burnchains::{ Address, Burnchain, BurnchainBlockHeader, BurnchainTransaction, PoxConstants, PublicKey, Txid, }; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; use crate::chainstate::burn::operations::{ - parse_u128_from_be, parse_u32_from_be, parse_u64_from_be, BlockstackOperationType, - Error as op_error, PreStxOp, StackStxOp, + parse_u128_from_be, BlockstackOperationType, Error as op_error, PreStxOp, StackStxOp, }; use crate::chainstate::burn::{ConsensusHash, Opcodes}; use crate::chainstate::stacks::address::PoxAddress; @@ -49,9 +43,6 @@ use crate::net::Error as net_error; struct ParsedData { stacked_ustx: u128, num_cycles: u8, - signer_key: Option, - max_amount: Option, - auth_id: Option, } pub static OUTPUTS_PER_COMMIT: usize = 2; @@ -161,18 +152,12 @@ impl StackStxOp { reward_addr: &PoxAddress, stacked_ustx: u128, num_cycles: u8, - signer_key: Option, - 
max_amount: Option, - auth_id: Option, ) -> StackStxOp { StackStxOp { sender: sender.clone(), reward_addr: reward_addr.clone(), stacked_ustx, num_cycles, - signer_key, - max_amount, - auth_id, // to be filled in txid: Txid([0u8; 32]), vtxindex: 0, @@ -184,9 +169,9 @@ impl StackStxOp { fn parse_data(data: &Vec) -> Option { /* Wire format: - 0 2 3 19 20 53 69 73 - |------|--|-----------------------------|------------|-------------------|-------------------|-------------------------| - magic op uSTX to lock (u128) cycles (u8) signer key (optional) max_amount (optional u128) auth_id (optional u32) + 0 2 3 19 20 + |------|--|-----------------------------|---------| + magic op uSTX to lock (u128) cycles (u8) Note that `data` is missing the first 3 bytes -- the magic and op have been stripped @@ -198,7 +183,7 @@ impl StackStxOp { if data.len() < 17 { // too short warn!( - "StacksStxOp payload is malformed ({} bytes, expected {} or more)", + "StacksStxOp payload is malformed ({} bytes, expected {})", data.len(), 17 ); @@ -208,32 +193,9 @@ impl StackStxOp { let stacked_ustx = parse_u128_from_be(&data[0..16]).unwrap(); let num_cycles = data[16]; - let mut signer_key: Option = None; - let mut max_amount: Option = None; - let mut auth_id: Option = None; - - if data.len() >= 50 { - signer_key = Some(StacksPublicKeyBuffer::from(&data[17..50])); - } - if data.len() >= 66 { - let Some(amt) = parse_u128_from_be(&data[50..66]) else { - return None; - }; - max_amount = Some(amt); - } - if data.len() >= 70 { - let Some(id) = parse_u32_from_be(&data[66..70]) else { - return None; - }; - auth_id = Some(id); - } - Some(ParsedData { stacked_ustx, num_cycles, - signer_key, - max_amount, - auth_id, }) } @@ -335,12 +297,9 @@ impl StackStxOp { Ok(StackStxOp { sender: sender.clone(), - reward_addr, + reward_addr: reward_addr, stacked_ustx: data.stacked_ustx, num_cycles: data.num_cycles, - signer_key: data.signer_key, - max_amount: data.max_amount, - auth_id: data.auth_id, txid: tx.txid(), vtxindex: tx.vtxindex(), block_height, @@ -363,28 +322,16 @@ impl StacksMessageCodec for PreStxOp { impl StacksMessageCodec for StackStxOp { /* - 0 2 3 19 20 53 69 73 - |------|--|-----------------------------|------------|-------------------|-------------------|-------------------------| - magic op uSTX to lock (u128) cycles (u8) signer key (optional) max_amount (optional u128) auth_id (optional u32) + Wire format: + 0 2 3 19 20 + |------|--|-----------------------------|---------| + magic op uSTX to lock (u128) cycles (u8) */ fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { write_next(fd, &(Opcodes::StackStx as u8))?; fd.write_all(&self.stacked_ustx.to_be_bytes()) .map_err(|e| codec_error::WriteError(e))?; write_next(fd, &self.num_cycles)?; - - if let Some(signer_key) = &self.signer_key { - fd.write_all(&signer_key.as_bytes()[..]) - .map_err(codec_error::WriteError)?; - } - if let Some(max_amount) = &self.max_amount { - fd.write_all(&max_amount.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - } - if let Some(auth_id) = &self.auth_id { - fd.write_all(&auth_id.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - } Ok(()) } @@ -407,21 +354,13 @@ impl StackStxOp { self.num_cycles, POX_MAX_NUM_CYCLES ); } - - // Check to see if the signer key is valid if available - if let Some(signer_key) = self.signer_key { - Secp256k1PublicKey::from_slice(signer_key.as_bytes()) - .map_err(|_| op_error::StackStxInvalidKey)?; - } - Ok(()) } } #[cfg(test)] mod tests { - use 
stacks_common::address::{AddressHashMode, C32_ADDRESS_VERSION_MAINNET_SINGLESIG}; - use stacks_common::deps_common::bitcoin::blockdata::opcodes; + use stacks_common::address::AddressHashMode; use stacks_common::deps_common::bitcoin::blockdata::transaction::Transaction; use stacks_common::deps_common::bitcoin::network::serialize::{deserialize, serialize_hex}; use stacks_common::types::chainstate::{BlockHeaderHash, StacksAddress, VRFSeed}; @@ -674,81 +613,6 @@ mod tests { ); assert_eq!(op.stacked_ustx, u128::from_be_bytes([1; 16])); assert_eq!(op.num_cycles, 1); - assert_eq!(op.signer_key, Some(StacksPublicKeyBuffer([0x01; 33]))); - } - - #[test] - fn test_parse_stack_stx_signer_key_is_none() { - // Set the option flag for `signer_key` to None - let data = vec![1; 17]; - let tx = BitcoinTransaction { - txid: Txid([0; 32]), - vtxindex: 0, - opcode: Opcodes::StackStx as u8, - data: data, - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 0), - } - .into()], - outputs: vec![ - BitcoinTxOutput { - units: 10, - address: BitcoinAddress::Legacy(LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }), - }, - BitcoinTxOutput { - units: 10, - address: BitcoinAddress::Legacy(LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([2; 20]), - }), - }, - BitcoinTxOutput { - units: 30, - address: BitcoinAddress::Legacy(LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([0; 20]), - }), - }, - ], - }; - - let sender = StacksAddress { - version: 0, - bytes: Hash160([0; 20]), - }; - let op = StackStxOp::parse_from_tx( - 16843022, - &BurnchainHeaderHash([0; 32]), - StacksEpochId::Epoch2_05, - &BurnchainTransaction::Bitcoin(tx.clone()), - &sender, - 16843023, - ) - .unwrap(); - - assert_eq!(&op.sender, &sender); - assert_eq!( - &op.reward_addr, - &PoxAddress::Standard( - StacksAddress::from_legacy_bitcoin_address( - &tx.outputs[0].address.clone().expect_legacy() - ), - Some(AddressHashMode::SerializeP2PKH) - ) - ); - assert_eq!(op.stacked_ustx, u128::from_be_bytes([1; 16])); - assert_eq!(op.num_cycles, 1); - assert_eq!(op.signer_key, None); } #[test] @@ -839,41 +703,4 @@ mod tests { assert_eq!(op.stacked_ustx, u128::from_be_bytes([1; 16])); assert_eq!(op.num_cycles, 1); } - - #[test] - fn test_stack_stx_op_script_len() { - let sender_addr = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"; - let sender = StacksAddress::from_string(sender_addr).unwrap(); - let reward_addr = PoxAddress::Standard( - StacksAddress { - version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160([0x01; 20]), - }, - None, - ); - let op = StackStxOp { - sender, - reward_addr, - stacked_ustx: 10, - txid: Txid([10u8; 32]), - vtxindex: 10, - block_height: 10, - burn_header_hash: BurnchainHeaderHash([0x10; 32]), - num_cycles: 10, - signer_key: Some(StacksPublicKeyBuffer([0x01; 33])), - max_amount: Some(10), - auth_id: Some(0u32), - }; - let op_bytes = { - let mut bytes = ['T' as u8, '3' as u8].to_vec(); - op.consensus_serialize(&mut bytes) - .expect("Expected to be able to serialize op into bytes"); - bytes - }; - let script = Builder::new() - .push_opcode(opcodes::All::OP_RETURN) - .push_slice(&op_bytes) - .into_script(); - assert_eq!(script.len(), 75); - } } diff --git 
a/stackslib/src/chainstate/burn/operations/test/serialization.rs b/stackslib/src/chainstate/burn/operations/test/serialization.rs index cbc48f7e6ed..5e2d03514aa 100644 --- a/stackslib/src/chainstate/burn/operations/test/serialization.rs +++ b/stackslib/src/chainstate/burn/operations/test/serialization.rs @@ -4,14 +4,13 @@ use stacks_common::address::C32_ADDRESS_VERSION_MAINNET_SINGLESIG; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId, VRFSeed, }; -use stacks_common::types::{Address, StacksPublicKeyBuffer}; +use stacks_common::types::Address; use stacks_common::util::hash::Hash160; use stacks_common::util::secp256k1::MessageSignature; use crate::burnchains::Txid; use crate::chainstate::burn::operations::{ BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp, - VoteForAggregateKeyOp, }; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType32}; @@ -77,9 +76,6 @@ fn test_serialization_stack_stx_op() { block_height: 10, burn_header_hash: BurnchainHeaderHash([0x10; 32]), num_cycles: 10, - signer_key: None, - max_amount: None, - auth_id: None, }; let serialized_json = BlockstackOperationType::stack_stx_to_json(&op); let constructed_json = serde_json::json!({ @@ -96,58 +92,6 @@ fn test_serialization_stack_stx_op() { "stacked_ustx": 10, "burn_txid": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a", "vtxindex": 10, - "signer_key": null, - "max_amount": null, - "auth_id": null, - } - }); - - assert_json_diff::assert_json_eq!(serialized_json, constructed_json); -} - -#[test] -fn test_serialization_stack_stx_op_with_signer_key() { - let sender_addr = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"; - let sender = StacksAddress::from_string(sender_addr).unwrap(); - let reward_addr = PoxAddress::Standard( - StacksAddress { - version: C32_ADDRESS_VERSION_MAINNET_SINGLESIG, - bytes: Hash160([0x01; 20]), - }, - None, - ); - - let op = StackStxOp { - sender, - reward_addr, - stacked_ustx: 10, - txid: Txid([10u8; 32]), - vtxindex: 10, - block_height: 10, - burn_header_hash: BurnchainHeaderHash([0x10; 32]), - num_cycles: 10, - signer_key: Some(StacksPublicKeyBuffer([0x01; 33])), - max_amount: Some(10), - auth_id: Some(0u32), - }; - let serialized_json = BlockstackOperationType::stack_stx_to_json(&op); - let constructed_json = serde_json::json!({ - "stack_stx": { - "burn_block_height": 10, - "burn_header_hash": "1010101010101010101010101010101010101010101010101010101010101010", - "num_cycles": 10, - "reward_addr": "16Jswqk47s9PUcyCc88MMVwzgvHPvtEpf", - "sender": { - "address": "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2", - "address_hash_bytes": "0xaf3f91f38aa21ade7e9f95efdbc4201eeb4cf0f8", - "address_version": 26, - }, - "stacked_ustx": 10, - "burn_txid": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a", - "vtxindex": 10, - "signer_key": "01".repeat(33), - "max_amount": 10, - "auth_id": 0, } }); @@ -233,47 +177,3 @@ fn test_serialization_delegate_stx_op() { assert_json_diff::assert_json_eq!(serialized_json, constructed_json); } - -#[test] -fn test_serialization_vote_for_aggregate_key_op() { - let sender_addr = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"; - let sender = StacksAddress::from_string(sender_addr).unwrap(); - let op = VoteForAggregateKeyOp { - sender, - reward_cycle: 10, - round: 1, - signer_index: 12, - signer_key: StacksPublicKeyBuffer([0x01; 33]), - aggregate_key: StacksPublicKeyBuffer([0x02; 33]), - txid: Txid([10u8; 32]), - vtxindex: 10, - block_height: 
10, - burn_header_hash: BurnchainHeaderHash([0x10; 32]), - }; - // Test both the generic and specific serialization fns - let serialized_json = BlockstackOperationType::blockstack_op_to_json( - &BlockstackOperationType::VoteForAggregateKey(op.clone()), - ); - let specialized_json_fn = BlockstackOperationType::vote_for_aggregate_key_to_json(&op); - let constructed_json = serde_json::json!({ - "vote_for_aggregate_key": { - "aggregate_key": "02".repeat(33), - "burn_block_height": 10, - "burn_header_hash": "1010101010101010101010101010101010101010101010101010101010101010", - "reward_cycle": 10, - "round": 1, - "sender": { - "address": "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2", - "address_hash_bytes": "0xaf3f91f38aa21ade7e9f95efdbc4201eeb4cf0f8", - "address_version": 26, - }, - "signer_index": 12, - "signer_key": "01".repeat(33), - "burn_txid": "0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a0a", - "vtxindex": 10, - } - }); - - assert_json_diff::assert_json_eq!(specialized_json_fn, constructed_json.clone()); - assert_json_diff::assert_json_eq!(serialized_json, constructed_json); -} diff --git a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs b/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs deleted file mode 100644 index 3933eacaa64..00000000000 --- a/stackslib/src/chainstate/burn/operations/vote_for_aggregate_key.rs +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
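// --- Illustrative sketch (editor's addition; not part of the patch) -----------
// The module deleted below parsed a fixed 47-byte vote-for-aggregate-key payload:
//   signer_index (u16 BE) | aggregate_key (33 bytes) | round (u32 BE) | reward_cycle (u64 BE)
// This is a minimal, stand-alone encoder for that layout. The function name is
// hypothetical; the byte offsets mirror the slices used by the removed
// `parse_data` implementation ([0..2], [2..35], [35..39], [39..47]).
fn encode_vote_for_aggregate_key_payload(
    signer_index: u16,
    aggregate_key: &[u8; 33],
    round: u32,
    reward_cycle: u64,
) -> Vec<u8> {
    let mut data = Vec::with_capacity(47);
    data.extend_from_slice(&signer_index.to_be_bytes()); // bytes 0..2
    data.extend_from_slice(aggregate_key); // bytes 2..35
    data.extend_from_slice(&round.to_be_bytes()); // bytes 35..39
    data.extend_from_slice(&reward_cycle.to_be_bytes()); // bytes 39..47
    debug_assert_eq!(data.len(), 47); // matches the exact-length check in the deleted parser
    data
}
// -------------------------------------------------------------------------------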
- -use std::io::{Read, Write}; - -use stacks_common::codec::{write_next, Error as codec_error, StacksMessageCodec}; -use stacks_common::deps_common::bitcoin::blockdata::script::Builder; -use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; -use stacks_common::types::StacksPublicKeyBuffer; -use stacks_common::util::secp256k1::Secp256k1PublicKey; -use wsts::curve::point::{Compressed, Point}; - -use crate::burnchains::bitcoin::bits::parse_script; -use crate::burnchains::bitcoin::{BitcoinTxInput, BitcoinTxInputStructured}; -use crate::burnchains::{BurnchainBlockHeader, BurnchainTransaction, Txid}; -use crate::chainstate::burn::operations::{ - parse_u128_from_be, parse_u16_from_be, parse_u32_from_be, parse_u64_from_be, - BlockstackOperationType, Error as op_error, PreStxOp, VoteForAggregateKeyOp, -}; -use crate::chainstate::burn::Opcodes; -use crate::chainstate::stacks::address::PoxAddress; - -struct ParsedData { - signer_index: u16, - aggregate_key: StacksPublicKeyBuffer, - round: u32, - reward_cycle: u64, -} - -impl VoteForAggregateKeyOp { - pub fn from_tx( - block_header: &BurnchainBlockHeader, - tx: &BurnchainTransaction, - sender: &StacksAddress, - ) -> Result { - VoteForAggregateKeyOp::parse_from_tx( - block_header.block_height, - &block_header.block_hash, - tx, - sender, - ) - } - - fn parse_data(data: &Vec) -> Option { - /* - Wire format: - - 0 2 3 5 38 42 50 - |-----|----|-----------|--------------|------|------------| - magic op signer_index aggregate_key round reward_cycle - - Note that `data` is missing the first 3 bytes -- the magic and op have been stripped - */ - - if data.len() != 47 { - warn!( - "Vote for aggregate key operation data has an invalid length ({} bytes)", - data.len() - ); - return None; - } - - let signer_index = - parse_u16_from_be(&data[0..2]).expect("Failed to parse signer index from tx"); - let aggregate_key = StacksPublicKeyBuffer::from(&data[2..35]); - - let round = parse_u32_from_be(&data[35..39]).expect("Failed to parse round from tx"); - let reward_cycle = - parse_u64_from_be(&data[39..47]).expect("Failed to parse reward cycle from tx"); - - Some(ParsedData { - signer_index, - aggregate_key, - round, - reward_cycle, - }) - } - - pub fn get_sender_txid(tx: &BurnchainTransaction) -> Result<&Txid, op_error> { - match tx.get_input_tx_ref(0) { - Some((ref txid, vout)) => { - if *vout != 1 { - warn!("Invalid tx: VoteForAggregateKey must spend the second output of the PreStxOp"); - Err(op_error::InvalidInput) - } else { - Ok(txid) - } - } - None => { - warn!("Invalid tx: VoteForAggregateKey must have at least one input"); - Err(op_error::InvalidInput) - } - } - } - - pub fn get_sender_pubkey(tx: &BurnchainTransaction) -> Result { - match tx { - BurnchainTransaction::Bitcoin(ref btc) => match btc.inputs.get(0) { - Some(BitcoinTxInput::Raw(input)) => { - let script_sig = Builder::from(input.scriptSig.clone()).into_script(); - let structured_input = BitcoinTxInputStructured::from_bitcoin_p2pkh_script_sig( - &parse_script(&script_sig), - input.tx_ref, - ) - .ok_or(op_error::InvalidInput)?; - structured_input - .keys - .get(0) - .cloned() - .ok_or(op_error::InvalidInput) - } - Some(BitcoinTxInput::Structured(input)) => { - input.keys.get(0).cloned().ok_or(op_error::InvalidInput) - } - _ => Err(op_error::InvalidInput), - }, - } - } - - pub fn parse_from_tx( - block_height: u64, - block_hash: &BurnchainHeaderHash, - tx: &BurnchainTransaction, - sender: &StacksAddress, - ) -> Result { - let outputs = tx.get_recipients(); - - if tx.num_signers() == 
0 { - warn!( - "Invalid tx: inputs: {}, outputs: {}", - tx.num_signers(), - outputs.len() - ); - return Err(op_error::InvalidInput); - } - - if tx.opcode() != Opcodes::VoteForAggregateKey as u8 { - warn!("Invalid tx: invalid opcode {}", tx.opcode()); - return Err(op_error::InvalidInput); - }; - - let data = VoteForAggregateKeyOp::parse_data(&tx.data()).ok_or_else(|| { - warn!("Invalid tx data"); - op_error::ParseError - })?; - - let signer_key = VoteForAggregateKeyOp::get_sender_pubkey(tx)?; - - Ok(VoteForAggregateKeyOp { - sender: sender.clone(), - signer_index: data.signer_index, - aggregate_key: data.aggregate_key, - round: data.round, - reward_cycle: data.reward_cycle, - signer_key: signer_key.to_bytes_compressed().as_slice().into(), - txid: tx.txid(), - vtxindex: tx.vtxindex(), - block_height, - burn_header_hash: block_hash.clone(), - }) - } - - /// Check the payload of a vote-for-aggregate-key burn op. - /// Both `signer_key` and `aggregate_key` are checked for validity against - /// `Secp256k1PublicKey` from `stacks_common` as well as `Point` from wsts. - pub fn check(&self) -> Result<(), op_error> { - // Check to see if the aggregate key is valid - let aggregate_key_bytes = self.aggregate_key.as_bytes(); - Secp256k1PublicKey::from_slice(aggregate_key_bytes) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - - let compressed = Compressed::try_from(aggregate_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - - // Check to see if the signer key is valid - let signer_key_bytes = self.signer_key.as_bytes(); - Secp256k1PublicKey::from_slice(signer_key_bytes) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - - let compressed = Compressed::try_from(signer_key_bytes.clone()) - .map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - Point::try_from(&compressed).map_err(|_| op_error::VoteForAggregateKeyInvalidKey)?; - - Ok(()) - } -} - -impl StacksMessageCodec for VoteForAggregateKeyOp { - fn consensus_serialize(&self, fd: &mut W) -> Result<(), codec_error> { - /* - Wire format: - - 0 2 3 5 38 42 50 - |-----|----|-----------|--------------|------|------------| - magic op signer_index aggregate_key round reward_cycle - */ - - write_next(fd, &(Opcodes::VoteForAggregateKey as u8))?; - fd.write_all(&self.signer_index.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - fd.write_all(self.aggregate_key.as_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - fd.write_all(&self.round.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - fd.write_all(&self.reward_cycle.to_be_bytes()) - .map_err(|e| codec_error::WriteError(e))?; - - Ok(()) - } - - fn consensus_deserialize(_fd: &mut R) -> Result { - // Op deserialized through burchain indexer - unimplemented!(); - } -} - -#[cfg(test)] -mod tests { - use stacks_common::deps_common::bitcoin::blockdata::script::Builder; - use stacks_common::types; - use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksAddress}; - use stacks_common::types::{Address, StacksPublicKeyBuffer}; - use stacks_common::util::hash::*; - use stacks_common::util::secp256k1::Secp256k1PublicKey; - - use crate::burnchains::bitcoin::address::{ - BitcoinAddress, LegacyBitcoinAddress, LegacyBitcoinAddressType, - }; - use crate::burnchains::bitcoin::{ - BitcoinInputType, BitcoinNetworkType, BitcoinTransaction, BitcoinTxInput, - BitcoinTxInputRaw, BitcoinTxInputStructured, BitcoinTxOutput, - }; - use 
crate::burnchains::{BurnchainTransaction, Txid}; - use crate::chainstate::burn::operations::{Error as op_error, VoteForAggregateKeyOp}; - use crate::chainstate::burn::Opcodes; - use crate::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; - - #[test] - fn test_parse_vote_tx_signer_key() { - let aggregate_key = StacksPublicKeyBuffer([0x01; 33]); - let signer_key = StacksPublicKeyBuffer([0x02; 33]); - let signer_pubkey = Secp256k1PublicKey::from_slice(signer_key.as_bytes()).unwrap(); - let tx = BitcoinTransaction { - txid: Txid([0; 32]), - vtxindex: 0, - opcode: Opcodes::VoteForAggregateKey as u8, - data: vec![1; 47], - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![signer_pubkey], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 0), - } - .into()], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress::Legacy(LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }), - }], - }; - - let sender = StacksAddress { - version: 0, - bytes: Hash160([0; 20]), - }; - let vote_op = VoteForAggregateKeyOp::parse_from_tx( - 1000, - &BurnchainHeaderHash([0; 32]), - &BurnchainTransaction::Bitcoin(tx), - &sender, - ) - .expect("Failed to parse vote tx"); - - assert_eq!(&vote_op.sender, &sender); - assert_eq!(&vote_op.signer_key, &signer_key); - } - - #[test] - fn test_vote_tx_data() { - let round: u32 = 24; - let signer_index: u16 = 12; - let aggregate_key = StacksPublicKeyBuffer([0x01; 33]); - let signer_key = StacksPublicKeyBuffer([0x02; 33]); - let reward_cycle: u64 = 10; - - let mut data: Vec = vec![]; - - data.extend_from_slice(&signer_index.to_be_bytes()); - data.extend_from_slice(aggregate_key.as_bytes()); - data.extend_from_slice(&round.to_be_bytes()); - data.extend_from_slice(&reward_cycle.to_be_bytes()); - - let signer_key = StacksPublicKeyBuffer([0x02; 33]); - let signer_pubkey = Secp256k1PublicKey::from_slice(signer_key.as_bytes()).unwrap(); - let tx = BitcoinTransaction { - txid: Txid([0; 32]), - vtxindex: 0, - opcode: Opcodes::VoteForAggregateKey as u8, - data: data.clone(), - data_amt: 0, - inputs: vec![BitcoinTxInputStructured { - keys: vec![signer_pubkey], - num_required: 0, - in_type: BitcoinInputType::Standard, - tx_ref: (Txid([0; 32]), 0), - } - .into()], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress::Legacy(LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }), - }], - }; - - let sender = StacksAddress { - version: 0, - bytes: Hash160([0; 20]), - }; - let vote_op = VoteForAggregateKeyOp::parse_from_tx( - 1000, - &BurnchainHeaderHash([0; 32]), - &BurnchainTransaction::Bitcoin(tx), - &sender, - ) - .expect("Failed to parse vote tx"); - - debug!("Vote op test data: {:?}", to_hex(data.as_slice())); - - assert_eq!(vote_op.signer_index, signer_index); - assert_eq!(&vote_op.aggregate_key, &aggregate_key); - assert_eq!(vote_op.round, round as u32); - assert_eq!(vote_op.reward_cycle, reward_cycle); - } - - #[test] - fn test_raw_input_signer_key() { - let aggregate_key = StacksPublicKeyBuffer([0x01; 33]); - let signer_key = Secp256k1PublicKey::from_hex("040fadbbcea0ff3b05f03195b41cd991d7a0af8bd38559943aec99cbdaf0b22cc806b9a4f07579934774cc0c155e781d45c989f94336765e88a66d91cfb9f060b0").unwrap(); - let tx = BitcoinTransaction { - txid: Txid([0; 32]), - vtxindex: 0, - opcode: 
Opcodes::VoteForAggregateKey as u8, - data: vec![1; 47], - data_amt: 0, - inputs: vec![BitcoinTxInput::Raw(BitcoinTxInputRaw { - scriptSig: hex_bytes("483045022100be57031bf2c095945ba2876e97b3f86ee051643a29b908f22ed45ccf58620103022061e056e5f48c5a51c66604a1ca28e4bfaabab1478424c9bbb396cc6afe5c222e0141040fadbbcea0ff3b05f03195b41cd991d7a0af8bd38559943aec99cbdaf0b22cc806b9a4f07579934774cc0c155e781d45c989f94336765e88a66d91cfb9f060b0").unwrap(), - witness: vec![], - tx_ref: (Txid([0; 32]), 0), - })], - outputs: vec![BitcoinTxOutput { - units: 10, - address: BitcoinAddress::Legacy(LegacyBitcoinAddress { - addrtype: LegacyBitcoinAddressType::PublicKeyHash, - network_id: BitcoinNetworkType::Mainnet, - bytes: Hash160([1; 20]), - }), - }], - }; - - let sender = StacksAddress { - version: 0, - bytes: Hash160([0; 20]), - }; - let vote_op = VoteForAggregateKeyOp::parse_from_tx( - 1000, - &BurnchainHeaderHash([0; 32]), - &BurnchainTransaction::Bitcoin(tx), - &sender, - ) - .expect("Failed to parse vote tx"); - - assert_eq!(&vote_op.sender, &sender); - assert_eq!( - &vote_op.signer_key, - &signer_key.to_bytes_compressed().as_slice().into() - ); - } - - #[test] - fn test_key_validation() { - let sender_addr = "ST2QKZ4FKHAH1NQKYKYAYZPY440FEPK7GZ1R5HBP2"; - let sender = StacksAddress::from_string(sender_addr).unwrap(); - let op = VoteForAggregateKeyOp { - sender, - reward_cycle: 10, - round: 1, - signer_index: 12, - signer_key: StacksPublicKeyBuffer([0x00; 33]), - aggregate_key: StacksPublicKeyBuffer([0x00; 33]), - txid: Txid([10u8; 32]), - vtxindex: 10, - block_height: 10, - burn_header_hash: BurnchainHeaderHash([0x10; 32]), - }; - - match op.check() { - Ok(_) => panic!("Invalid key should not pass validation"), - Err(op_error::VoteForAggregateKeyInvalidKey) => (), - Err(e) => panic!("Unexpected error: {:?}", e), - } - } -} diff --git a/stackslib/src/chainstate/burn/sortition.rs b/stackslib/src/chainstate/burn/sortition.rs index 7d86f848037..865b4f432dd 100644 --- a/stackslib/src/chainstate/burn/sortition.rs +++ b/stackslib/src/chainstate/burn/sortition.rs @@ -117,10 +117,7 @@ impl BlockSnapshot { } pub fn get_canonical_stacks_block_id(&self) -> StacksBlockId { - StacksBlockId::new( - &self.canonical_stacks_tip_consensus_hash, - &self.canonical_stacks_tip_hash, - ) + StacksBlockId::new(&self.consensus_hash, &self.canonical_stacks_tip_hash) } /// Given the weighted burns, VRF seed of the last winner, and sortition hash, pick the next diff --git a/stackslib/src/chainstate/coordinator/mod.rs b/stackslib/src/chainstate/coordinator/mod.rs index aee18533048..a05cbc94b14 100644 --- a/stackslib/src/chainstate/coordinator/mod.rs +++ b/stackslib/src/chainstate/coordinator/mod.rs @@ -26,7 +26,6 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::database::BurnStateDB; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use clarity::vm::Value; -use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksBlockId, }; @@ -178,7 +177,6 @@ pub trait BlockEventDispatcher { mblock_confirmed_consumed: &ExecutionCost, pox_constants: &PoxConstants, reward_set_data: &Option, - signer_bitvec: &Option>, ); /// called whenever a burn block is about to be @@ -239,8 +237,6 @@ pub struct ChainsCoordinator< /// Used to tell the P2P thread that the stackerdb /// needs to be refreshed. 
pub refresh_stacker_db: Arc, - /// whether or not the canonical tip is now a Nakamoto header - pub in_nakamoto_epoch: bool, } #[derive(Debug)] @@ -542,7 +538,6 @@ impl< config, burnchain_indexer, refresh_stacker_db: comms.refresh_stacker_db.clone(), - in_nakamoto_epoch: false, }; let mut nakamoto_available = false; @@ -704,7 +699,6 @@ impl<'a, T: BlockEventDispatcher, U: RewardSetProvider, B: BurnchainHeaderReader config: ChainsCoordinatorConfig::new(), burnchain_indexer, refresh_stacker_db: Arc::new(AtomicBool::new(false)), - in_nakamoto_epoch: false, } } } @@ -2489,7 +2483,7 @@ impl< BurnchainDB::get_burnchain_block(&self.burnchain_blocks_db.conn(), &cursor) .map_err(|e| { warn!( - "ChainsCoordinator: could not retrieve block burnhash={}", + "ChainsCoordinator: could not retrieve block burnhash={}", &cursor ); Error::NonContiguousBurnchainBlock(e) diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 72f3ed71215..e3fc8f21c4f 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -38,7 +38,6 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, PoxId, SortitionId, StacksAddress, StacksBlockId, TrieHash, VRFSeed, }; -use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Hash160}; use stacks_common::util::secp256k1::MessageSignature; use stacks_common::util::vrf::*; @@ -428,7 +427,6 @@ impl BlockEventDispatcher for NullEventDispatcher { _confirmed_mblock_cost: &ExecutionCost, _pox_constants: &PoxConstants, _reward_set_data: &Option, - _signer_bitvec: &Option>, ) { assert!( false, @@ -513,7 +511,6 @@ impl RewardSetProvider for StubbedRewardSetProvider { missed_reward_slots: vec![], }, signers: None, - pox_ustx_threshold: None, }) } @@ -602,7 +599,6 @@ pub fn get_chainstate(path: &str) -> StacksChainState { } fn make_genesis_block( - burnchain: &Burnchain, sort_db: &SortitionDB, state: &mut StacksChainState, parent_block: &BlockHeaderHash, @@ -612,7 +608,6 @@ fn make_genesis_block( key_index: u32, ) -> (BlockstackOperationType, StacksBlock) { make_genesis_block_with_recipients( - burnchain, sort_db, state, parent_block, @@ -627,7 +622,6 @@ fn make_genesis_block( /// build a stacks block with just the coinbase off of /// parent_block, in the canonical sortition fork. 
fn make_genesis_block_with_recipients( - burnchain: &Burnchain, sort_db: &SortitionDB, state: &mut StacksChainState, parent_block: &BlockHeaderHash, @@ -658,7 +652,6 @@ fn make_genesis_block_with_recipients( let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); let mut builder = StacksBlockBuilder::make_regtest_block_builder( - burnchain, &parent_stacks_header, proof.clone(), 0, @@ -667,7 +660,7 @@ fn make_genesis_block_with_recipients( .unwrap(); let iconn = sort_db.index_conn(); - let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); + let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn).unwrap(); let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = builder .epoch_begin(&iconn, &mut miner_epoch_info) @@ -924,14 +917,13 @@ fn make_stacks_block_with_input( let iconn = sort_db.index_conn(); let mut builder = StacksBlockBuilder::make_regtest_block_builder( - burnchain, &parent_stacks_header, proof.clone(), total_burn, next_hash160(), ) .unwrap(); - let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn, true).unwrap(); + let mut miner_epoch_info = builder.pre_epoch_begin(state, &iconn).unwrap(); let ast_rules = miner_epoch_info.ast_rules.clone(); let mut epoch_tx = builder .epoch_begin(&iconn, &mut miner_epoch_info) @@ -1137,7 +1129,6 @@ fn missed_block_commits_2_05() { let (mut good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -1466,7 +1457,6 @@ fn missed_block_commits_2_1() { let (mut good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -1809,7 +1799,6 @@ fn late_block_commits_2_1() { let (mut good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -2073,7 +2062,6 @@ fn test_simple_setup() { let (op, block) = if ix == 0 { make_genesis_block( - &b, &sort_db, &mut chainstate, &parent, @@ -2341,7 +2329,6 @@ fn test_sortition_with_reward_set() { let b = get_burnchain(path, None); let (good_op, mut block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -2611,7 +2598,6 @@ fn test_sortition_with_burner_reward_set() { let b = get_burnchain(path, None); let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -2855,7 +2841,6 @@ fn test_pox_btc_ops() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -2898,9 +2883,6 @@ fn test_pox_btc_ops() { reward_addr: rewards.clone(), stacked_ustx: stacked_amt, num_cycles: 4, - signer_key: Some(StacksPublicKeyBuffer([0x02; 33])), - max_amount: Some(u128::MAX), - auth_id: Some(0u32), txid: next_txid(), vtxindex: 5, block_height: 0, @@ -3142,7 +3124,6 @@ fn test_stx_transfer_btc_ops() { let b = get_burnchain(path, pox_consts.clone()); let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -3560,7 +3541,6 @@ fn test_delegate_stx_btc_ops() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -3895,7 +3875,6 @@ fn test_initial_coinbase_reward_distributions() { let b = get_burnchain(path, pox_consts.clone()); let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -4091,7 +4070,6 @@ fn test_epoch_switch_cost_contract_instantiation() { let (good_op, 
block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -4295,7 +4273,6 @@ fn test_epoch_switch_pox_2_contract_instantiation() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -4501,7 +4478,6 @@ fn test_epoch_switch_pox_3_contract_instantiation() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -4790,7 +4766,6 @@ fn atlas_stop_start() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -4939,7 +4914,7 @@ fn test_epoch_verify_active_pox_contract() { let _r = std::fs::remove_dir_all(path); let pox_v1_unlock_ht = 12; - let pox_v2_unlock_ht = u32::MAX; + let pox_v2_unlock_ht = u32::max_value(); let sunset_ht = 8000; let pox_consts = Some(PoxConstants::new( 6, @@ -5044,7 +5019,6 @@ fn test_epoch_verify_active_pox_contract() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -5102,9 +5076,6 @@ fn test_epoch_verify_active_pox_contract() { reward_addr: rewards.clone(), stacked_ustx: stacked_amt, num_cycles: 1, - signer_key: None, - max_amount: None, - auth_id: None, txid: next_txid(), vtxindex: 5, block_height: 0, @@ -5120,9 +5091,6 @@ fn test_epoch_verify_active_pox_contract() { reward_addr: rewards.clone(), stacked_ustx: stacked_amt * 2, num_cycles: 5, - signer_key: None, - max_amount: None, - auth_id: None, txid: next_txid(), vtxindex: 6, block_height: 0, @@ -5136,9 +5104,6 @@ fn test_epoch_verify_active_pox_contract() { reward_addr: rewards.clone(), stacked_ustx: stacked_amt * 4, num_cycles: 1, - signer_key: None, - max_amount: None, - auth_id: None, txid: next_txid(), vtxindex: 7, block_height: 0, @@ -5417,7 +5382,6 @@ fn test_sortition_with_sunset() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -5756,7 +5720,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( - &b, &sort_db, &mut chainstate, &parent, @@ -5987,7 +5950,6 @@ fn test_pox_processable_block_in_different_pox_forks() { eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { make_genesis_block( - &b, &sort_db, &mut chainstate, &BlockHeaderHash([0; 32]), @@ -6273,7 +6235,6 @@ fn test_pox_no_anchor_selected() { eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { make_genesis_block( - &b, &sort_db, &mut chainstate, &BlockHeaderHash([0; 32]), @@ -6488,7 +6449,6 @@ fn test_pox_fork_out_of_order() { eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { make_genesis_block( - &b, &sort_db, &mut chainstate, &BlockHeaderHash([0; 32]), diff --git a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs index 4bb23efc815..15793b33c88 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/mod.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/mod.rs @@ -483,44 +483,6 @@ impl< if (bits & (CoordinatorEvents::NEW_STACKS_BLOCK as u8)) != 0 { signal_mining_blocked(miner_status.clone()); debug!("Received new Nakamoto stacks block notice"); - - // we may still be processing epoch 2 blocks after the Nakamoto transition, so be sure - // to process them so we can get to the Nakamoto blocks! 
- if !self.in_nakamoto_epoch { - debug!("Check to see if the system has entered the Nakamoto epoch"); - if let Ok(Some(canonical_header)) = NakamotoChainState::get_canonical_block_header( - &self.chain_state_db.db(), - &self.sortition_db, - ) { - if canonical_header.is_nakamoto_block() { - // great! don't check again - debug!( - "The canonical Stacks tip ({}/{}) is a Nakamoto block!", - &canonical_header.consensus_hash, - &canonical_header.anchored_header.block_hash() - ); - self.in_nakamoto_epoch = true; - } else { - // need to process epoch 2 blocks - debug!("Received new epoch 2.x Stacks block notice"); - match self.handle_new_stacks_block() { - Ok(missing_block_opt) => { - if missing_block_opt.is_some() { - debug!( - "Missing affirmed anchor block: {:?}", - &missing_block_opt.as_ref().expect("unreachable") - ); - } - } - Err(e) => { - warn!("Error processing new stacks block: {:?}", e); - } - } - } - } - } - - // now we can process the nakamoto block match self.handle_new_nakamoto_stacks_block() { Ok(new_anchor_block_opt) => { if let Some(bhh) = new_anchor_block_opt { @@ -876,21 +838,19 @@ impl< // N.B. it's `- 2` because `is_reward_cycle_start` implies that `block_height % reward_cycle_length == 1`, // but this call needs `block_height % reward_cycle_length == reward_cycle_length - 1` -- i.e. `block_height` // must be the last block height in the last reward cycle. - let end_cycle_block_height = header.block_height.saturating_sub(2); let reward_cycle_info = - self.get_nakamoto_reward_cycle_info(end_cycle_block_height)?; + self.get_nakamoto_reward_cycle_info(header.block_height - 2)?; if let Some(rc_info) = reward_cycle_info.as_ref() { // in nakamoto, if we have any reward cycle info at all, it will be known. - // otherwise, we may have to process some more Stacks blocks - if rc_info.known_selected_anchor_block().is_none() { - warn!("Unknown PoX anchor block in Nakamoto (at height {}). 
Refusing to process more burnchain blocks until that changes.", end_cycle_block_height); - return Ok(false); - } + assert!( + rc_info.known_selected_anchor_block().is_some(), + "FATAL: unknown PoX anchor block in Nakamoto" + ); } else { // have to block -- we don't have the reward cycle information debug!("Do not yet have PoX anchor block for next reward cycle -- no anchor block found"; "next_reward_cycle" => self.burnchain.block_height_to_reward_cycle(header.block_height), - "reward_cycle_end" => end_cycle_block_height + "reward_cycle_end" => header.block_height - 2 ); return Ok(false); } diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index 6ec9fe37a53..f2dcfe4c181 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -403,7 +403,6 @@ fn replay_reward_cycle( &mut sort_handle, &mut node.chainstate, block.clone(), - None, ) .unwrap(); if accepted { @@ -641,7 +640,7 @@ fn test_nakamoto_chainstate_getters() { assert_eq!( NakamotoChainState::check_sortition_exists(&mut sort_tx, &sort_tip.consensus_hash) .unwrap(), - sort_tip + (sort_tip.burn_header_hash.clone(), sort_tip.block_height) ); } diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 33ee2653690..5edeac4c637 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -60,7 +60,7 @@ use crate::chainstate::stacks::db::{ }; use crate::chainstate::stacks::events::{StacksTransactionEvent, StacksTransactionReceipt}; use crate::chainstate::stacks::miner::{ - BlockBuilder, BlockBuilderSettings, BlockLimitFunction, TransactionError, TransactionEvent, + BlockBuilder, BlockBuilderSettings, BlockLimitFunction, TransactionError, TransactionProblematic, TransactionResult, TransactionSkipped, }; use crate::chainstate::stacks::{Error, StacksBlockHeader, *}; @@ -406,7 +406,7 @@ impl NakamotoBlockBuilder { settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, signer_transactions: Vec, - ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { + ) -> Result<(NakamotoBlock, ExecutionCost, u64), Error> { let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), parent_stacks_header.anchored_header.block_hash(), @@ -485,6 +485,16 @@ impl NakamotoBlockBuilder { let ts_end = get_epoch_time_ms(); + if let Some(observer) = event_observer { + observer.mined_nakamoto_block_event( + SortitionDB::get_canonical_burn_chain_tip(burn_dbconn.conn())?.block_height + 1, + &block, + size, + &consumed, + tx_events, + ); + } + set_last_mined_block_transaction_count(block.txs.len() as u64); set_last_mined_execution_cost_observed(&consumed, &block_limit); @@ -501,7 +511,7 @@ impl NakamotoBlockBuilder { "assembly_time_ms" => ts_end.saturating_sub(ts_start), ); - Ok((block, consumed, size, tx_events)) + Ok((block, consumed, size)) } pub fn get_bytes_so_far(&self) -> u64 { @@ -514,22 +524,19 @@ impl NakamotoBlockBuilder { /// Returns Some(chunk) if the given key corresponds to one of the expected miner slots /// Returns None if not /// Returns an error on signing or DB error - pub fn make_stackerdb_block_proposal( + pub fn make_stackerdb_block_proposal( sortdb: &SortitionDB, tip: &BlockSnapshot, stackerdbs: &StackerDBs, - block: &T, + block: &NakamotoBlock, miner_privkey: &StacksPrivateKey, miners_contract_id: &QualifiedContractIdentifier, ) -> 
Result, Error> { let miner_pubkey = StacksPublicKey::from_private(&miner_privkey); - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey)? - else { + let Some(slot_id) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey)? else { // No slot exists for this miner return Ok(None); }; - // proposal slot is the first slot. - let slot_id = slot_range.start; // Get the LAST slot version number written to the DB. If not found, use 0. // Add 1 to get the NEXT version number // Note: we already check above for the slot's existence diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 2b3635660ec..8ab9f22697b 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -16,7 +16,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; use std::fs; -use std::ops::{Deref, DerefMut, Range}; +use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use clarity::vm::ast::ASTRules; @@ -26,7 +26,6 @@ use clarity::vm::events::StacksTransactionEvent; use clarity::vm::types::{PrincipalData, StacksAddressExtensions, TupleData}; use clarity::vm::{ClarityVersion, SymbolicExpression, Value}; use lazy_static::{__Deref, lazy_static}; -use rusqlite::blob::Blob; use rusqlite::types::{FromSql, FromSqlError}; use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS}; use sha2::{Digest as Sha2Digest, Sha512_256}; @@ -55,7 +54,7 @@ use super::burn::db::sortdb::{ get_ancestor_sort_id, get_ancestor_sort_id_tx, get_block_commit_by_txid, SortitionHandle, SortitionHandleConn, SortitionHandleTx, }; -use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp}; +use super::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp}; use super::stacks::boot::{ PoxVersions, RawRewardSetEntry, RewardSet, RewardSetData, BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, SIGNERS_MAX_LIST_SIZE, SIGNERS_NAME, SIGNERS_PK_LEN, @@ -90,7 +89,7 @@ use crate::clarity_vm::clarity::{ }; use crate::clarity_vm::database::SortitionDBRef; use crate::core::BOOT_BLOCK_HASH; -use crate::net::stackerdb::{StackerDBConfig, MINER_SLOT_COUNT}; +use crate::net::stackerdb::StackerDBConfig; use crate::net::Error as net_error; use crate::util_lib::boot; use crate::util_lib::boot::boot_code_id; @@ -279,8 +278,6 @@ pub struct SetupBlockResult<'a, 'b> { pub auto_unlock_events: Vec, /// Result of a signer set calculation if one occurred pub signer_set_calc: Option, - /// vote-for-aggregate-key Stacks-on-Bitcoin txs - pub burn_vote_for_aggregate_key_ops: Vec, } #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] @@ -489,13 +486,6 @@ impl NakamotoBlockHeader { Ok(()) } - /// Verify the block header against an aggregate public key - pub fn verify_signer(&self, signer_aggregate: &Point) -> bool { - let schnorr_signature = &self.signer_signature.0; - let message = self.signer_signature_hash().0; - schnorr_signature.verify(signer_aggregate, &message) - } - /// Make an "empty" header whose block data needs to be filled in. /// This is used by the miner code. 
pub fn from_parent_empty( @@ -654,7 +644,7 @@ impl NakamotoBlock { /// Try to get the first transaction in the block as a tenure-change /// Return Some(tenure-change-payload) if it's a tenure change /// Return None if not - pub fn try_get_tenure_change_payload(&self) -> Option<&TenureChangePayload> { + fn try_get_tenure_change_payload(&self) -> Option<&TenureChangePayload> { if self.txs.len() == 0 { return None; } @@ -1056,7 +1046,7 @@ impl NakamotoBlock { /// Arguments /// -- `tenure_burn_chain_tip` is the BlockSnapshot containing the block-commit for this block's /// tenure. It is not always the tip of the burnchain. - /// -- `expected_burn` is the total number of burnchain tokens spent, if known. + /// -- `expected_burn` is the total number of burnchain tokens spent /// -- `leader_key` is the miner's leader key registration transaction /// /// Verifies the following: @@ -1069,7 +1059,7 @@ impl NakamotoBlock { pub fn validate_against_burnchain( &self, tenure_burn_chain_tip: &BlockSnapshot, - expected_burn: Option, + expected_burn: u64, leader_key: &LeaderKeyRegisterOp, ) -> Result<(), ChainstateError> { // this block's consensus hash must match the sortition that selected it @@ -1084,16 +1074,14 @@ impl NakamotoBlock { } // this block must commit to all of the work seen so far - if let Some(expected_burn) = expected_burn { - if self.header.burn_spent != expected_burn { - warn!("Invalid Nakamoto block header: invalid total burns"; - "header.burn_spent" => self.header.burn_spent, - "expected_burn" => expected_burn, - ); - return Err(ChainstateError::InvalidStacksBlock( - "Invalid Nakamoto block: invalid total burns".into(), - )); - } + if self.header.burn_spent != expected_burn { + warn!("Invalid Nakamoto block header: invalid total burns"; + "header.burn_spent" => self.header.burn_spent, + "expected_burn" => expected_burn, + ); + return Err(ChainstateError::InvalidStacksBlock( + "Invalid Nakamoto block: invalid total burns".into(), + )); } // miner must have signed this block @@ -1276,7 +1264,6 @@ impl NakamotoChainState { nakamoto_blocks_db.next_ready_nakamoto_block(stacks_chain_state.db(), sort_tx)? else { // no more blocks - test_debug!("No more Nakamoto blocks to process"); return Ok(None); }; @@ -1450,8 +1437,6 @@ impl NakamotoChainState { // succeeds, since *we have already processed* the block. Self::infallible_set_block_processed(stacks_chain_state, &block_id); - let signer_bitvec = (&next_ready_block).header.signer_bitvec.clone(); - // announce the block, if we're connected to an event dispatcher if let Some(dispatcher) = dispatcher_opt { let block_event = ( @@ -1474,7 +1459,6 @@ impl NakamotoChainState { &receipt.parent_microblocks_cost, &pox_constants, &reward_set_data, - &Some(signer_bitvec), ); } @@ -1488,24 +1472,26 @@ impl NakamotoChainState { /// * otherwise, it's the highest processed tenure's sortition consensus hash's snapshot's burn /// total. /// - /// This function will return Ok(None) if the given block's parent is not yet processed. This - /// by itself is not necessarily an error, because a block can be stored for subsequent - /// processing before its parent has been processed. The `Self::append_block()` function, - /// however, will flag a block as invalid in this case, because the parent must be available in - /// order to process a block. 
+ /// TODO: unit test pub(crate) fn get_expected_burns<SH: SortitionHandle>( sort_handle: &mut SH, chainstate_conn: &Connection, block: &NakamotoBlock, - ) -> Result<Option<u64>, ChainstateError> { + ) -> Result<u64, ChainstateError> { let burn_view_ch = if let Some(tenure_payload) = block.get_tenure_tx_payload() { tenure_payload.burn_view_consensus_hash } else { // if there's no new tenure for this block, the burn total should be the same as its parent - let parent_burns_opt = - Self::get_block_header(chainstate_conn, &block.header.parent_block_id)? - .map(|parent| parent.anchored_header.total_burns()); - return Ok(parent_burns_opt); + let parent = Self::get_block_header(chainstate_conn, &block.header.parent_block_id)? + .ok_or_else(|| { + warn!("Could not load expected burns -- no parent block"; + "block_id" => %block.block_id(), + "parent_block_id" => %block.header.parent_block_id + ); + ChainstateError::NoSuchBlockError + })?; + + return Ok(parent.anchored_header.total_burns()); }; let burn_view_sn = SortitionDB::get_block_snapshot_consensus(sort_handle.sqlite(), &burn_view_ch)? @@ -1515,7 +1501,7 @@ ); ChainstateError::NoSuchBlockError })?; - Ok(Some(burn_view_sn.total_burn)) + Ok(burn_view_sn.total_burn) } /// Validate that a Nakamoto block attaches to the burn chain state. @@ -1524,7 +1510,7 @@ /// verifies that all transactions in the block are allowed in this epoch. pub fn validate_nakamoto_block_burnchain( db_handle: &SortitionHandleConn, - expected_burn: Option<u64>, + expected_burn: u64, block: &NakamotoBlock, mainnet: bool, chain_id: u32, @@ -1635,18 +1621,11 @@ burn_attachable: bool, ) -> Result<(), ChainstateError> { let block_id = block.block_id(); - let Ok(tenure_start) = block.is_wellformed_tenure_start_block() else { - return Err(ChainstateError::InvalidStacksBlock( - "Tried to store a tenure-start block that is not well-formed".into(), - )); - }; staging_db_tx.execute( "INSERT INTO nakamoto_staging_blocks ( block_hash, consensus_hash, parent_block_id, - is_tenure_start, burn_attachable, orphaned, processed, @@ -1657,12 +1636,11 @@ arrival_time, processed_time, data - ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12, ?13)", + ) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)", params![ &block.header.block_hash(), &block.header.consensus_hash, &block.header.parent_block_id, - &tenure_start, if burn_attachable { 1 } else { 0 }, 0, 0, @@ -1721,15 +1699,18 @@ ChainstateError::InvalidStacksBlock("Not a well-formed tenure-extend block".into()) })?; - // it's okay if this fails because we might not have the parent block yet. It will be - // checked on `::append_block()` - let expected_burn_opt = Self::get_expected_burns(db_handle, headers_conn, &block)?; + let Ok(expected_burn) = Self::get_expected_burns(db_handle, headers_conn, &block) else { + warn!("Unacceptable Nakamoto block: unable to find its paired sortition"; + "block_id" => %block.block_id(), + ); + return Ok(false); + }; // this block must be consistent with its miner's leader-key and block-commit, and must // contain only transactions that are valid in this epoch.
if let Err(e) = Self::validate_nakamoto_block_burnchain( db_handle, - expected_burn_opt, + expected_burn, &block, config.mainnet, config.chain_id, @@ -1755,10 +1736,10 @@ impl NakamotoChainState { return Err(ChainstateError::InvalidStacksBlock(msg)); } - // if we pass all the tests, then along the way, we will have verified (in - // Self::validate_nakamoto_block_burnchain) that the consensus hash of this block is on the - // same sortition history as `db_handle` (and thus it must be burn_attachable) - let burn_attachable = true; + // if the burnchain block of this Stacks block's tenure has been processed, then it + // is ready to be processed from the perspective of the burnchain + let burn_attachable = + SortitionDB::has_block_snapshot_consensus(&db_handle, &block.header.consensus_hash)?; let _block_id = block.block_id(); Self::store_block(staging_db_tx, block, burn_attachable)?; @@ -1767,13 +1748,12 @@ impl NakamotoChainState { } /// Get the aggregate public key for the given block from the signers-voting contract - pub(crate) fn load_aggregate_public_key( + fn load_aggregate_public_key( sortdb: &SortitionDB, sort_handle: &SH, chainstate: &mut StacksChainState, for_burn_block_height: u64, at_block_id: &StacksBlockId, - warn_if_not_found: bool, ) -> Result { // Get the current reward cycle let Some(rc) = sort_handle.pox_constants().block_height_to_reward_cycle( @@ -1789,22 +1769,18 @@ impl NakamotoChainState { return Err(ChainstateError::InvalidStacksBlock(msg)); }; - test_debug!( + debug!( "get-approved-aggregate-key at block {}, cycle {}", - at_block_id, - rc + at_block_id, rc ); match chainstate.get_aggregate_public_key_pox_4(sortdb, at_block_id, rc)? { Some(key) => Ok(key), None => { - // this can happen for a whole host of reasons - if warn_if_not_found { - warn!( - "Failed to get aggregate public key"; - "block_id" => %at_block_id, - "reward_cycle" => rc, - ); - } + warn!( + "Failed to get aggregate public key"; + "block_id" => %at_block_id, + "reward_cycle" => rc, + ); Err(ChainstateError::InvalidStacksBlock( "Failed to get aggregate public key".into(), )) @@ -1826,11 +1802,6 @@ impl NakamotoChainState { .ok_or(ChainstateError::DBError(DBError::NotFoundError))?; let aggregate_key_block_header = Self::get_canonical_block_header(chainstate.db(), sortdb)?.unwrap(); - let epoch_id = SortitionDB::get_stacks_epoch(sortdb.conn(), block_sn.block_height)? - .ok_or(ChainstateError::InvalidStacksBlock( - "Failed to get epoch ID".into(), - ))? 
- .epoch_id; let aggregate_public_key = Self::load_aggregate_public_key( sortdb, @@ -1838,7 +1809,6 @@ chainstate, block_sn.block_height, &aggregate_key_block_header.index_block_hash(), - epoch_id >= StacksEpochId::Epoch30, )?; Ok(aggregate_public_key) } @@ -1929,18 +1899,6 @@ impl NakamotoChainState { Ok(None) } - /// Load a Nakamoto header - pub fn get_block_header_nakamoto( - chainstate_conn: &Connection, - index_block_hash: &StacksBlockId, - ) -> Result<Option<StacksHeaderInfo>, ChainstateError> { - let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; - let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { - "FATAL: multiple rows for the same block hash".to_string() - })?; - Ok(result) - } - /// Load an epoch2 header pub fn get_block_header_epoch2( chainstate_conn: &Connection, @@ -1959,8 +1917,12 @@ chainstate_conn: &Connection, index_block_hash: &StacksBlockId, ) -> Result<Option<StacksHeaderInfo>, ChainstateError> { - if let Some(header) = Self::get_block_header_nakamoto(chainstate_conn, index_block_hash)? { - return Ok(Some(header)); + let sql = "SELECT * FROM nakamoto_block_headers WHERE index_block_hash = ?1"; + let result = query_row_panic(chainstate_conn, sql, &[&index_block_hash], || { + "FATAL: multiple rows for the same block hash".to_string() + })?; + if result.is_some() { + return Ok(result); } Self::get_block_header_epoch2(chainstate_conn, index_block_hash) @@ -2308,7 +2270,6 @@ impl NakamotoChainState { burn_stack_stx_ops: Vec<StackStxOp>, burn_transfer_stx_ops: Vec<TransferStxOp>, burn_delegate_stx_ops: Vec<DelegateStxOp>, - burn_vote_for_aggregate_key_ops: Vec<VoteForAggregateKeyOp>, new_tenure: bool, block_fees: u128, ) -> Result<StacksHeaderInfo, ChainstateError> { @@ -2393,7 +2354,6 @@ burn_stack_stx_ops, burn_transfer_stx_ops, burn_delegate_stx_ops, - burn_vote_for_aggregate_key_ops, )?; if let Some(matured_miner_payouts) = mature_miner_payouts_opt { @@ -2529,7 +2489,7 @@ }; // TODO: only need to do this if this is a tenure-start block - let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_ops) = + let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops) = StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( chainstate_tx, &parent_index_hash, @@ -2656,10 +2616,6 @@ burn_header_height.into(), coinbase_height, )?; - tx_receipts.extend(StacksChainState::process_vote_for_aggregate_key_ops( - &mut clarity_tx, - vote_for_agg_key_ops.clone(), - )); } else { signer_set_calc = None; } @@ -2681,7 +2637,6 @@ auto_unlock_events, burn_delegate_stx_ops: delegate_burn_ops, signer_set_calc, - burn_vote_for_aggregate_key_ops: vote_for_agg_key_ops, }) } @@ -2739,7 +2694,7 @@ ChainstateError, > { debug!( - "Process Nakamoto block {:?} with {} transactions", + "Process block {:?} with {} transactions", &block.header.block_hash().to_hex(), block.txs.len() ); @@ -2773,10 +2728,8 @@ // look up this block's sortition's burnchain block hash and height. // It must exist in the same Bitcoin fork as our `burn_dbconn`.
- let tenure_block_snapshot = + let (burn_header_hash, burn_header_height) = Self::check_sortition_exists(burn_dbconn, &block.header.consensus_hash)?; - let burn_header_hash = tenure_block_snapshot.burn_header_hash.clone(); - let burn_header_height = tenure_block_snapshot.block_height; let block_hash = block.header.block_hash(); let new_tenure = match block.is_wellformed_tenure_start_block() { @@ -2824,7 +2777,7 @@ Self::get_coinbase_height(chainstate_tx.deref(), &parent_block_id)?.ok_or_else( || { warn!( "Parent of Nakamoto block is not in block headers DB yet"; "block_hash" => %block.header.block_hash(), "parent_block_hash" => %parent_block_hash, "parent_block_id" => %parent_block_id ); ChainstateError::NoSuchBlockError }, )? }; - let expected_burn_opt = Self::get_expected_burns(burn_dbconn, chainstate_tx, block) - .map_err(|e| { - warn!("Unacceptable Nakamoto block: could not load expected burns (unable to find its paired sortition)"; - "block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, - "error" => e.to_string(), - ); - ChainstateError::InvalidStacksBlock("Invalid Nakamoto block: could not find sortition burns".into()) - })?; - - let Some(expected_burn) = expected_burn_opt else { - warn!("Unacceptable Nakamoto block: unable to find parent block's burns"; - "block_id" => %block.block_id(), - "parent_block_id" => %block.header.parent_block_id, - ); - return Err(ChainstateError::InvalidStacksBlock( - "Invalid Nakamoto block: could not find sortition burns".into(), - )); - }; - - // this block must commit to all of the burnchain spends seen so far - if block.header.burn_spent != expected_burn { - warn!("Invalid Nakamoto block header: invalid total burns"; - "header.burn_spent" => block.header.burn_spent, - "expected_burn" => expected_burn, - ); - return Err(ChainstateError::InvalidStacksBlock( - "Invalid Nakamoto block: invalid total burns".into(), - )); - } - - // this block's tenure's block-commit contains the hash of the parent tenure's tenure-start - // block. - // (note that we can't check this earlier, since we need the parent tenure to have been - // processed) - if new_tenure && parent_chain_tip.is_nakamoto_block() && !block.is_first_mined() { - let tenure_block_commit = burn_dbconn - .get_block_commit( - &tenure_block_snapshot.winning_block_txid, - &tenure_block_snapshot.sortition_id, - )? - .ok_or_else(|| { - warn!("Invalid Nakamoto block: has no block-commit in its sortition"; - "block_id" => %block.header.block_id(), - "sortition_id" => %tenure_block_snapshot.sortition_id, - "block_commit_txid" => %tenure_block_snapshot.winning_block_txid); - ChainstateError::NoSuchBlockError - })?; - - let parent_tenure_start_header = - Self::get_nakamoto_tenure_start_block_header(chainstate_tx.tx(), &parent_ch)?
- .ok_or_else(|| { - warn!("Invalid Nakamoto block: no start-tenure block for parent"; - "parent_consensus_hash" => %parent_ch, - "block_id" => %block.header.block_id()); - - ChainstateError::NoSuchBlockError - })?; - - if parent_tenure_start_header.index_block_hash() != tenure_block_commit.last_tenure_id() - { - warn!("Invalid Nakamoto block: its tenure's block-commit's block ID hash does not match its parent tenure's start block"; - "block_id" => %block.header.block_id(), - "parent_consensus_hash" => %parent_ch, - "parent_tenure_start_block_id" => %parent_tenure_start_header.index_block_hash(), - "block_commit.last_tenure_id" => %tenure_block_commit.last_tenure_id()); - - return Err(ChainstateError::NoSuchBlockError); - } - } - // verify VRF proof, if present // only need to do this once per tenure // get the resulting vrf proof bytes @@ -2962,7 +2844,6 @@ burn_delegate_stx_ops, mut auto_unlock_events, signer_set_calc, - burn_vote_for_aggregate_key_ops, } = Self::setup_block( chainstate_tx, clarity_instance, @@ -3122,7 +3003,6 @@ burn_stack_stx_ops, burn_transfer_stx_ops, burn_delegate_stx_ops, - burn_vote_for_aggregate_key_ops, new_tenure, block_fees, ) @@ -3260,13 +3140,13 @@ let signers = miner_key_hash160s .into_iter() .map(|hash160| - // each miner gets two slots + // each miner gets one slot ( StacksAddress { version: 1, // NOTE: the version is ignored in stackerdb; we only care about the hashbytes bytes: hash160 }, - MINER_SLOT_COUNT, + 1 )) .collect(); @@ -3280,34 +3160,36 @@ }) } - /// Get the slot range for the given miner's public key. - /// Returns Some(Range<u32>) if the miner is in the StackerDB config, where the range of slots for the miner is [start, end). - /// i.e., inclusive of `start`, exclusive of `end`. + /// Get the slot number for the given miner's public key. + /// Returns Some(u32) if the miner is in the StackerDB config. /// Returns None if the miner is not in the StackerDB config. /// Returns an error if the miner is in the StackerDB config but the slot number is invalid. pub fn get_miner_slot( sortdb: &SortitionDB, tip: &BlockSnapshot, miner_pubkey: &StacksPublicKey, - ) -> Result<Option<Range<u32>>, ChainstateError> { + ) -> Result<Option<u32>, ChainstateError> { let miner_hash160 = Hash160::from_node_public_key(&miner_pubkey); let stackerdb_config = Self::make_miners_stackerdb_config(sortdb, &tip)?; // find out which slot we're in - let mut slot_index = 0; - let mut slot_id_result = None; - for (addr, slot_count) in stackerdb_config.signers.iter() { - if addr.bytes == miner_hash160 { - slot_id_result = Some(Range { - start: slot_index, - end: slot_index + slot_count, - }); - break; - } - slot_index += slot_count; - } - - let Some(slot_id_range) = slot_id_result else { + let Some(slot_id_res) = stackerdb_config .signers .iter() .enumerate() .find_map(|(i, (addr, _))| { if addr.bytes == miner_hash160 { Some(u32::try_from(i).map_err(|_| { CodecError::OverflowError( "stackerdb config slot ID cannot fit into u32".into(), ) })) } else { None } }) else { // miner key does not match any slot warn!("Miner is not in the miners StackerDB config"; "miner" => %miner_hash160, @@ -3315,7 +3197,7 @@ return Ok(None); }; - Ok(Some(slot_id_range)) + Ok(Some(slot_id_res?)) } /// Boot code instantiation for the aggregate public key.
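// --- Illustrative sketch (editor's addition; not part of the patch) -----------
// With the reverted one-slot-per-miner StackerDB config above, a miner's slot ID
// is simply its position in the `signers` list. Below is a simplified stand-in
// for `get_miner_slot`'s lookup: the flat `([u8; 20], u32)` entries are
// hypothetical substitutes for the real (StacksAddress, slot-count) pairs, and
// the match is on the Hash160 of the miner's public key, as in the diff.
fn find_miner_slot(signers: &[([u8; 20], u32)], miner_hash160: &[u8; 20]) -> Option<u32> {
    signers
        .iter()
        .enumerate()
        // compare each configured signer's hash bytes against the miner's Hash160
        .find(|(_, (addr_bytes, _slot_count))| addr_bytes == miner_hash160)
        // the enumeration index doubles as the slot ID; it must fit in a u32
        .and_then(|(i, _)| u32::try_from(i).ok())
}
// -------------------------------------------------------------------------------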
diff --git a/stackslib/src/chainstate/nakamoto/signer_set.rs b/stackslib/src/chainstate/nakamoto/signer_set.rs
index e776ca41dba..22e62d67813 100644
--- a/stackslib/src/chainstate/nakamoto/signer_set.rs
+++ b/stackslib/src/chainstate/nakamoto/signer_set.rs
@@ -13,7 +13,7 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-use std::collections::{BTreeMap, HashMap, HashSet};
+use std::collections::BTreeMap;
 use std::ops::DerefMut;
 
 use clarity::vm::ast::ASTRules;
@@ -40,7 +40,9 @@ use stacks_common::types::chainstate::{
     BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksBlockId,
     StacksPrivateKey, StacksPublicKey, TrieHash, VRFSeed,
 };
-use stacks_common::types::{PrivateKey, StacksEpochId};
+use stacks_common::types::{
+    PrivateKey, StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet,
+};
 use stacks_common::util::get_epoch_time_secs;
 use stacks_common::util::hash::{to_hex, Hash160, MerkleHashFunc, MerkleTree, Sha512Trunc256Sum};
 use stacks_common::util::retry::BoundReader;
@@ -514,7 +516,7 @@ impl NakamotoSigners {
             return false;
         }
         if origin_nonce < *account_nonce {
-            debug!("valid_vote_transaction: Received a transaction with an outdated nonce ({origin_nonce} < {account_nonce}).");
+            debug!("valid_vote_transaction: Received a transaction with an outdated nonce ({origin_nonce} < {account_nonce}).");
             return false;
         }
         Self::parse_vote_for_aggregate_public_key(transaction).is_some()
diff --git a/stackslib/src/chainstate/nakamoto/staging_blocks.rs b/stackslib/src/chainstate/nakamoto/staging_blocks.rs
index 0702a890701..c0d91777839 100644
--- a/stackslib/src/chainstate/nakamoto/staging_blocks.rs
+++ b/stackslib/src/chainstate/nakamoto/staging_blocks.rs
@@ -19,7 +19,6 @@ use std::ops::{Deref, DerefMut};
 use std::path::PathBuf;
 
 use lazy_static::lazy_static;
-use rusqlite::blob::Blob;
 use rusqlite::types::{FromSql, FromSqlError};
 use rusqlite::{params, Connection, OpenFlags, OptionalExtension, ToSql, NO_PARAMS};
 use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId};
@@ -43,13 +42,10 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[
     CREATE TABLE nakamoto_staging_blocks (
                     -- SHA512/256 hash of this block
                     block_hash TEXT NOT NULL,
-                    -- The consensus hash of the burnchain block that selected this block's miner's block-commit.
-                    -- This identifies the tenure to which this block belongs.
+                    -- the consensus hash of the burnchain block that selected this block's miner's block-commit
                     consensus_hash TEXT NOT NULL,
                     -- the parent index_block_hash
                     parent_block_id TEXT NOT NULL,
-                    -- whether or not this is the first block in its tenure
-                    is_tenure_start BOOL NOT NULL,
                     -- has the burnchain block with this block's `consensus_hash` been processed?
                    burn_attachable INT NOT NULL,
@@ -58,7 +54,6 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[
                     -- set to 1 if this block can never be attached
                     orphaned INT NOT NULL,
 
-                    -- block height
                     height INT NOT NULL,
 
                     -- used internally -- this is the StacksBlockId of this block's consensus hash and block hash
@@ -75,9 +70,7 @@ pub const NAKAMOTO_STAGING_DB_SCHEMA_1: &'static [&'static str] = &[
 
                     PRIMARY KEY(block_hash,consensus_hash)
     );"#,
-    r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#,
-    r#"CREATE INDEX nakamoto_staging_blocks_by_index_block_hash_and_consensus_hash ON nakamoto_staging_blocks(index_block_hash,consensus_hash);"#,
-    r#"CREATE INDEX nakamoto_staging_blocks_by_tenure_start_block ON nakamoto_staging_blocks(is_tenure_start,consensus_hash);"#,
+    r#"CREATE INDEX by_index_block_hash ON nakamoto_staging_blocks(index_block_hash);"#,
 ];
 
 pub struct NakamotoStagingBlocksConn(rusqlite::Connection);
@@ -141,34 +134,7 @@ impl<'a> DerefMut for NakamotoStagingBlocksTx<'a> {
     }
 }
 
-impl NakamotoStagingBlocksConn {
-    /// Open a Blob handle to a Nakamoto block
-    pub fn open_nakamoto_block<'a>(
-        &'a self,
-        rowid: i64,
-        readwrite: bool,
-    ) -> Result<Blob<'a>, ChainstateError> {
-        let blob = self.blob_open(
-            rusqlite::DatabaseName::Main,
-            "nakamoto_staging_blocks",
-            "data",
-            rowid,
-            !readwrite,
-        )?;
-        Ok(blob)
-    }
-}
-
 impl<'a> NakamotoStagingBlocksConnRef<'a> {
-    /// Determine if there exists any unprocessed Nakamoto blocks
-    /// Returns Ok(true) if so
-    /// Returns Ok(false) if not
-    pub fn has_any_unprocessed_nakamoto_block(&self) -> Result<bool, ChainstateError> {
-        let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE processed = 0 LIMIT 1";
-        let res: Option<u64> = query_row(self, qry, NO_PARAMS)?;
-        Ok(res.is_some())
-    }
-
     /// Determine whether or not we have processed at least one Nakamoto block in this sortition history.
     /// NOTE: the relevant field queried from `nakamoto_staging_blocks` is updated by a separate
     /// tx from block-processing, so it's imperative that the thread that calls this function is
@@ -192,57 +158,10 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
         Ok(res.is_some())
     }
 
-    /// Determine if we have a particular block
-    /// Returns Ok(true) if so
-    /// Returns Ok(false) if not
-    /// Returns Err(..) on DB error
-    pub fn has_nakamoto_block(
-        &self,
-        index_block_hash: &StacksBlockId,
-    ) -> Result<bool, ChainstateError> {
-        let qry = "SELECT 1 FROM nakamoto_staging_blocks WHERE index_block_hash = ?1";
-        let args: &[&dyn ToSql] = &[index_block_hash];
-        let res: Option<u64> = query_row(self, qry, args)?;
-        Ok(res.is_some())
-    }
-
-    /// Get a staged Nakamoto tenure-start block
-    pub fn get_nakamoto_tenure_start_block(
-        &self,
-        consensus_hash: &ConsensusHash,
-    ) -> Result<Option<NakamotoBlock>, ChainstateError> {
-        let qry = "SELECT data FROM nakamoto_staging_blocks WHERE is_tenure_start = 1 AND consensus_hash = ?1";
-        let args: &[&dyn ToSql] = &[consensus_hash];
-        let data: Option<Vec<u8>> = query_row(self, qry, args)?;
-        let Some(block_bytes) = data else {
-            return Ok(None);
-        };
-        let block = NakamotoBlock::consensus_deserialize(&mut block_bytes.as_slice())?;
-        if &block.header.consensus_hash != consensus_hash {
-            error!(
-                "Staging DB corruption: expected {}, got {}",
-                consensus_hash, block.header.consensus_hash
-            );
-            return Err(DBError::Corruption.into());
-        }
-        Ok(Some(block))
-    }
-
-    /// Get the rowid of a Nakamoto block
-    pub fn get_nakamoto_block_rowid(
-        &self,
-        index_block_hash: &StacksBlockId,
-    ) -> Result<Option<i64>, ChainstateError> {
-        let sql = "SELECT rowid FROM nakamoto_staging_blocks WHERE index_block_hash = ?1";
-        let args: &[&dyn ToSql] = &[index_block_hash];
-        let res: Option<i64> = query_row(self, sql, args)?;
-        Ok(res)
-    }
-
     /// Get a Nakamoto block by index block hash, as well as its size.
     /// Verifies its integrity.
     /// Returns Ok(Some(block, size)) if the block was present
-    /// Returns Ok(None) if there was no such block
+    /// Returns Ok(None) if there were no such rows.
     /// Returns Err(..) on DB error, including block corruption
     pub fn get_nakamoto_block(
         &self,
@@ -269,21 +188,6 @@ impl<'a> NakamotoStagingBlocksConnRef<'a> {
         )))
     }
 
-    /// Get the size of a Nakamoto block, given its index block hash
-    /// Returns Ok(Some(size)) if the block was present
-    /// Returns Ok(None) if there was no such block
-    /// Returns Err(..) on DB error, including block corruption
-    pub fn get_nakamoto_block_size(
-        &self,
-        index_block_hash: &StacksBlockId,
-    ) -> Result<Option<u64>, ChainstateError> {
-        let qry = "SELECT length(data) FROM nakamoto_staging_blocks WHERE index_block_hash = ?1";
-        let args: &[&dyn ToSql] = &[index_block_hash];
-        let res = query_row(self, qry, args)?
-            .map(|size: i64| u64::try_from(size).expect("FATAL: block size exceeds i64::MAX"));
-        Ok(res)
-    }
-
     /// Find the next ready-to-process Nakamoto block, given a connection to the staging blocks DB.
     /// NOTE: the relevant field queried from `nakamoto_staging_blocks` are updated by a separate
     /// tx from block-processing, so it's imperative that the thread that calls this function is
@@ -448,9 +352,7 @@ impl StacksChainState {
     /// Get the path to the Nakamoto staging blocks DB.
     /// It's separate from the headers DB in order to avoid DB contention between downloading
     /// blocks and processing them.
-    pub fn static_get_nakamoto_staging_blocks_path(
-        root_path: PathBuf,
-    ) -> Result<String, ChainstateError> {
+    pub fn get_nakamoto_staging_blocks_path(root_path: PathBuf) -> Result<String, ChainstateError> {
         let mut nakamoto_blocks_path = Self::blocks_path(root_path);
         nakamoto_blocks_path.push("nakamoto.sqlite");
         Ok(nakamoto_blocks_path
@@ -459,11 +361,6 @@ impl StacksChainState {
             .to_string())
     }
 
-    /// Get the path to the Nakamoto staging blocks DB.
-    pub fn get_nakamoto_staging_blocks_path(&self) -> Result<String, ChainstateError> {
-        Self::static_get_nakamoto_staging_blocks_path(PathBuf::from(self.root_path.as_str()))
-    }
-
     /// Open and set up a DB for nakamoto staging blocks.
     /// If it doesn't exist, then instantiate it if `readwrite` is true.
     pub fn open_nakamoto_staging_blocks(
diff --git a/stackslib/src/chainstate/nakamoto/tenure.rs b/stackslib/src/chainstate/nakamoto/tenure.rs
index 7389c033374..7f0fad030d7 100644
--- a/stackslib/src/chainstate/nakamoto/tenure.rs
+++ b/stackslib/src/chainstate/nakamoto/tenure.rs
@@ -89,7 +89,13 @@ use stacks_common::util::vrf::{VRFProof, VRFPublicKey, VRF};
 use wsts::curve::point::Point;
 
 use crate::burnchains::{PoxConstants, Txid};
-use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle, SortitionHandleTx};
+use crate::chainstate::burn::db::sortdb::{
+    get_ancestor_sort_id, get_ancestor_sort_id_tx, get_block_commit_by_txid, SortitionDB,
+    SortitionHandle, SortitionHandleConn, SortitionHandleTx,
+};
+use crate::chainstate::burn::operations::{
+    DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, StackStxOp, TransferStxOp,
+};
 use crate::chainstate::burn::{BlockSnapshot, SortitionHash};
 use crate::chainstate::coordinator::{BlockEventDispatcher, Error};
 use crate::chainstate::nakamoto::{
@@ -148,11 +154,9 @@ pub static NAKAMOTO_TENURES_SCHEMA: &'static str = r#"
         PRIMARY KEY(burn_view_consensus_hash,tenure_index)
     );
     CREATE INDEX nakamoto_tenures_by_block_id ON nakamoto_tenures(block_id);
-    CREATE INDEX nakamoto_tenures_by_tenure_id ON nakamoto_tenures(tenure_id_consensus_hash);
     CREATE INDEX nakamoto_tenures_by_block_and_consensus_hashes ON nakamoto_tenures(tenure_id_consensus_hash,block_hash);
     CREATE INDEX nakamoto_tenures_by_burn_view_consensus_hash ON nakamoto_tenures(burn_view_consensus_hash);
     CREATE INDEX nakamoto_tenures_by_tenure_index ON nakamoto_tenures(tenure_index);
-    CREATE INDEX nakamoto_tenures_by_parent ON nakamoto_tenures(tenure_id_consensus_hash,prev_tenure_id_consensus_hash);
"#;
 
 #[derive(Debug, Clone, PartialEq)]
@@ -388,18 +392,6 @@ impl NakamotoChainState {
             .map_err(|_| ChainstateError::DBError(DBError::ParseError))
     }
 
-    /// Determine if a tenure has been fully processed.
-    pub fn has_processed_nakamoto_tenure(
-        conn: &Connection,
-        tenure_id_consensus_hash: &ConsensusHash,
-    ) -> Result<bool, ChainstateError> {
-        // a tenure will have been processed if any of its children have been processed
-        let sql = "SELECT 1 FROM nakamoto_tenures WHERE prev_tenure_id_consensus_hash = ?1 LIMIT 1";
-        let args: &[&dyn ToSql] = &[tenure_id_consensus_hash];
-        let found: Option<u64> = query_row(conn, sql, args)?;
-        Ok(found.is_some())
-    }
-
     /// Insert a nakamoto tenure.
     /// No validation will be done.
     pub(crate) fn insert_nakamoto_tenure(
@@ -460,18 +452,6 @@ impl NakamotoChainState {
             .map_err(ChainstateError::DBError)
     }
 
-    /// Get the consensus hash of the parent tenure
-    /// Used by the p2p code.
-    /// Don't use in consensus code.
-    pub fn get_nakamoto_parent_tenure_id_consensus_hash(
-        chainstate_conn: &Connection,
-        consensus_hash: &ConsensusHash,
-    ) -> Result<Option<ConsensusHash>, ChainstateError> {
-        let sql = "SELECT prev_tenure_id_consensus_hash AS consensus_hash FROM nakamoto_tenures WHERE tenure_id_consensus_hash = ?1 ORDER BY tenure_index DESC LIMIT 1";
-        let args: &[&dyn ToSql] = &[consensus_hash];
-        query_row(chainstate_conn, sql, args).map_err(ChainstateError::DBError)
-    }
-
     /// Get the last block header in a Nakamoto tenure
     pub fn get_nakamoto_tenure_finish_block_header(
         chainstate_conn: &Connection,
@@ -1024,13 +1004,12 @@ impl NakamotoChainState {
         ))
     }
 
-    /// Check that a given Nakamoto block's tenure's sortition exists and was processed on this
-    /// particular burnchain fork.
-    /// Return the block snapshot if so.
+    /// Check that a given Nakamoto block's tenure's sortition exists and was processed.
+    /// Return the sortition's burnchain block's hash and its burnchain height
     pub(crate) fn check_sortition_exists(
         burn_dbconn: &mut SortitionHandleTx,
         block_consensus_hash: &ConsensusHash,
-    ) -> Result<BlockSnapshot, ChainstateError> {
+    ) -> Result<(BurnchainHeaderHash, u64), ChainstateError> {
         // check that the burnchain block that this block is associated with has been processed.
         // N.B. we must first get its hash, and then verify that it's in the same Bitcoin fork as
         // our `burn_dbconn` indicates.
@@ -1045,7 +1024,7 @@ impl NakamotoChainState {
             })?;
         let sortition_tip = burn_dbconn.context.chain_tip.clone();
 
-        let snapshot = burn_dbconn
+        let burn_header_height = burn_dbconn
             .get_block_snapshot(&burn_header_hash, &sortition_tip)?
             .ok_or_else(|| {
                 warn!(
                     "burn_header_hash" => %burn_header_hash,
                 );
                 ChainstateError::NoSuchBlockError
-            })?;
+            })?
+            .block_height;
 
-        Ok(snapshot)
+        Ok((burn_header_hash, burn_header_height))
     }
 }
diff --git a/stackslib/src/chainstate/nakamoto/test_signers.rs b/stackslib/src/chainstate/nakamoto/test_signers.rs
index 30a1ba81208..5d211f95308 100644
--- a/stackslib/src/chainstate/nakamoto/test_signers.rs
+++ b/stackslib/src/chainstate/nakamoto/test_signers.rs
@@ -15,20 +15,20 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::cell::RefCell; -use std::collections::{HashSet, VecDeque}; +use std::collections::VecDeque; use std::path::{Path, PathBuf}; use std::{fs, io}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; -use hashbrown::HashMap; use rand::seq::SliceRandom; use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -119,7 +119,7 @@ impl Default for TestSigners { Self { signer_parties, aggregate_public_key, - poly_commitments, + poly_commitments: poly_commitments.into(), num_keys, threshold, party_key_ids, @@ -147,13 +147,6 @@ impl TestSigners { let signature = sig_aggregator .sign(msg.as_slice(), &nonces, &sig_shares, &key_ids) .expect("aggregator sig failed"); - - test_debug!( - "Signed Nakamoto block {} with {} (rc {})", - block.block_id(), - &self.aggregate_public_key, - cycle - ); block.header.signer_signature = ThresholdSignature(signature); } @@ -186,7 +179,7 @@ impl TestSigners { .collect(); self.poly_commitments = match wsts::v2::test_helpers::dkg(&mut self.signer_parties, &mut rng) { - Ok(poly_commitments) => poly_commitments, + Ok(poly_commitments) => poly_commitments.into(), Err(secret_errors) => { panic!("Got secret errors from DKG: {:?}", secret_errors); } diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 9844fa7b74f..1f7f065ce0e 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -15,7 +15,6 @@ // along with this program. If not, see . 
use std::borrow::BorrowMut; -use std::collections::HashMap; use std::fs; use clarity::types::chainstate::{PoxId, SortitionId, StacksBlockId}; @@ -24,7 +23,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::StacksAddressExtensions; use clarity::vm::Value; use rand::{thread_rng, RngCore}; -use rusqlite::{Connection, ToSql}; +use rusqlite::Connection; use stacks_common::address::AddressHashMode; use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; @@ -35,7 +34,9 @@ use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, StacksWorkScore, TrieHash, VRFSeed, }; -use stacks_common::types::{Address, PrivateKey, StacksEpoch, StacksEpochId}; +use stacks_common::types::{ + Address, PrivateKey, StacksEpoch, StacksEpochId, StacksHashMap as HashMap, +}; use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::{hex_bytes, Hash160, MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; @@ -60,13 +61,11 @@ use crate::chainstate::coordinator::tests::{ use crate::chainstate::nakamoto::coordinator::tests::boot_nakamoto; use crate::chainstate::nakamoto::miner::NakamotoBlockBuilder; use crate::chainstate::nakamoto::signer_set::NakamotoSigners; -use crate::chainstate::nakamoto::staging_blocks::NakamotoStagingBlocksConnRef; use crate::chainstate::nakamoto::tenure::NakamotoTenure; use crate::chainstate::nakamoto::test_signers::TestSigners; use crate::chainstate::nakamoto::tests::node::TestStacker; use crate::chainstate::nakamoto::{ - query_rows, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SortitionHandle, - FIRST_STACKS_BLOCK_ID, + NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, SortitionHandle, FIRST_STACKS_BLOCK_ID, }; use crate::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, @@ -77,37 +76,18 @@ use crate::chainstate::stacks::db::{ StacksHeaderInfo, }; use crate::chainstate::stacks::{ - CoinbasePayload, Error as ChainstateError, StacksBlock, StacksBlockHeader, StacksTransaction, - StacksTransactionSigner, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionContractCall, - TransactionPayload, TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, + CoinbasePayload, StacksBlock, StacksBlockHeader, StacksTransaction, StacksTransactionSigner, + TenureChangeCause, TenureChangePayload, ThresholdSignature, TokenTransferMemo, + TransactionAnchorMode, TransactionAuth, TransactionContractCall, TransactionPayload, + TransactionPostConditionMode, TransactionSmartContract, TransactionVersion, }; use crate::core; use crate::core::{StacksEpochExtension, STACKS_EPOCH_3_0_MARKER}; use crate::net::codec::test::check_codec_and_corruption; -use crate::net::stackerdb::MINER_SLOT_COUNT; use crate::util_lib::boot::boot_code_id; use crate::util_lib::db::Error as db_error; use crate::util_lib::strings::StacksString; -impl<'a> NakamotoStagingBlocksConnRef<'a> { - #[cfg(test)] - pub fn get_all_blocks_in_tenure( - &self, - tenure_id_consensus_hash: &ConsensusHash, - ) -> Result, ChainstateError> { - let qry = "SELECT data FROM nakamoto_staging_blocks WHERE consensus_hash = ?1 ORDER BY height ASC"; - let args: &[&dyn ToSql] = &[tenure_id_consensus_hash]; - let block_data: Vec> = query_rows(self, qry, args)?; - let mut blocks = Vec::with_capacity(block_data.len()); - for data 
in block_data.into_iter() { - let block = NakamotoBlock::consensus_deserialize(&mut data.as_slice())?; - blocks.push(block); - } - Ok(blocks) - } -} - /// Get an address's account pub fn get_account( chainstate: &mut StacksChainState, @@ -2058,14 +2038,12 @@ fn test_make_miners_stackerdb_config() { .collect(); // active miner alternates slots (part of stability) - let first_miner_slot = 0; - let second_miner_slot = first_miner_slot + MINER_SLOT_COUNT; - assert_eq!(stackerdb_chunks[0].slot_id, first_miner_slot); - assert_eq!(stackerdb_chunks[1].slot_id, second_miner_slot); - assert_eq!(stackerdb_chunks[2].slot_id, first_miner_slot); - assert_eq!(stackerdb_chunks[3].slot_id, second_miner_slot); - assert_eq!(stackerdb_chunks[4].slot_id, first_miner_slot); - assert_eq!(stackerdb_chunks[5].slot_id, second_miner_slot); + assert_eq!(stackerdb_chunks[0].slot_id, 0); + assert_eq!(stackerdb_chunks[1].slot_id, 1); + assert_eq!(stackerdb_chunks[2].slot_id, 0); + assert_eq!(stackerdb_chunks[3].slot_id, 1); + assert_eq!(stackerdb_chunks[4].slot_id, 0); + assert_eq!(stackerdb_chunks[5].slot_id, 1); assert!(stackerdb_chunks[0].verify(&miner_addrs[1]).unwrap()); assert!(stackerdb_chunks[1].verify(&miner_addrs[2]).unwrap()); @@ -2379,7 +2357,7 @@ fn valid_vote_transaction() { }), }; valid_tx.set_origin_nonce(1); - let mut account_nonces = std::collections::HashMap::new(); + let mut account_nonces = HashMap::new(); account_nonces.insert(valid_tx.origin_address(), 1); assert!(NakamotoSigners::valid_vote_transaction( &account_nonces, @@ -2600,7 +2578,7 @@ fn valid_vote_transaction_malformed_transactions() { }; invalid_nonce.set_origin_nonce(0); // old nonce - let mut account_nonces = std::collections::HashMap::new(); + let mut account_nonces = HashMap::new(); account_nonces.insert(invalid_not_contract_call.origin_address(), 1); for tx in vec![ invalid_not_contract_call, @@ -2731,7 +2709,7 @@ fn filter_one_transaction_per_signer_multiple_addresses() { }; valid_tx_2_address_2.set_origin_nonce(2); let mut filtered_transactions = HashMap::new(); - let mut account_nonces = std::collections::HashMap::new(); + let mut account_nonces = HashMap::new(); account_nonces.insert(valid_tx_1_address_1.origin_address(), 1); account_nonces.insert(valid_tx_1_address_2.origin_address(), 1); NakamotoSigners::update_filtered_transactions( @@ -2746,7 +2724,10 @@ fn filter_one_transaction_per_signer_multiple_addresses() { valid_tx_2_address_1, ], ); - let txs: Vec<_> = filtered_transactions.into_values().collect(); + let txs: Vec<_> = filtered_transactions + .iter() + .map(|(_, v)| v.clone()) + .collect(); assert_eq!(txs.len(), 2); assert!(txs.contains(&valid_tx_1_address_1)); assert!(txs.contains(&valid_tx_1_address_2)); @@ -2828,7 +2809,7 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { }; valid_tx_3.set_origin_nonce(0); - let mut account_nonces = std::collections::HashMap::new(); + let mut account_nonces = HashMap::new(); account_nonces.insert(valid_tx_1.origin_address(), 0); let mut txs = vec![valid_tx_2, valid_tx_1, valid_tx_3]; let mut filtered_transactions = HashMap::new(); @@ -2838,7 +2819,10 @@ fn filter_one_transaction_per_signer_duplicate_nonces() { false, txs.clone(), ); - let filtered_txs: Vec<_> = filtered_transactions.into_values().collect(); + let filtered_txs: Vec<_> = filtered_transactions + .iter() + .map(|(_, v)| v.clone()) + .collect(); txs.sort_by(|a, b| a.txid().cmp(&b.txid())); assert_eq!(filtered_txs.len(), 1); assert!(filtered_txs.contains(&txs.first().expect("failed to get first tx"))); diff 
--git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 570b0cc3d3d..3b1008c3b64 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -15,20 +15,20 @@ // along with this program. If not, see . use std::cell::RefCell; -use std::collections::{HashSet, VecDeque}; +use std::collections::VecDeque; use std::path::{Path, PathBuf}; use std::{fs, io}; use clarity::vm::clarity::ClarityConnection; use clarity::vm::costs::{ExecutionCost, LimitedCostTracker}; use clarity::vm::types::*; -use hashbrown::HashMap; use rand::seq::SliceRandom; use rand::{CryptoRng, RngCore, SeedableRng}; use rand_chacha::ChaCha20Rng; use stacks_common::address::*; use stacks_common::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use stacks_common::types::chainstate::{BlockHeaderHash, SortitionId, StacksBlockId, VRFSeed}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use stacks_common::util::hash::Hash160; use stacks_common::util::sleep_ms; use stacks_common::util::vrf::{VRFProof, VRFPublicKey}; @@ -563,22 +563,10 @@ impl TestStacksNode { Self::make_nakamoto_block_from_txs(builder, chainstate, &sortdb.index_conn(), txs) .unwrap(); miner.sign_nakamoto_block(&mut nakamoto_block); - - let tenure_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), tenure_id_consensus_hash) - .unwrap() - .unwrap(); - let cycle = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tenure_sn.block_height) - .unwrap(); - - test_debug!( - "Signing Nakamoto block {} in tenure {} with key in cycle {}", - nakamoto_block.block_id(), - tenure_id_consensus_hash, - cycle - ); + let cycle = miner + .burnchain + .block_height_to_reward_cycle(burn_tip.block_height) + .expect("FATAL: failed to get reward cycle"); signers.sign_nakamoto_block(&mut nakamoto_block, cycle); let block_id = nakamoto_block.block_id(); @@ -599,7 +587,6 @@ impl TestStacksNode { &mut sort_handle, chainstate, nakamoto_block.clone(), - None, ) { Ok(accepted) => accepted, Err(e) => { @@ -1048,27 +1035,10 @@ impl<'a> TestPeer<'a> { &[(NakamotoBlock, u64, ExecutionCost)], ) -> Vec, { + let cycle = self.get_reward_cycle(); let mut stacks_node = self.stacks_node.take().unwrap(); let sortdb = self.sortdb.take().unwrap(); - let tenure_extend_payload = - if let TransactionPayload::TenureChange(ref tc) = &tenure_extend_tx.payload { - tc - } else { - panic!("Not a tenure-extend payload"); - }; - - let tenure_start_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &tenure_extend_payload.tenure_consensus_hash, - ) - .unwrap() - .unwrap(); - let cycle = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tenure_start_sn.block_height) - .unwrap(); - // Ensure the signers are setup for the current cycle signers.generate_aggregate_key(cycle); @@ -1120,7 +1090,6 @@ impl<'a> TestPeer<'a> { &mut sort_handle, &mut node.chainstate, block, - None, ) .unwrap(); if accepted { diff --git a/stackslib/src/chainstate/stacks/boot/docs.rs b/stackslib/src/chainstate/stacks/boot/docs.rs index 62580f384a1..a0b2dd1b105 100644 --- a/stackslib/src/chainstate/stacks/boot/docs.rs +++ b/stackslib/src/chainstate/stacks/boot/docs.rs @@ -1,5 +1,5 @@ use clarity::vm::docs::contracts::{produce_docs_refs, ContractSupportDocs}; -use hashbrown::{HashMap, HashSet}; +use stacks_common::types::{StacksHashMap as HashMap, StacksHashSet as HashSet}; use 
super::STACKS_BOOT_CODE_MAINNET; diff --git a/stackslib/src/chainstate/stacks/boot/mod.rs b/stackslib/src/chainstate/stacks/boot/mod.rs index ff9bcbfd17e..67f485429be 100644 --- a/stackslib/src/chainstate/stacks/boot/mod.rs +++ b/stackslib/src/chainstate/stacks/boot/mod.rs @@ -228,8 +228,6 @@ pub struct RewardSet { #[serde(skip_serializing_if = "Option::is_none", default)] // only generated for nakamoto reward sets pub signers: Option>, - #[serde(default)] - pub pox_ustx_threshold: Option, } #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] @@ -262,7 +260,6 @@ impl RewardSet { missed_reward_slots: vec![], }, signers: None, - pox_ustx_threshold: None, } } @@ -299,7 +296,7 @@ impl StacksChainState { pub fn handled_pox_cycle_start(clarity_db: &mut ClarityDatabase, cycle_number: u64) -> bool { let db_key = Self::handled_pox_cycle_start_key(cycle_number); match clarity_db - .get_data::(&db_key) + .get::(&db_key) .expect("FATAL: DB error when checking PoX cycle start") { Some(x) => x == POX_CYCLE_START_HANDLED_VALUE, @@ -312,7 +309,7 @@ impl StacksChainState { cycle_number: u64, ) -> Result<(), clarity::vm::errors::Error> { let db_key = Self::handled_pox_cycle_start_key(cycle_number); - db.put_data(&db_key, &POX_CYCLE_START_HANDLED_VALUE.to_string())?; + db.put(&db_key, &POX_CYCLE_START_HANDLED_VALUE.to_string())?; Ok(()) } @@ -432,7 +429,7 @@ impl StacksChainState { cycle_number: u64, cycle_info: Option, ) -> Result, Error> { - Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox2) + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_2_NAME) } /// Do all the necessary Clarity operations at the start of a PoX reward cycle. @@ -444,7 +441,7 @@ impl StacksChainState { cycle_number: u64, cycle_info: Option, ) -> Result, Error> { - Self::handle_pox_cycle_missed_unlocks(clarity, cycle_number, cycle_info, &PoxVersions::Pox3) + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_3_NAME) } /// Do all the necessary Clarity operations at the start of a PoX reward cycle. @@ -452,36 +449,29 @@ impl StacksChainState { /// /// This should only be called for PoX v4 cycles. pub fn handle_pox_cycle_start_pox_4( - _clarity: &mut ClarityTransactionConnection, - _cycle_number: u64, - _cycle_info: Option, + clarity: &mut ClarityTransactionConnection, + cycle_number: u64, + cycle_info: Option, ) -> Result, Error> { - // PASS - Ok(vec![]) + Self::handle_pox_cycle_start(clarity, cycle_number, cycle_info, POX_4_NAME) } /// Do all the necessary Clarity operations at the start of a PoX reward cycle. /// Currently, this just means applying any auto-unlocks to Stackers who qualified. 
/// - fn handle_pox_cycle_missed_unlocks( + fn handle_pox_cycle_start( clarity: &mut ClarityTransactionConnection, cycle_number: u64, cycle_info: Option, - pox_contract_ver: &PoxVersions, + pox_contract_name: &str, ) -> Result, Error> { clarity.with_clarity_db(|db| Ok(Self::mark_pox_cycle_handled(db, cycle_number)))??; - if !matches!(pox_contract_ver, PoxVersions::Pox2 | PoxVersions::Pox3) { - return Err(Error::InvalidStacksBlock(format!( - "Attempted to invoke missed unlocks handling on invalid PoX version ({pox_contract_ver})" - ))); - } - debug!( "Handling PoX reward cycle start"; "reward_cycle" => cycle_number, "cycle_active" => cycle_info.is_some(), - "pox_contract" => %pox_contract_ver, + "pox_contract" => pox_contract_name ); let cycle_info = match cycle_info { @@ -490,8 +480,7 @@ impl StacksChainState { }; let sender_addr = PrincipalData::from(boot::boot_code_addr(clarity.is_mainnet())); - let pox_contract = - boot::boot_code_id(pox_contract_ver.get_name_str(), clarity.is_mainnet()); + let pox_contract = boot::boot_code_id(pox_contract_name, clarity.is_mainnet()); let mut total_events = vec![]; for (principal, amount_locked) in cycle_info.missed_reward_slots.iter() { @@ -517,8 +506,7 @@ impl StacksChainState { }).expect("FATAL: failed to accelerate PoX unlock"); // query the stacking state for this user before deleting it - let user_data = - Self::get_user_stacking_state(clarity, principal, pox_contract_ver.get_name_str()); + let user_data = Self::get_user_stacking_state(clarity, principal, pox_contract_name); // perform the unlock let (result, _, mut events, _) = clarity @@ -823,19 +811,12 @@ impl StacksChainState { // pointer set by the PoX contract, then add them to auto-unlock list if slots_taken == 0 && !contributed_stackers.is_empty() { info!( - "{}", - if epoch_id.supports_pox_missed_slot_unlocks() { - "Stacker missed reward slot, added to unlock list" - } else { - "Stacker missed reward slot" - }; + "Stacker missed reward slot, added to unlock list"; + // "stackers" => %VecDisplay(&contributed_stackers), "reward_address" => %address.clone().to_b58(), "threshold" => threshold, "stacked_amount" => stacked_amt ); - if !epoch_id.supports_pox_missed_slot_unlocks() { - continue; - } contributed_stackers .sort_by_cached_key(|(stacker, ..)| to_hex(&stacker.serialize_to_vec())); while let Some((contributor, amt)) = contributed_stackers.pop() { @@ -855,9 +836,6 @@ impl StacksChainState { } } } - if !epoch_id.supports_pox_missed_slot_unlocks() { - missed_slots.clear(); - } info!("Reward set calculated"; "slots_occuppied" => reward_set.len()); RewardSet { rewarded_addresses: reward_set, @@ -865,7 +843,6 @@ impl StacksChainState { missed_reward_slots: missed_slots, }, signers: signer_set, - pox_ustx_threshold: Some(threshold), } } @@ -2680,7 +2657,6 @@ pub mod test { let block_txs = vec![coinbase_tx]; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -2717,10 +2693,6 @@ pub mod test { #[test] fn test_lockups() { - let burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); let mut peer_config = TestPeerConfig::new(function_name!(), 2000, 2001); let alice = StacksAddress::from_string("STVK1K405H6SK9NKJAP32GHYHDJ98MMNP8Y6Z9N0").unwrap(); let bob = StacksAddress::from_string("ST76D2FMXZ7D2719PNE4N71KPSX84XCCNCMYC940").unwrap(); @@ -2807,7 +2779,6 @@ pub mod test { let block_txs = vec![coinbase_tx]; let block_builder = 
StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -2902,8 +2873,7 @@ pub mod test { block_txs.push(tx); } - let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, - &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -2999,7 +2969,6 @@ pub mod test { let block_txs = vec![coinbase_tx, burn_tx]; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -3110,7 +3079,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -3327,7 +3295,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_block_builder( - &burnchain, false, &parent_tip, vrf_proof, @@ -3586,7 +3553,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -3861,7 +3827,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -4114,8 +4079,7 @@ pub mod test { block_txs.push(charlie_test_tx); } - let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, - &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -4278,7 +4242,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -4577,7 +4540,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -5158,7 +5120,6 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -5537,7 +5498,7 @@ pub mod test { block_txs.push(charlie_reject); } - let block_builder = StacksBlockBuilder::make_regtest_block_builder(&burnchain, &parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); if tenure_id == 2 { diff --git a/stackslib/src/chainstate/stacks/boot/pox-4.clar b/stackslib/src/chainstate/stacks/boot/pox-4.clar index 5f0daf8b7bf..681f8d9eab1 100644 --- a/stackslib/src/chainstate/stacks/boot/pox-4.clar +++ b/stackslib/src/chainstate/stacks/boot/pox-4.clar @@ -356,6 +356,63 @@ (define-read-only (get-reward-set-pox-address (reward-cycle uint) (index uint)) (map-get? 
reward-cycle-pox-address-list { reward-cycle: reward-cycle, index: index })) +(define-private (fold-unlock-reward-cycle (set-index uint) + (data-res (response { cycle: uint, + first-unlocked-cycle: uint, + stacker: principal + } int))) + (let ((data (try! data-res)) + (cycle (get cycle data)) + (first-unlocked-cycle (get first-unlocked-cycle data))) + ;; if current-cycle hasn't reached first-unlocked-cycle, just continue to next iter + (asserts! (>= cycle first-unlocked-cycle) (ok (merge data { cycle: (+ u1 cycle) }))) + (let ((cycle-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: set-index }))) + (cycle-entry-u (get stacker cycle-entry)) + (cycle-entry-total-ustx (get total-ustx cycle-entry)) + (cycle-last-entry-ix (- (get len (unwrap-panic (map-get? reward-cycle-pox-address-list-len { reward-cycle: cycle }))) u1))) + (asserts! (is-eq cycle-entry-u (some (get stacker data))) (err ERR_STACKING_CORRUPTED_STATE)) + (if (not (is-eq cycle-last-entry-ix set-index)) + ;; do a "move" if the entry to remove isn't last + (let ((move-entry (unwrap-panic (map-get? reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix })))) + (map-set reward-cycle-pox-address-list + { reward-cycle: cycle, index: set-index } + move-entry) + (match (get stacker move-entry) moved-stacker + ;; if the moved entry had an associated stacker, update its state + (let ((moved-state (unwrap-panic (map-get? stacking-state { stacker: moved-stacker }))) + ;; calculate the index into the reward-set-indexes that `cycle` is at + (moved-cycle-index (- cycle (get first-reward-cycle moved-state))) + (moved-reward-list (get reward-set-indexes moved-state)) + ;; reward-set-indexes[moved-cycle-index] = set-index via slice?, append, concat. + (update-list (unwrap-panic (replace-at? moved-reward-list moved-cycle-index set-index)))) + (map-set stacking-state { stacker: moved-stacker } + (merge moved-state { reward-set-indexes: update-list }))) + ;; otherwise, we don't need to update stacking-state after move + true)) + ;; if not moving, just noop + true) + ;; in all cases, we now need to delete the last list entry + (map-delete reward-cycle-pox-address-list { reward-cycle: cycle, index: cycle-last-entry-ix }) + (map-set reward-cycle-pox-address-list-len { reward-cycle: cycle } { len: cycle-last-entry-ix }) + ;; finally, update `reward-cycle-total-stacked` + (map-set reward-cycle-total-stacked { reward-cycle: cycle } + { total-ustx: (- (get total-ustx (unwrap-panic (map-get? reward-cycle-total-stacked { reward-cycle: cycle }))) + cycle-entry-total-ustx) }) + (ok (merge data { cycle: (+ u1 cycle)} ))))) + +;; This method is called by the Stacks block processor directly in order to handle the contract state mutations +;; associated with an early unlock. This can only be invoked by the block processor: it is private, and no methods +;; from this contract invoke it. +(define-private (handle-unlock (user principal) (amount-locked uint) (cycle-to-unlock uint)) + (let ((user-stacking-state (unwrap-panic (map-get? stacking-state { stacker: user }))) + (first-cycle-locked (get first-reward-cycle user-stacking-state)) + (reward-set-indexes (get reward-set-indexes user-stacking-state))) + ;; iterate over each reward set the user is a member of, and remove them from the sets. only apply to reward sets after cycle-to-unlock. + (try! 
(fold fold-unlock-reward-cycle reward-set-indexes (ok { cycle: first-cycle-locked, first-unlocked-cycle: cycle-to-unlock, stacker: user }))) + ;; Now that we've cleaned up all the reward set entries for the user, delete the user's stacking-state + (map-delete stacking-state { stacker: user }) + (ok true))) + ;; Add a PoX address to the `cycle-index`-th reward cycle, if `cycle-index` is between 0 and the given num-cycles (exclusive). ;; Arguments are given as a tuple, so this function can be (folded ..)'ed onto a list of its arguments. ;; Used by add-pox-addr-to-reward-cycles. diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 2c47f0ec0bd..07d34a04cc8 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -3782,7 +3782,6 @@ fn test_get_pox_addrs() { } let block_builder = StacksBlockBuilder::make_block_builder( - &burnchain, false, &parent_tip, vrf_proof, @@ -4079,7 +4078,6 @@ fn test_stack_with_segwit() { } let block_builder = StacksBlockBuilder::make_block_builder( - &burnchain, false, &parent_tip, vrf_proof, diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index ee64b6386d3..a785fe2f6ad 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -34,7 +34,6 @@ use clarity::vm::types::{ StacksAddressExtensions, StandardPrincipalData, TupleData, TupleTypeSignature, TypeSignature, Value, NONE, }; -use clarity::vm::Value::Optional; use stacks_common::address::AddressHashMode; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksBlockId, VRFSeed, @@ -54,13 +53,13 @@ use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::tests::pox_addr_from; use crate::chainstate::stacks::address::{PoxAddress, PoxAddressType20, PoxAddressType32}; use crate::chainstate::stacks::boot::pox_2_tests::{ - check_pox_print_event, generate_pox_clarity_value, get_partial_stacked, get_reward_cycle_total, - get_reward_set_entries_at, get_stacking_state_pox, get_stx_account_at, with_clarity_db_ro, - PoxPrintFields, StackingStateCheckData, + check_pox_print_event, generate_pox_clarity_value, get_reward_set_entries_at, + get_stacking_state_pox, get_stx_account_at, with_clarity_db_ro, PoxPrintFields, + StackingStateCheckData, }; use crate::chainstate::stacks::boot::{ - PoxVersions, BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, - POX_2_NAME, POX_3_NAME, + BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, POX_2_NAME, + POX_3_NAME, }; use crate::chainstate::stacks::db::{ MinerPaymentSchedule, StacksChainState, StacksHeaderInfo, MINER_REWARD_MATURITY, @@ -90,48 +89,6 @@ pub fn get_tip(sortdb: Option<&SortitionDB>) -> BlockSnapshot { SortitionDB::get_canonical_burn_chain_tip(&sortdb.unwrap().conn()).unwrap() } -fn make_simple_pox_4_lock( - key: &StacksPrivateKey, - peer: &mut TestPeer, - amount: u128, - lock_period: u128, -) -> StacksTransaction { - let addr = key_to_stacks_addr(key); - let pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); - let signer_pk = StacksPublicKey::from_private(&key); - let tip = get_tip(peer.sortdb.as_ref()); - let next_reward_cycle = peer - .config - .burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - let nonce = 
get_account(peer, &addr.into()).nonce; - let auth_id = u128::from(nonce); - - let signature = make_signer_key_signature( - &pox_addr, - &key, - next_reward_cycle.into(), - &Pox4SignatureTopic::StackStx, - lock_period, - amount, - auth_id, - ); - - make_pox_4_lockup( - key, - nonce, - amount, - &pox_addr, - lock_period, - &signer_pk, - tip.block_height, - Some(signature), - amount, - auth_id, - ) -} - pub fn make_test_epochs_pox() -> (Vec, PoxConstants) { let EMPTY_SORTITIONS = 25; let EPOCH_2_1_HEIGHT = EMPTY_SORTITIONS + 11; // 36 @@ -546,8 +503,6 @@ fn pox_extend_transition() { u128::MAX, auth_id, ); - let alice_stack_signature = alice_signature.clone(); - let alice_stack_signer_key = alice_signer_key.clone(); let alice_lockup = make_pox_4_lockup( &alice, 2, @@ -658,8 +613,8 @@ fn pox_extend_transition() { 3, alice_pox_addr.clone(), 6, - alice_signer_key.clone(), - Some(alice_signature.clone()), + alice_signer_key, + Some(alice_signature), u128::MAX, 3, ); @@ -776,16 +731,6 @@ fn pox_extend_transition() { ), ("pox-addr", pox_addr_val.clone()), ("lock-period", Value::UInt(4)), - ( - "signer-sig", - Value::some(Value::buff_from(alice_stack_signature).unwrap()).unwrap(), - ), - ( - "signer-key", - Value::buff_from(alice_stack_signer_key.to_bytes_compressed()).unwrap(), - ), - ("max-amount", Value::UInt(u128::MAX)), - ("auth-id", Value::UInt(1)), ]); let common_data = PoxPrintFields { op_name: "stack-stx".to_string(), @@ -1306,540 +1251,30 @@ fn pox_3_unlocks() { while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); } - - info!( - "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height - ); - - // Check that STX are not locked for 3 reward cycles after pox4 starts - for _ in 0..3 { - let tip = get_tip(peer.sortdb.as_ref()); - let cycle = burnchain - .block_height_to_reward_cycle(tip.block_height) - .unwrap(); - - info!("Checking no stackers for cycle {cycle}"); - for _ in 0..burnchain.pox_constants.reward_cycle_length { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - assert_latest_was_burn(&mut peer); - } - - info!("Checking that stackers have no STX locked after cycle {cycle}"); - let balances = balances_from_keys(&mut peer, &latest_block, &keys); - assert_eq!(balances[0].amount_locked(), 0); - assert_eq!(balances[1].amount_locked(), 0); - } -} - -// This tests calls most pox-4 Clarity functions to check the existence of `start-cycle-id` and `end-cycle-id` -// in emitted pox events. 
-// In this set up, Steph is a solo stacker and invokes `stack-stx`, `stack-increase` and `stack-extend` functions -// Alice delegates to Bob via `delegate-stx` -// And Bob as the delegate, invokes 'delegate-stack-stx' and 'stack-aggregation-commit-indexed' -#[test] -fn pox_4_check_cycle_id_range_in_print_events() { - // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); - - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); - burnchain.pox_constants = pox_constants.clone(); - - let observer = TestEventObserver::new(); - - let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( - &burnchain, - function_name!(), - Some(epochs.clone()), - Some(&observer), - ); - - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; - let mut latest_block = None; - - // alice - let alice = keys.pop().unwrap(); - let alice_address = key_to_stacks_addr(&alice); - let alice_principal = PrincipalData::from(alice_address.clone()); - let alice_pox_addr = pox_addr_from(&alice); - - // bob - let bob = keys.pop().unwrap(); - let bob_address = key_to_stacks_addr(&bob); - let bob_principal = PrincipalData::from(bob_address.clone()); - let bob_pox_addr = pox_addr_from(&bob); - let bob_signing_key = Secp256k1PublicKey::from_private(&bob); - let bob_pox_addr_val = Value::Tuple(bob_pox_addr.as_clarity_tuple().unwrap()); - - // steph the solo stacker stacks stx so nakamoto signer set stays stacking. - let steph_key = keys.pop().unwrap(); - let steph_address = key_to_stacks_addr(&steph_key); - let steph_principal = PrincipalData::from(steph_address.clone()); - let steph_pox_addr_val = - make_pox_addr(AddressHashMode::SerializeP2PKH, steph_address.bytes.clone()); - let steph_pox_addr = pox_addr_from(&steph_key); - let steph_signing_key = Secp256k1PublicKey::from_private(&steph_key); - let steph_key_val = Value::buff_from(steph_signing_key.to_bytes_compressed()).unwrap(); - - let mut alice_nonce = 0; - let mut steph_nonce = 0; - let mut bob_nonce = 0; - - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); - } - - let reward_cycle = get_current_reward_cycle(&peer, &burnchain); - let next_reward_cycle = reward_cycle + 1; - - info!( - "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height - ); - - let lock_period = 1; - let block_height = get_tip(peer.sortdb.as_ref()).block_height; - let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); - - // stack-stx - let steph_stack_stx_nonce = steph_nonce; - let signature = make_signer_key_signature( - &steph_pox_addr, - &steph_key, - reward_cycle, - &Pox4SignatureTopic::StackStx, - lock_period, - u128::MAX, - 1, - ); - let steph_stacking = make_pox_4_lockup( - &steph_key, - steph_stack_stx_nonce, - min_ustx, - &steph_pox_addr.clone(), - lock_period, - &steph_signing_key, - block_height, - Some(signature), - u128::MAX, - 1, - ); - steph_nonce += 1; - - // stack-increase - let steph_stack_increase_nonce = steph_nonce; - let signature = make_signer_key_signature( - &steph_pox_addr, - &steph_key, - reward_cycle, - &Pox4SignatureTopic::StackIncrease, - lock_period, - u128::MAX, - 1, - ); - let steph_stack_increase = make_pox_4_stack_increase( - &steph_key, - 
steph_stack_increase_nonce, - 100, - &steph_signing_key, - Some(signature), - u128::MAX, - 1, - ); - steph_nonce += 1; - - // stack-extend - let steph_stack_extend_nonce = steph_nonce; - let stack_extend_signature = make_signer_key_signature( - &steph_pox_addr, - &steph_key, - reward_cycle, - &Pox4SignatureTopic::StackExtend, - 1_u128, - u128::MAX, - 1, - ); - let steph_stack_extend = make_pox_4_extend( - &steph_key, - steph_stack_extend_nonce, - steph_pox_addr, - lock_period, - steph_signing_key, - Some(stack_extend_signature), - u128::MAX, - 1, - ); - steph_nonce += 1; - - // alice delegates STX to bob - let target_height = get_tip(peer.sortdb.as_ref()).block_height - + (3 * pox_constants.reward_cycle_length as u64) // 3 cycles (next cycle + 2) - + 1; // additional few blocks shouldn't matter to unlock-cycle - let alice_delegate = make_pox_4_delegate_stx( - &alice, - alice_nonce, - min_ustx, - bob_principal.clone(), - Some(target_height as u128), - Some(bob_pox_addr.clone()), - ); - let alice_delegate_nonce = alice_nonce; - alice_nonce += 1; - - let curr_height = get_tip(peer.sortdb.as_ref()).block_height; - let bob_delegate_stack_nonce = bob_nonce; - let bob_delegate_stack = make_pox_4_delegate_stack_stx( - &bob, - bob_nonce, - alice_principal.clone(), - min_ustx, - bob_pox_addr.clone(), - curr_height as u128, - lock_period, - ); - bob_nonce += 1; - - let bob_aggregation_commit_nonce = bob_nonce; - let signature = make_signer_key_signature( - &bob_pox_addr, - &bob, - next_reward_cycle, - &Pox4SignatureTopic::AggregationCommit, - lock_period, - u128::MAX, - 1, - ); - let bob_aggregation_commit = make_pox_4_aggregation_commit_indexed( - &bob, - bob_aggregation_commit_nonce, - &bob_pox_addr, - next_reward_cycle, - Some(signature), - &bob_signing_key, - u128::MAX, - 1, - ); - bob_nonce += 1; - - latest_block = Some(peer.tenure_with_txs( - &[ - steph_stacking, - steph_stack_increase, - steph_stack_extend, - alice_delegate, - bob_delegate_stack, - bob_aggregation_commit, - ], - &mut coinbase_nonce, - )); - - let tip = get_tip(peer.sortdb.as_ref()); - let tipId = StacksBlockId::new(&tip.consensus_hash, &tip.canonical_stacks_tip_hash); - assert_eq!(tipId, latest_block.unwrap()); - - let in_prepare_phase = burnchain.is_in_prepare_phase(tip.block_height); - assert_eq!(in_prepare_phase, false); - - let blocks = observer.get_blocks(); - let mut steph_txs = HashMap::new(); - let mut alice_txs = HashMap::new(); - let mut bob_txs = HashMap::new(); - - for b in blocks.into_iter() { - for r in b.receipts.into_iter() { - if let TransactionOrigin::Stacks(ref t) = r.transaction { - let addr = t.auth.origin().address_testnet(); - if addr == steph_address { - steph_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } - } - } - } - - assert_eq!(steph_txs.len() as u64, 3); - assert_eq!(alice_txs.len() as u64, 1); - assert_eq!(bob_txs.len() as u64, 2); - - let steph_stack_stx_tx = &steph_txs.get(&steph_stack_stx_nonce); - let steph_stack_extend_tx = &steph_txs.get(&steph_stack_extend_nonce); - let steph_stack_increase_tx = &steph_txs.get(&steph_stack_increase_nonce); - let bob_delegate_stack_stx_tx = &bob_txs.get(&bob_delegate_stack_nonce); - let bob_aggregation_commit_tx = &bob_txs.get(&bob_aggregation_commit_nonce); - let alice_delegate_tx = &alice_txs.get(&alice_delegate_nonce); - - // Check event for stack-stx tx - let steph_stacking_tx_events = 
&steph_stack_stx_tx.unwrap().clone().events; - assert_eq!(steph_stacking_tx_events.len() as u64, 2); - let steph_stacking_tx_event = &steph_stacking_tx_events[0]; - let steph_stacking_op_data = HashMap::from([ - // matches the expected cycle, since we're not in a prepare phase - ("start-cycle-id", Value::UInt(next_reward_cycle)), - ( - "end-cycle-id", - Value::some(Value::UInt(next_reward_cycle + lock_period)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "stack-stx".to_string(), - stacker: steph_principal.clone().into(), - balance: Value::UInt(10240000000000), - locked: Value::UInt(0), - burnchain_unlock_height: Value::UInt(0), - }; - check_pox_print_event(steph_stacking_tx_event, common_data, steph_stacking_op_data); - - // Check event for stack-increase tx - let steph_stack_increase_tx_events = &steph_stack_increase_tx.unwrap().clone().events; - assert_eq!(steph_stack_increase_tx_events.len() as u64, 2); - let steph_stack_increase_tx_event = &steph_stack_increase_tx_events[0]; - let steph_stack_increase_op_data = HashMap::from([ - // `stack-increase` is in the same block as `stack-stx`, so we essentially want to be able to override the first event - ("start-cycle-id", Value::UInt(next_reward_cycle)), - ( - "end-cycle-id", - Value::some(Value::UInt(next_reward_cycle + lock_period)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "stack-increase".to_string(), - stacker: steph_principal.clone().into(), - balance: Value::UInt(10234866375000), - locked: Value::UInt(5133625000), - burnchain_unlock_height: Value::UInt(120), - }; - check_pox_print_event( - steph_stack_increase_tx_event, - common_data, - steph_stack_increase_op_data, - ); - - // Check event for stack-extend tx - let steph_stack_extend_tx_events = &steph_stack_extend_tx.unwrap().clone().events; - assert_eq!(steph_stack_extend_tx_events.len() as u64, 2); - let steph_stack_extend_tx_event = &steph_stack_extend_tx_events[0]; - let steph_stacking_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_reward_cycle)), - ( - "end-cycle-id", - Value::some(Value::UInt(next_reward_cycle + lock_period + 1)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "stack-extend".to_string(), - stacker: steph_principal.clone().into(), - balance: Value::UInt(10234866374900), - locked: Value::UInt(5133625100), - burnchain_unlock_height: Value::UInt(120), - }; - check_pox_print_event( - steph_stack_extend_tx_event, - common_data, - steph_stacking_op_data, - ); - - // Check event for delegate-stx tx - let alice_delegation_tx_events = &alice_delegate_tx.unwrap().clone().events; - assert_eq!(alice_delegation_tx_events.len() as u64, 1); - let alice_delegation_tx_event = &alice_delegation_tx_events[0]; - let alice_delegate_stx_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_reward_cycle)), - ( - "end-cycle-id", - Value::some(Value::UInt(next_reward_cycle + 2)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "delegate-stx".to_string(), - stacker: alice_principal.clone().into(), - balance: Value::UInt(10240000000000), - locked: Value::UInt(0), - burnchain_unlock_height: Value::UInt(0), - }; - check_pox_print_event( - alice_delegation_tx_event, - common_data, - alice_delegate_stx_op_data, - ); - - // Check event for delegate-stack-stx tx - let bob_delegate_stack_stx_tx_events = &bob_delegate_stack_stx_tx.unwrap().clone().events; - assert_eq!(bob_delegate_stack_stx_tx_events.len() as u64, 2); - let bob_delegate_stack_stx_tx_event = 
&bob_delegate_stack_stx_tx_events[0]; - let bob_delegate_stack_stx_tx_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_reward_cycle)), - ( - "end-cycle-id", - Value::some(Value::UInt(next_reward_cycle + lock_period)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "delegate-stack-stx".to_string(), - stacker: alice_principal.clone().into(), - balance: Value::UInt(10240000000000), - locked: Value::UInt(0), - burnchain_unlock_height: Value::UInt(0), - }; - check_pox_print_event( - bob_delegate_stack_stx_tx_event, - common_data, - bob_delegate_stack_stx_tx_op_data, - ); - - // Check event for aggregation_commit tx - let bob_aggregation_commit_tx_events = &bob_aggregation_commit_tx.unwrap().clone().events; - assert_eq!(bob_aggregation_commit_tx_events.len() as u64, 1); - let bob_aggregation_commit_tx_event = &bob_aggregation_commit_tx_events[0]; - let bob_aggregation_commit_tx_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_reward_cycle)), - ( - "end-cycle-id", - Value::some(Value::UInt(next_reward_cycle)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "stack-aggregation-commit-indexed".to_string(), - stacker: bob_principal.clone().into(), - balance: Value::UInt(10240000000000), - locked: Value::UInt(0), - burnchain_unlock_height: Value::UInt(0), - }; - check_pox_print_event( - bob_aggregation_commit_tx_event, - common_data, - bob_aggregation_commit_tx_op_data, - ); -} - -// This tests calls some pox-4 Clarity functions to check the existence of `start-cycle-id` and `end-cycle-id` -// in emitted pox events. -// In this setup, Steph solo stacks in the prepare phase -#[test] -fn pox_4_check_cycle_id_range_in_print_events_in_prepare_phase() { - // Config for this test - let (epochs, pox_constants) = make_test_epochs_pox(); - - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); - burnchain.pox_constants = pox_constants.clone(); - - let observer = TestEventObserver::new(); - - let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( - &burnchain, - function_name!(), - Some(epochs.clone()), - Some(&observer), - ); - - assert_eq!(burnchain.pox_constants.reward_slots(), 6); - let mut coinbase_nonce = 0; - let mut latest_block = None; - - let steph_key = keys.pop().unwrap(); - let steph_address = key_to_stacks_addr(&steph_key); - let steph_principal = PrincipalData::from(steph_address.clone()); - let steph_pox_addr_val = - make_pox_addr(AddressHashMode::SerializeP2PKH, steph_address.bytes.clone()); - let steph_pox_addr = pox_addr_from(&steph_key); - let steph_signing_key = Secp256k1PublicKey::from_private(&steph_key); - let steph_key_val = Value::buff_from(steph_signing_key.to_bytes_compressed()).unwrap(); - - let mut steph_nonce = 0; - - // Advance into pox4 - let target_height = burnchain.pox_constants.pox_4_activation_height; - // produce blocks until the first reward phase that everyone should be in - while get_tip(peer.sortdb.as_ref()).block_height < u64::from(target_height) { - latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); - } - // produce blocks until the we're in the prepare phase - while !burnchain.is_in_prepare_phase(get_tip(peer.sortdb.as_ref()).block_height) { - latest_block = Some(peer.tenure_with_txs(&[], &mut coinbase_nonce)); - } - - let steph_balance = get_balance(&mut peer, &steph_principal); - - info!( - "Block height: {}", - get_tip(peer.sortdb.as_ref()).block_height - ); - - let min_ustx = 
get_stacking_minimum(&mut peer, &latest_block.unwrap()) * 120 / 100; // * 1.2 - - // stack-stx - let steph_lock_period = 2; - let current_cycle = get_current_reward_cycle(&peer, &burnchain); - let next_cycle = current_cycle + 1; - let signature = make_signer_key_signature( - &steph_pox_addr, - &steph_key, - current_cycle, - &Pox4SignatureTopic::StackStx, - steph_lock_period, - u128::MAX, - 1, - ); - let steph_stacking = make_pox_4_lockup( - &steph_key, - steph_nonce, - min_ustx, - &steph_pox_addr.clone(), - steph_lock_period, - &steph_signing_key, - get_tip(peer.sortdb.as_ref()).block_height, - Some(signature), - u128::MAX, - 1, + + info!( + "Block height: {}", + get_tip(peer.sortdb.as_ref()).block_height ); - steph_nonce += 1; - latest_block = Some(peer.tenure_with_txs(&[steph_stacking.clone()], &mut coinbase_nonce)); + // Check that STX are not locked for 3 reward cycles after pox4 starts + for _ in 0..3 { + let tip = get_tip(peer.sortdb.as_ref()); + let cycle = burnchain + .block_height_to_reward_cycle(tip.block_height) + .unwrap(); - let txs: HashMap<_, _> = observer - .get_blocks() - .into_iter() - .flat_map(|b| b.receipts) - .filter_map(|r| match r.transaction { - TransactionOrigin::Stacks(ref t) => Some((t.txid(), r.clone())), - _ => None, - }) - .collect(); + info!("Checking no stackers for cycle {cycle}"); + for _ in 0..burnchain.pox_constants.reward_cycle_length { + latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); + assert_latest_was_burn(&mut peer); + } - // Check event for stack-stx tx - let steph_stacking_receipt = txs.get(&steph_stacking.txid()).unwrap().clone(); - assert_eq!(steph_stacking_receipt.events.len(), 2); - let steph_stacking_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_cycle + 1)), // +1 because steph stacked during the prepare phase - ( - "end-cycle-id", - Value::some(Value::UInt(next_cycle + steph_lock_period)).unwrap(), - ), - ]); - let common_data = PoxPrintFields { - op_name: "stack-stx".to_string(), - stacker: steph_principal.clone().into(), - balance: Value::UInt(steph_balance), - locked: Value::UInt(0), - burnchain_unlock_height: Value::UInt(0), - }; - check_pox_print_event( - &steph_stacking_receipt.events[0], - common_data, - steph_stacking_op_data, - ); + info!("Checking that stackers have no STX locked after cycle {cycle}"); + let balances = balances_from_keys(&mut peer, &latest_block, &keys); + assert_eq!(balances[0].amount_locked(), 0); + assert_eq!(balances[1].amount_locked(), 0); + } } // test that revoke-delegate-stx calls emit an event and @@ -1902,10 +1337,7 @@ fn pox_4_revoke_delegate_stx_events() { get_tip(peer.sortdb.as_ref()).block_height ); let block_height = get_tip(peer.sortdb.as_ref()).block_height; - let current_cycle = get_current_reward_cycle(&peer, &burnchain); - let next_cycle = current_cycle + 1; let min_ustx = get_stacking_minimum(&mut peer, &latest_block.unwrap()); - let steph_stacking = make_pox_4_contract_call( &steph, 0, @@ -1997,14 +1429,10 @@ fn pox_4_revoke_delegate_stx_events() { let revoke_delegation_tx_events = &alice_txs.get(&alice_revoke_nonce).unwrap().clone().events; assert_eq!(revoke_delegation_tx_events.len() as u64, 1); let revoke_delegation_tx_event = &revoke_delegation_tx_events[0]; - let revoke_delegate_stx_op_data = HashMap::from([ - ("start-cycle-id", Value::UInt(next_cycle)), - ("end-cycle-id", Optional(OptionalData { data: None })), - ( - "delegate-to", - Value::Principal(PrincipalData::from(bob_address.clone())), - ), - ]); + let revoke_delegate_stx_op_data = 
HashMap::from([( + "delegate-to", + Value::Principal(PrincipalData::from(bob_address.clone())), + )]); let common_data = PoxPrintFields { op_name: "revoke-delegate-stx".to_string(), stacker: alice_principal.clone().into(), @@ -4883,7 +4311,7 @@ fn stack_increase() { alice_nonce, min_ustx, &signing_pk, - Some(signature.clone()), + Some(signature), u128::MAX, 1, ); @@ -4894,8 +4322,6 @@ fn stack_increase() { let actual_result = stacker_transactions.first().cloned().unwrap().result; - let increase_event = &stacker_transactions.first().cloned().unwrap().events[0]; - let expected_result = Value::okay(Value::Tuple( TupleData::from_data(vec![ ( @@ -4908,29 +4334,6 @@ )) .unwrap(); - let increase_op_data = HashMap::from([ - ( - "signer-sig", - Value::some(Value::buff_from(signature).unwrap()).unwrap(), - ), - ( - "signer-key", - Value::buff_from(signing_pk.to_bytes_compressed()).unwrap(), - ), - ("max-amount", Value::UInt(u128::MAX)), - ("auth-id", Value::UInt(1)), - ]); - - let common_data = PoxPrintFields { - op_name: "stack-increase".to_string(), - stacker: Value::Principal(PrincipalData::from(alice_address.clone())), - balance: Value::UInt(10234866375000), - locked: Value::UInt(5133625000), - burnchain_unlock_height: Value::UInt(125), - }; - - check_pox_print_event(&increase_event, common_data, increase_op_data); - // Testing stack_increase response is equal to expected response // Test is straightforward because 'stack-increase' in PoX-4 is the same as PoX-3 assert_eq!(actual_result, expected_result); @@ -5339,350 +4742,3 @@ pub fn get_last_block_sender_transactions( }) .collect::<Vec<_>>() } - -/// In this test case, two Stackers, Alice and Bob stack in PoX 4. Alice stacks enough -/// to qualify for slots, but Bob does not. In PoX-2 and PoX-3, this would result -/// in an auto unlock, but PoX-4 it should not. -#[test] -fn missed_slots_no_unlock() { - let EXPECTED_FIRST_V2_CYCLE = 8; - // the sim environment produces 25 empty sortitions before - // tenures start being tracked. 
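The warm-up offset described in the comment above drives the index arithmetic in this removed test: a coinbase-bearing tenure's index is its burn height minus the empty sortitions, which is how `auto_unlock_coinbase` is derived further down. A minimal illustrative sketch of that bookkeeping, with a made-up `height_target` (the real one is computed from the reward cycle):

// Illustrative only; mirrors `auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS`.
const EMPTY_SORTITIONS: u64 = 25; // empty sortitions before tenures are tracked

/// Index of the coinbase tx mined at `burn_height` under this offset.
fn burn_height_to_coinbase_index(burn_height: u64) -> u64 {
    burn_height - EMPTY_SORTITIONS
}

fn main() {
    let height_target = 100; // hypothetical target: one block into a reward cycle
    let auto_unlock_coinbase = burn_height_to_coinbase_index(height_target - 1);
    assert_eq!(auto_unlock_coinbase, 74);
    // Inverse direction, as used for `expected_unlock_height` later in the test:
    assert_eq!(auto_unlock_coinbase + EMPTY_SORTITIONS + 1, height_target);
}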
- let EMPTY_SORTITIONS = 25; - - let (epochs, mut pox_constants) = make_test_epochs_pox(); - pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; - - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); - burnchain.pox_constants = pox_constants.clone(); - - let observer = TestEventObserver::new(); - - let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( - &burnchain, - &function_name!(), - Some(epochs.clone()), - Some(&observer), - ); - - peer.config.check_pox_invariants = None; - - let alice = keys.pop().unwrap(); - let bob = keys.pop().unwrap(); - let alice_address = key_to_stacks_addr(&alice); - let bob_address = key_to_stacks_addr(&bob); - - let mut coinbase_nonce = 0; - - let first_v4_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) - .unwrap() - + 1; - - // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { - peer.tenure_with_txs(&[], &mut coinbase_nonce); - } - - // perform lockups so we can test that pox-4 does not exhibit unlock-on-miss behavior - let tip = get_tip(peer.sortdb.as_ref()); - - let alice_lockup = - make_simple_pox_4_lock(&alice, &mut peer, 1024 * POX_THRESHOLD_STEPS_USTX, 6); - - let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); - - let txs = [alice_lockup, bob_lockup]; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); - - // check that the "raw" reward set will contain entries for alice and bob - // for the pox-4 cycles - for cycle_number in first_v4_cycle..first_v4_cycle + 6 { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!( - reward_set_entries.len(), - 2, - "Reward set should contain two entries in cycle {cycle_number}" - ); - assert_eq!( - reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); - assert_eq!( - reward_set_entries[1].reward_address.bytes(), - alice_address.bytes.0.to_vec() - ); - } - - // we'll produce blocks until the next reward cycle gets through the "handled start" code - // this is one block after the reward cycle starts - let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle) + 1; - let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; - - // but first, check that bob has locked tokens at (height_target + 1) - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - ); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - while get_tip(peer.sortdb.as_ref()).block_height < height_target { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - } - - // check that the "raw" reward sets for all cycles contain entries for alice and bob still! 
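The assertions below, and `expected_unlock_height` after them, lean on the standard conversions between reward cycles and burn heights. Both directions reduce to integer arithmetic over the burnchain's first block height and cycle length; a self-contained sketch with illustrative constants (not the sim's real parameters):

// Sketch of the arithmetic behind `reward_cycle_to_block_height` and
// `block_height_to_reward_cycle`; constants are made up for illustration.
const FIRST_BLOCK_HEIGHT: u64 = 0;
const CYCLE_LENGTH: u64 = 5;

fn reward_cycle_to_block_height(cycle: u64) -> u64 {
    FIRST_BLOCK_HEIGHT + cycle * CYCLE_LENGTH
}

fn block_height_to_reward_cycle(height: u64) -> u64 {
    (height - FIRST_BLOCK_HEIGHT) / CYCLE_LENGTH
}

fn main() {
    let first_v4_cycle = 8;
    // A 6-cycle lockup unlocks one block before cycle (8 + 6) begins:
    let expected_unlock_height = reward_cycle_to_block_height(first_v4_cycle + 6) - 1;
    assert_eq!(block_height_to_reward_cycle(expected_unlock_height), first_v4_cycle + 5);
}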
- for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!(reward_set_entries.len(), 2); - assert_eq!( - reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); - assert_eq!( - reward_set_entries[1].reward_address.bytes(), - alice_address.bytes.0.to_vec() - ); - } - - let expected_unlock_height = burnchain.reward_cycle_to_block_height(first_v4_cycle + 6) - 1; - // now check that bob has an unlock height of `height_target` - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - ); - assert_eq!(bob_bal.unlock_height(), expected_unlock_height); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - let alice_bal = get_stx_account_at( - &mut peer, - &latest_block, - &alice_address.to_account_principal(), - ); - assert_eq!(alice_bal.unlock_height(), expected_unlock_height); - assert_eq!(alice_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX * 1024); - - // check that the total reward cycle amounts have not decremented - for cycle_number in first_v4_cycle..(first_v4_cycle + 6) { - assert_eq!( - get_reward_cycle_total(&mut peer, &latest_block, cycle_number), - 1025 * POX_THRESHOLD_STEPS_USTX - ); - } - - // check that bob's stacking-state is gone and alice's stacking-state is correct - let bob_state = get_stacking_state_pox( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - PoxVersions::Pox4.get_name_str(), - ) - .expect("Bob should have stacking-state entry") - .expect_tuple() - .unwrap(); - let reward_indexes_str = bob_state.get("reward-set-indexes").unwrap().to_string(); - assert_eq!(reward_indexes_str, "(u1 u1 u1 u1 u1 u1)"); - - let alice_state = get_stacking_state_pox( - &mut peer, - &latest_block, - &alice_address.to_account_principal(), - PoxVersions::Pox4.get_name_str(), - ) - .expect("Alice should have stacking-state entry") - .expect_tuple() - .unwrap(); - let reward_indexes_str = alice_state.get("reward-set-indexes").unwrap().to_string(); - assert_eq!(reward_indexes_str, "(u0 u0 u0 u0 u0 u0)"); - - // check that bob is still locked at next block - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - ); - assert_eq!(bob_bal.unlock_height(), expected_unlock_height); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - // now let's check some tx receipts - - let blocks = observer.get_blocks(); - - let mut alice_txs = HashMap::new(); - let mut bob_txs = HashMap::new(); - let mut coinbase_txs = vec![]; - let mut reward_cycles_in_2_5 = 0u64; - - for b in blocks.into_iter() { - if let Some(ref reward_set_data) = b.reward_set_data { - let signers_set = reward_set_data.reward_set.signers.as_ref().unwrap(); - assert_eq!(signers_set.len(), 1); - assert_eq!( - StacksPublicKey::from_private(&alice).to_bytes_compressed(), - signers_set[0].signing_key.to_vec() - ); - let rewarded_addrs = HashSet::<_>::from_iter( - reward_set_data - .reward_set - .rewarded_addresses - .iter() - .map(|a| a.to_burnchain_repr()), - ); - assert_eq!(rewarded_addrs.len(), 1); - assert_eq!( - reward_set_data.reward_set.rewarded_addresses[0].bytes(), - alice_address.bytes.0.to_vec(), - ); - reward_cycles_in_2_5 += 1; - eprintln!("{:?}", b.reward_set_data) - } - - for (i, r) in 
b.receipts.into_iter().enumerate() { - if i == 0 { - coinbase_txs.push(r); - continue; - } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } - } - _ => {} - } - } - } - - assert_eq!(alice_txs.len(), 1); - assert_eq!(bob_txs.len(), 1); - // only mined one 2.5 reward cycle, but make sure it was picked up in the events loop above - assert_eq!(reward_cycles_in_2_5, 1); - - // all should have committed okay - assert!( - match bob_txs.get(&0).unwrap().result { - Value::Response(ref r) => r.committed, - _ => false, - }, - "Bob tx0 should have committed okay" - ); - - // Check that the event produced by "handle-unlock" has a well-formed print event - // and that this event is included as part of the coinbase tx - for unlock_coinbase_index in [auto_unlock_coinbase] { - // expect the unlock to occur 1 block after the handle-unlock method was invoked. - let expected_unlock_height = unlock_coinbase_index + EMPTY_SORTITIONS + 1; - let expected_cycle = pox_constants - .block_height_to_reward_cycle(0, expected_unlock_height) - .unwrap(); - assert!( - coinbase_txs[unlock_coinbase_index as usize].events.is_empty(), - "handle-unlock events are coinbase events and there should be no handle-unlock invocation in this test" - ); - } -} - -/// In this test case, we lockup enough to get participation to be non-zero, but not enough to qualify for a reward slot. -#[test] -fn no_lockups_2_5() { - let EXPECTED_FIRST_V2_CYCLE = 8; - // the sim environment produces 25 empty sortitions before - // tenures start being tracked. - let EMPTY_SORTITIONS = 25; - - let (epochs, mut pox_constants) = make_test_epochs_pox(); - pox_constants.pox_4_activation_height = u32::try_from(epochs[7].start_height).unwrap() + 1; - - let mut burnchain = Burnchain::default_unittest( - 0, - &BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(), - ); - burnchain.pox_constants = pox_constants.clone(); - - let observer = TestEventObserver::new(); - - let (mut peer, mut keys) = instantiate_pox_peer_with_epoch( - &burnchain, - &function_name!(), - Some(epochs.clone()), - Some(&observer), - ); - - peer.config.check_pox_invariants = None; - - let alice = keys.pop().unwrap(); - let bob = keys.pop().unwrap(); - let alice_address = key_to_stacks_addr(&alice); - let bob_address = key_to_stacks_addr(&bob); - - let mut coinbase_nonce = 0; - - let first_v4_cycle = burnchain - .block_height_to_reward_cycle(burnchain.pox_constants.pox_4_activation_height as u64) - .unwrap() - + 1; - - // produce blocks until epoch 2.5 - while get_tip(peer.sortdb.as_ref()).block_height <= epochs[7].start_height { - peer.tenure_with_txs(&[], &mut coinbase_nonce); - } - - let tip = get_tip(peer.sortdb.as_ref()); - - let bob_lockup = make_simple_pox_4_lock(&bob, &mut peer, 1 * POX_THRESHOLD_STEPS_USTX, 6); - - let txs = [bob_lockup]; - let mut latest_block = peer.tenure_with_txs(&txs, &mut coinbase_nonce); - - // check that the "raw" reward set will contain an entry for bob - for cycle_number in first_v4_cycle..first_v4_cycle + 6 { - let cycle_start = burnchain.reward_cycle_to_block_height(cycle_number); - let reward_set_entries = get_reward_set_entries_at(&mut peer, &latest_block, cycle_start); - assert_eq!( - reward_set_entries.len(), - 1, - "Reward set should contain one entry in cycle {cycle_number}" - ); - assert_eq!( - 
reward_set_entries[0].reward_address.bytes(), - bob_address.bytes.0.to_vec() - ); - } - - // we'll produce blocks until the next reward cycle gets through the "handled start" code - // this is one block after the reward cycle starts - let height_target = burnchain.reward_cycle_to_block_height(first_v4_cycle + 1) + 1; - let auto_unlock_coinbase = height_target - 1 - EMPTY_SORTITIONS; - - // but first, check that bob has locked tokens at (height_target + 1) - let bob_bal = get_stx_account_at( - &mut peer, - &latest_block, - &bob_address.to_account_principal(), - ); - assert_eq!(bob_bal.amount_locked(), POX_THRESHOLD_STEPS_USTX); - - while get_tip(peer.sortdb.as_ref()).block_height < height_target { - latest_block = peer.tenure_with_txs(&[], &mut coinbase_nonce); - } - - let blocks = observer.get_blocks(); - for b in blocks.into_iter() { - if let Some(ref reward_set_data) = b.reward_set_data { - assert_eq!(reward_set_data.reward_set.signers, Some(vec![])); - assert!(reward_set_data.reward_set.rewarded_addresses.is_empty()); - eprintln!("{:?}", b.reward_set_data) - } - } -} diff --git a/stackslib/src/chainstate/stacks/boot/signers-voting.clar b/stackslib/src/chainstate/stacks/boot/signers-voting.clar index 1fea756e930..a5ca2003045 100644 --- a/stackslib/src/chainstate/stacks/boot/signers-voting.clar +++ b/stackslib/src/chainstate/stacks/boot/signers-voting.clar @@ -24,9 +24,9 @@ (define-constant pox-info (unwrap-panic (contract-call? .pox-4 get-pox-info))) -;; Threshold consensus, expressed as parts-per-hundred to allow for integer -;; division with higher precision (e.g. 70 for 70%). -(define-constant threshold-consensus u70) +;; Threshold consensus, expressed as parts-per-thousand to allow for integer +;; division with higher precision (e.g. 700 for 70%). +(define-constant threshold-consensus u700) ;; Maps reward-cycle ids to last round (define-map rounds uint uint) @@ -39,9 +39,6 @@ ;; necessary to recalculate it on every vote. (define-map cycle-total-weight uint uint) -;; Maps voting data (count, current weight) per reward cycle & round -(define-map round-data {reward-cycle: uint, round: uint} {votes-count: uint, votes-weight: uint}) - (define-read-only (burn-height-to-reward-cycle (height uint)) (/ (- height (get first-burnchain-block-height pox-info)) (get reward-cycle-length pox-info))) @@ -57,9 +54,6 @@ (define-read-only (get-vote (reward-cycle uint) (round uint) (signer principal)) (map-get? votes {reward-cycle: reward-cycle, round: round, signer: signer})) -(define-read-only (get-round-info (reward-cycle uint) (round uint)) - (map-get? round-data {reward-cycle: reward-cycle, round: round})) - (define-read-only (get-candidate-info (reward-cycle uint) (round uint) (candidate (buff 33))) {candidate-weight: (default-to u0 (map-get? tally {reward-cycle: reward-cycle, round: round, aggregate-public-key: candidate})), total-weight: (map-get? cycle-total-weight reward-cycle)}) @@ -87,11 +81,6 @@ (define-read-only (get-approved-aggregate-key (reward-cycle uint)) (map-get? aggregate-public-keys reward-cycle)) -;; get the weight required for consensus threshold -(define-read-only (get-threshold-weight (reward-cycle uint)) - (let ((total-weight (default-to u0 (map-get? cycle-total-weight reward-cycle)))) - (/ (+ (* total-weight threshold-consensus) u99) u100))) - (define-private (is-in-voting-window (height uint) (reward-cycle uint)) (let ((last-cycle (unwrap-panic (contract-call? 
.signers get-last-set-cycle)))) (and (is-eq last-cycle reward-cycle) @@ -145,12 +134,7 @@ ;; vote by signer weight (signer-weight (try! (get-signer-weight signer-index reward-cycle))) (new-total (+ signer-weight (default-to u0 (map-get? tally tally-key)))) - (cached-weight (try! (get-and-cache-total-weight reward-cycle))) - (threshold-weight (get-threshold-weight reward-cycle)) - (current-round (default-to { - votes-count: u0, - votes-weight: u0} (map-get? round-data {reward-cycle: reward-cycle, round: round}))) - ) + (total-weight (try! (get-and-cache-total-weight reward-cycle)))) ;; Check that the key has not yet been set for this reward cycle (asserts! (is-none (map-get? aggregate-public-keys reward-cycle)) (err ERR_OUT_OF_VOTING_WINDOW)) ;; Check that the aggregate public key is the correct length @@ -163,10 +147,6 @@ (try! (update-last-round reward-cycle round)) ;; Update the tally for this aggregate public key candidate (map-set tally tally-key new-total) - ;; Update the current round data - (map-set round-data {reward-cycle: reward-cycle, round: round} { - votes-count: (+ (get votes-count current-round) u1), - votes-weight: (+ (get votes-weight current-round) signer-weight)}) ;; Update used aggregate public keys (map-set used-aggregate-public-keys key reward-cycle) (print { @@ -178,7 +158,7 @@ new-total: new-total, }) ;; If the new total weight is greater than or equal to the threshold consensus - (if (>= new-total threshold-weight) + (if (>= (/ (* new-total u1000) total-weight) threshold-consensus) ;; Save this approved aggregate public key for this reward cycle. ;; If there is not already a key for this cycle, the insert will ;; return true and an event will be created. diff --git a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs index aef41ef4a51..5ac7d461c21 100644 --- a/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/signers_voting_tests.rs @@ -64,9 +64,7 @@ use crate::chainstate::stacks::boot::pox_2_tests::{ use crate::chainstate::stacks::boot::pox_4_tests::{ assert_latest_was_burn, get_last_block_sender_transactions, get_tip, make_test_epochs_pox, }; -use crate::chainstate::stacks::boot::signers_tests::{ - get_signer_index, prepare_signers_test, readonly_call, -}; +use crate::chainstate::stacks::boot::signers_tests::{get_signer_index, prepare_signers_test}; use crate::chainstate::stacks::boot::{ BOOT_CODE_COST_VOTING_TESTNET as BOOT_CODE_COST_VOTING, BOOT_CODE_POX_TESTNET, SIGNERS_NAME, SIGNERS_VOTING_NAME, @@ -2050,123 +2048,6 @@ fn vote_for_aggregate_public_key_mixed_rounds() { assert_eq!(alice_vote_tx.events.len(), 0); } -// In this test case, Alice & Bob advance through setup & check -// the round info from the very first reward cycle & round. 
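The helpers removed below exercised the old parts-per-hundred threshold; with the parts-per-thousand constant above, the pass condition in signers-voting.clar is plain integer arithmetic. A small Rust sketch of that check, using made-up weights (Clarity's uint modeled as u128):

// Models `(>= (/ (* new-total u1000) total-weight) threshold-consensus)`,
// including Clarity's flooring integer division; the weights are illustrative.
const THRESHOLD_CONSENSUS: u128 = 700; // parts-per-thousand, i.e. 70%

fn reaches_threshold(new_total: u128, total_weight: u128) -> bool {
    (new_total * 1000) / total_weight >= THRESHOLD_CONSENSUS
}

fn main() {
    // With a total weight of 4, three votes yield 750/1000 and pass; two yield 500 and fail.
    assert!(reaches_threshold(3, 4));
    assert!(!reaches_threshold(2, 4));
    // The removed get-threshold-weight computed ceil(4 * 70 / 100) = 3 as the cutoff,
    // so both formulations select the same minimum weight in this case.
}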
-#[test] -fn test_get_round_info() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Get the current reward cycle - let cycle_id = current_reward_cycle; - - let round_info = get_round_info(&mut peer, latest_block_id, cycle_id, 0) - .unwrap() - .expect_tuple() - .unwrap(); - let votes_count = round_info.get("votes-count").unwrap(); - let votes_weight = round_info.get("votes-weight").unwrap(); - - assert_eq!(votes_count, &Value::UInt(2)); - assert_eq!(votes_weight, &Value::UInt(4)); -} - -pub fn get_round_info( - peer: &mut TestPeer<'_>, - latest_block_id: StacksBlockId, - reward_cycle: u128, - round: u128, -) -> Option<Value> { - let round_tuple = readonly_call( - peer, - &latest_block_id, - "signers-voting".into(), - "get-round-info".into(), - vec![Value::UInt(reward_cycle), Value::UInt(round)], - ) - .expect_optional() - .unwrap(); - round_tuple -} - -// In this test case, Alice & Bob advance through setup & check -// the weight threshold info from the very first reward cycle & round. -#[test] -fn test_get_threshold_weight() { - // Test setup - let alice = TestStacker::from_seed(&[3, 4]); - let bob = TestStacker::from_seed(&[5, 6]); - let observer = TestEventObserver::new(); - - // Alice - Signer 1 - let alice_key = &alice.signer_private_key; - let alice_address = key_to_stacks_addr(alice_key); - let alice_principal = PrincipalData::from(alice_address); - - // Bob - Signer 2 - let bob_key = &bob.signer_private_key; - let bob_address = key_to_stacks_addr(bob_key); - let bob_principal = PrincipalData::from(bob_address); - - let (mut peer, test_signers, latest_block_id, current_reward_cycle) = prepare_signers_test( - function_name!(), - vec![ - (alice_principal.clone(), 1000), - (bob_principal.clone(), 1000), - ], - &[alice.clone(), bob.clone()], - Some(&observer), - ); - - // Get the current reward cycle - let cycle_id = current_reward_cycle; - - // Call get-threshold-weight - let threshold_weight: u128 = get_threshold_weight(&mut peer, latest_block_id, cycle_id); - - // Since there are four votes, the threshold weight should be 3 (75% of 4) - assert_eq!(threshold_weight, 3); -} - -pub fn get_threshold_weight( - peer: &mut TestPeer<'_>, - latest_block_id: StacksBlockId, - reward_cycle: u128, -) -> u128 { - let threshold_weight = readonly_call( - peer, - &latest_block_id, - "signers-voting".into(), - "get-threshold-weight".into(), - vec![Value::UInt(reward_cycle)], - ) - .expect_u128() - .unwrap(); - threshold_weight -} - fn nakamoto_tenure( peer: &mut TestPeer, test_signers: &mut TestSigners, diff --git a/stackslib/src/chainstate/stacks/db/accounts.rs b/stackslib/src/chainstate/stacks/db/accounts.rs index 7a10503b878..69dd14a1af2 100644 --- a/stackslib/src/chainstate/stacks/db/accounts.rs +++ b/stackslib/src/chainstate/stacks/db/accounts.rs @@ -1152,7 +1152,6 @@ mod test { vec![], vec![], 
vec![], - vec![], parent_header_info.anchored_header.height() + 1, ) .unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index dd70fcfb01b..f55168171c3 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -37,7 +37,6 @@ use rand::{thread_rng, Rng, RngCore}; use rusqlite::{Connection, DatabaseName, Error as sqlite_error, OptionalExtension}; use serde::Serialize; use serde_json::json; -use stacks_common::bitvec::BitVec; use stacks_common::codec::{read_next, write_next, MAX_MESSAGE_LEN}; use stacks_common::types::chainstate::{ BurnchainHeaderHash, SortitionId, StacksAddress, StacksBlockId, @@ -78,7 +77,6 @@ use crate::util_lib::db::{ query_count, query_int, query_row, query_row_columns, query_row_panic, query_rows, tx_busy_handler, u64_to_sql, DBConn, Error as db_error, FromColumn, FromRow, }; -use crate::util_lib::signed_structured_data::pox4::Pox4SignatureTopic; use crate::util_lib::strings::StacksString; #[derive(Debug, Clone, PartialEq)] @@ -161,7 +159,6 @@ pub struct SetupBlockResult<'a, 'b> { pub burn_transfer_stx_ops: Vec<TransferStxOp>, pub auto_unlock_events: Vec<StacksTransactionEvent>, pub burn_delegate_stx_ops: Vec<DelegateStxOp>, - pub burn_vote_for_aggregate_key_ops: Vec<VoteForAggregateKeyOp>, /// Result of a signer set calculation if one occurred pub signer_set_calc: Option<SignerCalculation>, } @@ -185,7 +182,6 @@ impl BlockEventDispatcher for DummyEventDispatcher { _confirmed_mblock_cost: &ExecutionCost, _pox_constants: &PoxConstants, _reward_set_data: &Option<RewardSetData>, - _signer_bitvec: &Option<BitVec<4000>>, ) { assert!( false, @@ -4153,37 +4149,23 @@ impl StacksChainState { burn_header_hash, .. } = &stack_stx_op; - - let mut args = vec![ - Value::UInt(*stacked_ustx), - // this .expect() should be unreachable since we coerce the hash mode when - // we parse the StackStxOp from a burnchain transaction - reward_addr - .as_clarity_tuple() - .expect("FATAL: stack-stx operation has no hash mode") - .into(), - Value::UInt(u128::from(*block_height)), - Value::UInt(u128::from(*num_cycles)), - ]; - // Appending additional signer related arguments for pox-4 - if active_pox_contract == PoxVersions::Pox4.get_name() { - match StacksChainState::collect_pox_4_stacking_args(&stack_stx_op) { - Ok(pox_4_args) => { - args.extend(pox_4_args); - } - Err(e) => { - warn!("Skipping StackStx operation for txid: {}, burn_block: {} because of failure in collecting pox-4 stacking args: {}", txid, burn_header_hash, e); - continue; - } - } - } let result = clarity_tx.connection().as_transaction(|tx| { tx.run_contract_call( &sender.clone().into(), None, &boot_code_id(active_pox_contract, mainnet), "stack-stx", - &args, + &[ Value::UInt(*stacked_ustx), // this .expect() should be unreachable since we coerce the hash mode when // we parse the StackStxOp from a burnchain transaction reward_addr .as_clarity_tuple() .expect("FATAL: stack-stx operation has no hash mode") .into(), Value::UInt(u128::from(*block_height)), Value::UInt(u128::from(*num_cycles)), ], |_, _| false, ) }); @@ -4237,35 +4219,6 @@ impl StacksChainState { all_receipts } - pub fn collect_pox_4_stacking_args(op: &StackStxOp) -> Result<Vec<Value>, String> { - let signer_key = match op.signer_key { - Some(signer_key) => match Value::buff_from(signer_key.as_bytes().to_vec()) { - Ok(signer_key) => signer_key, - Err(_) => { - return Err("Invalid signer_key".into()); - } - }, - _ => return Err("Invalid signer key".into()), - }; - - let max_amount_value = match op.max_amount { - Some(max_amount) => Value::UInt(max_amount), - None => 
return Err("Missing max_amount".into()), - }; - - let auth_id_value = match op.auth_id { - Some(auth_id) => Value::UInt(u128::from(auth_id)), - None => return Err("Missing auth_id".into()), - }; - - Ok(vec![ - Value::none(), - signer_key, - max_amount_value, - auth_id_value, - ]) - } - /// Process any STX transfer bitcoin operations /// that haven't been processed in this Stacks fork yet. pub fn process_transfer_ops( @@ -4436,113 +4389,6 @@ impl StacksChainState { all_receipts } - pub fn process_vote_for_aggregate_key_ops( - clarity_tx: &mut ClarityTx, - operations: Vec, - ) -> Vec { - let mut all_receipts = vec![]; - let mainnet = clarity_tx.config.mainnet; - let cost_so_far = clarity_tx.cost_so_far(); - for vote_for_aggregate_key_op in operations.into_iter() { - let VoteForAggregateKeyOp { - sender, - aggregate_key, - round, - reward_cycle, - signer_index, - signer_key, - block_height, - txid, - burn_header_hash, - .. - } = &vote_for_aggregate_key_op; - debug!("Processing VoteForAggregateKey burn op"; - "round" => round, - "reward_cycle" => reward_cycle, - "signer_index" => signer_index, - "signer_key" => signer_key.to_hex(), - "burn_block_height" => block_height, - "sender" => %sender, - "aggregate_key" => aggregate_key.to_hex(), - "txid" => %txid - ); - let result = clarity_tx.connection().as_transaction(|tx| { - tx.run_contract_call( - &sender.clone().into(), - None, - &boot_code_id(SIGNERS_VOTING_NAME, mainnet), - "vote-for-aggregate-public-key", - &[ - Value::UInt(signer_index.clone().into()), - Value::buff_from(aggregate_key.as_bytes().to_vec()).unwrap(), - Value::UInt(round.clone().into()), - Value::UInt(reward_cycle.clone().into()), - ], - |_, _| false, - ) - }); - match result { - Ok((value, _, events)) => { - if let Value::Response(ref resp) = value { - if !resp.committed { - info!("VoteForAggregateKey burn op rejected by signers-voting contract."; - "txid" => %txid, - "burn_block" => %burn_header_hash, - "contract_call_ecode" => %resp.data); - } else { - let aggregate_key_fmt = format!("{:?}", aggregate_key.to_hex()); - let signer_key_fmt = format!("{:?}", signer_key.to_hex()); - info!("Processed VoteForAggregateKey burnchain op"; - "resp" => %resp.data, - "round" => round, - "reward_cycle" => reward_cycle, - "signer_index" => signer_index, - "signer_key" => signer_key_fmt, - "burn_block_height" => block_height, - "sender" => %sender, - "aggregate_key" => aggregate_key_fmt, - "txid" => %txid); - } - let mut execution_cost = clarity_tx.cost_so_far(); - execution_cost - .sub(&cost_so_far) - .expect("BUG: cost declined between executions"); - - let receipt = StacksTransactionReceipt { - transaction: TransactionOrigin::Burn( - BlockstackOperationType::VoteForAggregateKey( - vote_for_aggregate_key_op, - ), - ), - events, - result: value, - post_condition_aborted: false, - stx_burned: 0, - contract_analysis: None, - execution_cost, - microblock_header: None, - tx_index: 0, - vm_error: None, - }; - - all_receipts.push(receipt); - } else { - unreachable!( - "BUG: Non-response value returned by VoteForAggregateKey burnchain op" - ) - } - } - Err(e) => { - info!("VoteForAggregateKey burn op processing error."; - "error" => %format!("{:?}", e), - "txid" => %txid, - "burn_block" => %burn_header_hash); - } - }; - } - all_receipts - } - /// Process a single anchored block. /// Return the fees and burns. 
pub fn process_block_transactions( @@ -4749,15 +4595,7 @@ impl StacksChainState { burn_tip: &BurnchainHeaderHash, burn_tip_height: u64, epoch_start_height: u64, - ) -> Result< - ( - Vec<StackStxOp>, - Vec<TransferStxOp>, - Vec<DelegateStxOp>, - Vec<VoteForAggregateKeyOp>, - ), - Error, - > { + ) -> Result<(Vec<StackStxOp>, Vec<TransferStxOp>, Vec<DelegateStxOp>), Error> { // only consider transactions in Stacks 2.1 let search_window: u8 = if epoch_start_height + u64::from(BURNCHAIN_TX_SEARCH_WINDOW) > burn_tip_height { @@ -4796,15 +4634,12 @@ impl StacksChainState { let mut all_stacking_burn_ops = vec![]; let mut all_transfer_burn_ops = vec![]; let mut all_delegate_burn_ops = vec![]; - let mut all_vote_for_aggregate_key_ops = vec![]; // go from oldest burn header hash to newest for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh)?; let transfer_ops = SortitionDB::get_transfer_stx_ops(sortdb_conn, ancestor_bhh)?; let delegate_ops = SortitionDB::get_delegate_stx_ops(sortdb_conn, ancestor_bhh)?; - let vote_for_aggregate_key_ops = - SortitionDB::get_vote_for_aggregate_key_ops(sortdb_conn, ancestor_bhh)?; for stacking_op in stacking_ops.into_iter() { if !processed_burnchain_txids.contains(&stacking_op.txid) { @@ -4823,18 +4658,11 @@ impl StacksChainState { all_delegate_burn_ops.push(delegate_op); } } - - for vote_op in vote_for_aggregate_key_ops.into_iter() { - if !processed_burnchain_txids.contains(&vote_op.txid) { - all_vote_for_aggregate_key_ops.push(vote_op); - } - } } Ok(( all_stacking_burn_ops, all_transfer_burn_ops, all_delegate_burn_ops, - all_vote_for_aggregate_key_ops, )) } @@ -4862,23 +4690,13 @@ impl StacksChainState { /// The change in Stacks 2.1+ makes it so that it's overwhelmingly likely to work /// the first time -- the choice of K is significantly bigger than the length of short-lived /// forks or periods of time with no sortition that have been observed in practice. - /// - /// In epoch 2.5+, the vote-for-aggregate-key op is included pub fn get_stacking_and_transfer_and_delegate_burn_ops( chainstate_tx: &mut ChainstateTx, parent_index_hash: &StacksBlockId, sortdb_conn: &Connection, burn_tip: &BurnchainHeaderHash, burn_tip_height: u64, - ) -> Result< - ( - Vec<StackStxOp>, - Vec<TransferStxOp>, - Vec<DelegateStxOp>, - Vec<VoteForAggregateKeyOp>, - ), - Error, - > { + ) -> Result<(Vec<StackStxOp>, Vec<TransferStxOp>, Vec<DelegateStxOp>), Error> { let cur_epoch = SortitionDB::get_stacks_epoch(sortdb_conn, burn_tip_height)? .expect("FATAL: no epoch defined for current burnchain tip height"); @@ -4893,24 +4711,14 @@ impl StacksChainState { burn_tip, )?; // The DelegateStx bitcoin wire format does not exist before Epoch 2.1. 
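The collection loop above follows a small, reusable pattern: scan the last K ancestor burn blocks (the search window), oldest to newest, and keep only the ops whose txids this Stacks fork has not yet processed. A simplified sketch of that pattern with stand-in types (not the real chainstate API):

use std::collections::HashSet;

type Txid = [u8; 32]; // stand-in for the real Txid type
#[derive(Clone)]
struct BurnOp {
    txid: Txid,
}

/// Collect ops from the newest `search_window` ancestor blocks, oldest first,
/// skipping txids already processed on this fork.
fn collect_unprocessed_ops(
    ancestors_newest_first: &[Vec<BurnOp>],
    search_window: usize,
    processed: &HashSet<Txid>,
) -> Vec<BurnOp> {
    let mut out = vec![];
    for block_ops in ancestors_newest_first.iter().take(search_window).rev() {
        for op in block_ops {
            if !processed.contains(&op.txid) {
                out.push(op.clone());
            }
        }
    }
    out
}

fn main() {
    let processed: HashSet<Txid> = HashSet::from([[1u8; 32]]);
    let ancestors = vec![vec![BurnOp { txid: [2u8; 32] }], vec![BurnOp { txid: [1u8; 32] }]];
    // Only the unprocessed txid ([2; 32]) survives the scan.
    assert_eq!(collect_unprocessed_ops(&ancestors, 2, &processed).len(), 1);
}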
- Ok((stack_ops, transfer_ops, vec![], vec![])) + Ok((stack_ops, transfer_ops, vec![])) } StacksEpochId::Epoch21 | StacksEpochId::Epoch22 | StacksEpochId::Epoch23 - | StacksEpochId::Epoch24 => { - let (stack_ops, transfer_ops, delegate_ops, _) = - StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( - chainstate_tx, - parent_index_hash, - sortdb_conn, - burn_tip, - burn_tip_height, - cur_epoch.start_height, - )?; - Ok((stack_ops, transfer_ops, delegate_ops, vec![])) - } - StacksEpochId::Epoch25 | StacksEpochId::Epoch30 => { + | StacksEpochId::Epoch24 + | StacksEpochId::Epoch25 + | StacksEpochId::Epoch30 => { // TODO: sbtc ops in epoch 3.0 StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( chainstate_tx, @@ -5069,7 +4877,7 @@ impl StacksChainState { (latest_miners, parent_miner) }; - let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops, vote_for_agg_key_burn_ops) = + let (stacking_burn_ops, transfer_burn_ops, delegate_burn_ops) = StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops( chainstate_tx, &parent_index_hash, @@ -5274,13 +5082,6 @@ impl StacksChainState { &chain_tip.anchored_header.block_hash() ); } - // Vote for aggregate pubkey ops are allowed from epoch 2.5 onward - if evaluated_epoch >= StacksEpochId::Epoch25 { - tx_receipts.extend(StacksChainState::process_vote_for_aggregate_key_ops( - &mut clarity_tx, - vote_for_agg_key_burn_ops.clone(), - )); - } debug!( "Setup block: ready to go for {}/{}", @@ -5301,7 +5102,6 @@ impl StacksChainState { burn_transfer_stx_ops: transfer_burn_ops, auto_unlock_events, burn_delegate_stx_ops: delegate_burn_ops, - burn_vote_for_aggregate_key_ops: vote_for_agg_key_burn_ops, signer_set_calc, }) } @@ -5506,7 +5306,6 @@ impl StacksChainState { mut auto_unlock_events, burn_delegate_stx_ops, signer_set_calc, - burn_vote_for_aggregate_key_ops, } = StacksChainState::setup_block( chainstate_tx, clarity_instance, @@ -5819,7 +5618,6 @@ impl StacksChainState { burn_stack_stx_ops, burn_transfer_stx_ops, burn_delegate_stx_ops, - burn_vote_for_aggregate_key_ops, affirmation_weight, ) .expect("FATAL: failed to advance chain tip"); @@ -6075,40 +5873,6 @@ impl StacksChainState { } }; - let microblocks_disabled_by_epoch_25 = - SortitionDB::are_microblocks_disabled(sort_tx.tx(), u64::from(burn_header_height))?; - - // microblocks are not allowed after Epoch 2.5 starts - if microblocks_disabled_by_epoch_25 { - if next_staging_block.parent_microblock_seq != 0 - || next_staging_block.parent_microblock_hash != BlockHeaderHash([0; 32]) - { - let msg = format!( - "Invalid stacks block {}/{} ({}). 
Confirms microblocks after Epoch 2.5 start.", - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - &StacksBlockId::new( - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash - ), - ); - warn!("{msg}"); - - // clear out - StacksChainState::set_block_processed( - chainstate_tx.deref_mut(), - None, - &blocks_path, - &next_staging_block.consensus_hash, - &next_staging_block.anchored_block_hash, - false, - )?; - chainstate_tx.commit().map_err(Error::DBError)?; - - return Err(Error::InvalidStacksBlock(msg)); - } - } - debug!( "Process staging block {}/{} in burn block {}, parent microblock {}", next_staging_block.consensus_hash, @@ -6378,7 +6142,6 @@ impl StacksChainState { &epoch_receipt.parent_microblocks_cost, &pox_constants, &reward_set_data, - &None, ); } @@ -10272,7 +10035,6 @@ pub mod test { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &peer_config.burnchain, ) .unwrap(); @@ -10454,7 +10216,6 @@ pub mod test { #[test] fn test_get_parent_block_header() { let peer_config = TestPeerConfig::new(function_name!(), 21313, 21314); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let chainstate_path = peer.chainstate_path.clone(); @@ -10524,7 +10285,6 @@ pub mod test { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -11002,7 +10762,6 @@ pub mod test { epochs[num_epochs - 1].block_limit.runtime = 10_000_000; peer_config.epochs = Some(epochs); peer_config.burnchain.pox_constants.v1_unlock_height = 26; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -11080,7 +10839,6 @@ pub mod test { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -11188,7 +10946,7 @@ pub mod test { let chainstate = peer.chainstate(); let (mut chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let (stack_stx_ops, transfer_stx_ops, delegate_stx_ops, vote_for_aggregate_key_ops) = + let (stack_stx_ops, transfer_stx_ops, delegate_stx_ops) = StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( &mut chainstate_tx, &last_block_id, @@ -11328,7 +11086,6 @@ pub mod test { epochs[num_epochs - 1].block_limit.read_length = 10_000_000; peer_config.epochs = Some(epochs); peer_config.burnchain.pox_constants.v1_unlock_height = 26; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -11403,7 +11160,6 @@ pub mod test { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -11871,7 +11627,7 @@ pub mod test { let chainstate = peer.chainstate(); let (mut chainstate_tx, clarity_instance) = chainstate.chainstate_tx_begin().unwrap(); - let (stack_stx_ops, transfer_stx_ops, delegate_stx_ops, _) = + let (stack_stx_ops, transfer_stx_ops, delegate_stx_ops) = StacksChainState::get_stacking_and_transfer_and_delegate_burn_ops_v210( &mut chainstate_tx, &last_block_id, diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 374fc11ae13..3e48a3f4716 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -47,9 +47,7 @@ use stacks_common::util::hash::{hex_bytes, to_hex}; use crate::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddress}; use crate::burnchains::{Address, Burnchain, BurnchainParameters, PoxConstants}; use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, 
SortitionDB, SortitionDBConn, *}; -use crate::chainstate::burn::operations::{ - DelegateStxOp, StackStxOp, TransferStxOp, VoteForAggregateKeyOp, -}; +use crate::chainstate::burn::operations::{DelegateStxOp, StackStxOp, TransferStxOp}; use crate::chainstate::burn::{ConsensusHash, ConsensusHashExtensions}; use crate::chainstate::nakamoto::{ HeaderTypeNames, NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, @@ -1780,7 +1778,7 @@ impl StacksChainState { .to_string(); let nakamoto_staging_blocks_path = - StacksChainState::static_get_nakamoto_staging_blocks_path(path.clone())?; + StacksChainState::get_nakamoto_staging_blocks_path(path.clone())?; let nakamoto_staging_blocks_conn = StacksChainState::open_nakamoto_staging_blocks(&nakamoto_staging_blocks_path, true)?; @@ -2480,7 +2478,6 @@ impl StacksChainState { burn_stack_stx_ops: Vec<StackStxOp>, burn_transfer_stx_ops: Vec<TransferStxOp>, burn_delegate_stx_ops: Vec<DelegateStxOp>, - burn_vote_for_aggregate_key_ops: Vec<VoteForAggregateKeyOp>, ) -> Result<(), Error> { let mut txids: Vec<_> = burn_stack_stx_ops .into_iter() @@ -2507,16 +2504,6 @@ impl StacksChainState { txids.append(&mut delegate_txids); - let mut vote_txids = - burn_vote_for_aggregate_key_ops - .into_iter() - .fold(vec![], |mut txids, op| { - txids.push(op.txid); - txids - }); - - txids.append(&mut vote_txids); - let txids_json = serde_json::to_string(&txids).expect("FATAL: could not serialize Vec<Txid>"); let sql = "INSERT INTO burnchain_txids (index_block_hash, txids) VALUES (?1, ?2)"; @@ -2545,7 +2532,6 @@ impl StacksChainState { burn_stack_stx_ops: Vec<StackStxOp>, burn_transfer_stx_ops: Vec<TransferStxOp>, burn_delegate_stx_ops: Vec<DelegateStxOp>, - burn_vote_for_aggregate_key_ops: Vec<VoteForAggregateKeyOp>, affirmation_weight: u64, ) -> Result<StacksHeaderInfo, Error> { if new_tip.parent_block != FIRST_STACKS_BLOCK_HASH { @@ -2610,7 +2596,6 @@ impl StacksChainState { burn_stack_stx_ops, burn_transfer_stx_ops, burn_delegate_stx_ops, - burn_vote_for_aggregate_key_ops, )?; if let Some((miner_payout, user_payouts, parent_payout, reward_info)) = mature_miner_payouts diff --git a/stackslib/src/chainstate/stacks/db/unconfirmed.rs b/stackslib/src/chainstate/stacks/db/unconfirmed.rs index 92d32dd0389..ea76bc54b3e 100644 --- a/stackslib/src/chainstate/stacks/db/unconfirmed.rs +++ b/stackslib/src/chainstate/stacks/db/unconfirmed.rs @@ -675,7 +675,6 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7000, 7001); peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -741,7 +740,6 @@ mod test { }; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -905,7 +903,6 @@ mod test { let initial_balance = 1000000000; let mut peer_config = TestPeerConfig::new(function_name!(), 7002, 7003); peer_config.initial_balances = vec![(addr.to_account_principal(), initial_balance)]; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -971,7 +968,6 @@ mod test { }; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -1150,7 +1146,6 @@ mod test { block_limit: BLOCK_LIMIT_MAINNET_20, network_epoch: PEER_VERSION_EPOCH_2_0, }]); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let chainstate_path = peer.chainstate_path.clone(); @@ -1221,7 +1216,6 @@ mod test { }; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, 
&parent_tip, vrf_proof, tip.total_burn, diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index ec8ac4a36cc..c04b03dcda4 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -38,7 +38,7 @@ use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; use stacks_common::util::vrf::*; -use crate::burnchains::{Burnchain, PrivateKey, PublicKey}; +use crate::burnchains::{PrivateKey, PublicKey}; use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionDBConn, SortitionHandleTx}; use crate::chainstate::burn::operations::*; use crate::chainstate::burn::*; @@ -118,7 +118,6 @@ impl std::fmt::Display for MinerStatus { /// halt mining pub fn signal_mining_blocked(miner_status: Arc<Mutex<MinerStatus>>) { - debug!("Signaling miner to block"; "thread_id" => ?std::thread::current().id()); match miner_status.lock() { Ok(mut status) => { status.add_blocked(); @@ -165,8 +164,6 @@ pub struct BlockBuilderSettings { pub max_miner_time_ms: u64, pub mempool_settings: MemPoolWalkSettings, pub miner_status: Arc<Mutex<MinerStatus>>, - /// Should the builder attempt to confirm any parent microblocks - pub confirm_microblocks: bool, } impl BlockBuilderSettings { @@ -175,7 +172,6 @@ max_miner_time_ms: u64::MAX, mempool_settings: MemPoolWalkSettings::default(), miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), - confirm_microblocks: true, } } @@ -184,7 +180,6 @@ max_miner_time_ms: u64::MAX, mempool_settings: MemPoolWalkSettings::zero(), miner_status: Arc::new(Mutex::new(MinerStatus::make_ready(0))), - confirm_microblocks: true, } } } @@ -1804,7 +1799,6 @@ impl StacksBlockBuilder { &mut self, chainstate: &'a mut StacksChainState, burn_dbconn: &'a SortitionDBConn, - confirm_microblocks: bool, ) -> Result<MinerEpochInfo<'a>, Error> { debug!( "Miner epoch begin"; @@ -1835,10 +1829,7 @@ ) .expect("FATAL: more than 2^32 sortitions"); - let parent_microblocks = if !confirm_microblocks { - debug!("Block assembly invoked with confirm_microblocks = false. Will not confirm any microblocks."); - vec![] - } else if StacksChainState::block_crosses_epoch_boundary( + let parent_microblocks = if StacksChainState::block_crosses_epoch_boundary( chainstate.db(), &self.parent_consensus_hash, &self.parent_header_hash, @@ -1999,7 +1990,7 @@ ) -> Result<(StacksBlock, u64, ExecutionCost, Option<StacksMicroblock>), Error> { debug!("Build anchored block from {} transactions", txs.len()); let (mut chainstate, _) = chainstate_handle.reopen()?; - let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn, true)?; + let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; let ast_rules = miner_epoch_info.ast_rules; let (mut epoch_tx, _) = builder.epoch_begin(burn_dbconn, &mut miner_epoch_info)?; for tx in txs.drain(..) 
{ @@ -2049,7 +2040,6 @@ impl StacksBlockBuilder { /// Create a block builder for mining pub fn make_block_builder( - burnchain: &Burnchain, mainnet: bool, stacks_parent_header: &StacksHeaderInfo, proof: VRFProof, @@ -2057,19 +2047,20 @@ pubkey_hash: Hash160, ) -> Result<StacksBlockBuilder, Error> { let builder = if stacks_parent_header.consensus_hash == FIRST_BURNCHAIN_CONSENSUS_HASH { - let (first_block_hash, first_block_height, first_block_ts) = if mainnet { + let (first_block_hash_hex, first_block_height, first_block_ts) = if mainnet { ( - BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH).unwrap(), + BITCOIN_MAINNET_FIRST_BLOCK_HASH, BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP, ) } else { ( - burnchain.first_block_hash, - burnchain.first_block_height, - burnchain.first_block_timestamp, + BITCOIN_TESTNET_FIRST_BLOCK_HASH, + BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, + BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP, ) }; + let first_block_hash = BurnchainHeaderHash::from_hex(first_block_hash_hex).unwrap(); StacksBlockBuilder::first_pubkey_hash( 0, &FIRST_BURNCHAIN_CONSENSUS_HASH, @@ -2103,20 +2094,21 @@ impl StacksBlockBuilder { /// Create a block builder for regtest mining pub fn make_regtest_block_builder( - burnchain: &Burnchain, stacks_parent_header: &StacksHeaderInfo, proof: VRFProof, total_burn: u64, pubkey_hash: Hash160, ) -> Result<StacksBlockBuilder, Error> { let builder = if stacks_parent_header.consensus_hash == FIRST_BURNCHAIN_CONSENSUS_HASH { + let first_block_hash = + BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); StacksBlockBuilder::first_pubkey_hash( 0, &FIRST_BURNCHAIN_CONSENSUS_HASH, - &burnchain.first_block_hash, - u32::try_from(burnchain.first_block_height) + &first_block_hash, + u32::try_from(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT) .expect("first regtest bitcoin block is over 2^32"), - u64::try_from(burnchain.first_block_timestamp) + u64::try_from(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP) .expect("first regtest bitcoin block timestamp is over 2^64"), &proof, pubkey_hash, @@ -2394,7 +2386,6 @@ impl StacksBlockBuilder { coinbase_tx: &StacksTransaction, settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, - burnchain: &Burnchain, ) -> Result<(StacksBlock, ExecutionCost, u64), Error> { if let TransactionPayload::Coinbase(..) 
= coinbase_tx.payload { } else { @@ -2417,7 +2408,6 @@ let (mut chainstate, _) = chainstate_handle.reopen()?; let mut builder = StacksBlockBuilder::make_block_builder( - burnchain, chainstate.mainnet, parent_stacks_header, proof, @@ -2425,14 +2415,9 @@ pubkey_hash, )?; - if !settings.confirm_microblocks { - builder.parent_microblock_hash = None; - } - let ts_start = get_epoch_time_ms(); - let mut miner_epoch_info = - builder.pre_epoch_begin(&mut chainstate, burn_dbconn, settings.confirm_microblocks)?; + let mut miner_epoch_info = builder.pre_epoch_begin(&mut chainstate, burn_dbconn)?; let ast_rules = miner_epoch_info.ast_rules; if ast_rules != ASTRules::Typical { builder.header.version = cmp::max( diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index f9ad4fff3fb..7247a28f7e3 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -693,12 +693,6 @@ impl FromSql for ThresholdSignature { } } -impl fmt::Display for ThresholdSignature { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - to_hex(&self.serialize_to_vec()).fmt(f) - } -} - impl ToSql for ThresholdSignature { fn to_sql(&self) -> rusqlite::Result<ToSqlOutput<'_>> { let bytes = self.serialize_to_vec(); diff --git a/stackslib/src/chainstate/stacks/tests/accounting.rs b/stackslib/src/chainstate/stacks/tests/accounting.rs index 8d65e40a4ed..e11224ab625 100644 --- a/stackslib/src/chainstate/stacks/tests/accounting.rs +++ b/stackslib/src/chainstate/stacks/tests/accounting.rs @@ -121,7 +121,6 @@ fn test_bad_microblock_fees_pre_v210() { }, ]; peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -304,7 +303,6 @@ fn test_bad_microblock_fees_pre_v210() { } let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrf_proof, @@ -406,7 +404,6 @@ fn test_bad_microblock_fees_fix_transition() { (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); let epochs = vec![ StacksEpoch { @@ -627,7 +624,6 @@ fn test_bad_microblock_fees_fix_transition() { } let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrf_proof, @@ -763,7 +759,6 @@ fn test_get_block_info_v210() { (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); let epochs = vec![ StacksEpoch { @@ -983,7 +978,6 @@ fn test_get_block_info_v210() { } let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrf_proof, @@ -1135,7 +1129,6 @@ fn test_get_block_info_v210_no_microblocks() { (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); let epochs = vec![ StacksEpoch { @@ -1287,7 +1280,6 @@ fn test_get_block_info_v210_no_microblocks() { mblock_pubkey_hash }; let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrf_proof, @@ -1494,7 +1486,6 @@ fn test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { }, ]; peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); let num_blocks = 10; let mut anchored_sender_nonce = 0; @@ -1754,7 +1745,6 @@ fn 
test_coinbase_pay_to_alt_recipient_v210(pay_to_contract: bool) { } let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrf_proof, diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index ae428af15fc..7e241bad481 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -69,7 +69,6 @@ use crate::util_lib::db::Error as db_error; #[test] fn test_build_anchored_blocks_empty() { let peer_config = TestPeerConfig::new(function_name!(), 2000, 2001); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let chainstate_path = peer.chainstate_path.clone(); @@ -139,7 +138,6 @@ fn test_build_anchored_blocks_empty() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -169,7 +167,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { let mut peer_config = TestPeerConfig::new(function_name!(), 2002, 2003); peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -263,7 +260,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -306,7 +302,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let mut peer_config = TestPeerConfig::new(function_name!(), 2022, 2023); peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -404,7 +399,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { ..BlockBuilderSettings::max_value() }, None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -445,7 +439,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let mut peer_config = TestPeerConfig::new(function_name!(), 2004, 2005); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -566,7 +559,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -611,7 +603,6 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { let mut peer_config = TestPeerConfig::new(function_name!(), 2016, 2017); peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); let epochs = vec![ StacksEpoch { @@ -796,7 +787,6 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -847,7 +837,6 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { let mut peer_config = TestPeerConfig::new(function_name!(), 2018, 2019); peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let burnchain = peer_config.burnchain.clone(); let epochs = vec![ StacksEpoch { @@ -1053,7 +1042,6 @@ fn test_build_anchored_blocks_connected_by_microblocks_across_epoch_invalid() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -1172,7 +1160,6 @@ fn test_build_anchored_blocks_incrementing_nonces() { let mut 
peer_config = TestPeerConfig::new(function_name!(), 2030, 2031); peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -1259,7 +1246,6 @@ fn test_build_anchored_blocks_incrementing_nonces() { &coinbase_tx, BlockBuilderSettings::limited(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -1358,7 +1344,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { }, network_epoch: PEER_VERSION_EPOCH_2_0, }]); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -1507,7 +1492,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { &coinbase_tx, BlockBuilderSettings::limited(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) @@ -1557,7 +1541,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { let mut peer_config = TestPeerConfig::new(function_name!(), 2008, 2009); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -1661,7 +1644,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { &coinbase_tx, BlockBuilderSettings::limited(), None, - &burnchain, ) .unwrap() }; @@ -1704,7 +1686,6 @@ fn test_build_anchored_blocks_empty_chaintips() { let mut peer_config = TestPeerConfig::new(function_name!(), 2010, 2011); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -1768,7 +1749,6 @@ fn test_build_anchored_blocks_empty_chaintips() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -1847,7 +1827,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { let mut peer_config = TestPeerConfig::new(function_name!(), 2013, 2014); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -1975,7 +1954,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -2001,7 +1979,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { #[test] fn test_build_anchored_blocks_invalid() { let peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let chainstate_path = peer.chainstate_path.clone(); @@ -2133,7 +2110,7 @@ fn test_build_anchored_blocks_invalid() { let coinbase_tx = make_coinbase(miner, tenure_id as usize); let mut anchored_block = StacksBlockBuilder::build_anchored_block( - chainstate, &sortdb.index_conn(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, &burnchain, + chainstate, &sortdb.index_conn(), &mut mempool, &parent_tip, tip.total_burn, vrf_proof, Hash160([tenure_id as u8; 20]), &coinbase_tx, BlockBuilderSettings::max_value(), None, ).unwrap(); if tenure_id == bad_block_tenure { @@ -2212,7 +2189,6 @@ fn test_build_anchored_blocks_bad_nonces() { let mut peer_config = TestPeerConfig::new(function_name!(), 2012, 2013); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -2412,7 +2388,6 @@ fn test_build_anchored_blocks_bad_nonces() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -2464,7 +2439,6 @@ fn test_build_microblock_stream_forks() { let mut 
peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -2663,7 +2637,6 @@ fn test_build_microblock_stream_forks() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -2765,7 +2738,6 @@ fn test_build_microblock_stream_forks_with_descendants() { let mut peer_config = TestPeerConfig::new(function_name!(), 2014, 2015); peer_config.initial_balances = balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3090,7 +3062,6 @@ fn test_build_microblock_stream_forks_with_descendants() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); @@ -3233,7 +3204,6 @@ fn test_contract_call_across_clarity_versions() { (addr.to_account_principal(), 1000000000), (addr_anchored.to_account_principal(), 1000000000), ]; - let burnchain = peer_config.burnchain.clone(); let epochs = vec![ StacksEpoch { @@ -3662,12 +3632,11 @@ fn test_contract_call_across_clarity_versions() { let sort_ic = sortdb.index_conn(); let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrf_proof, tip.total_burn, - Hash160([tenure_id as u8; 20]), + Hash160([tenure_id as u8; 20]) ) .unwrap(); @@ -3825,7 +3794,6 @@ fn test_is_tx_problematic() { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -3908,7 +3876,6 @@ fn test_is_tx_problematic() { // attempting to build an anchored block with this tx should cause this tx // to get flagged as problematic let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -4085,7 +4052,6 @@ fn test_is_tx_problematic() { // attempting to build an anchored block with this tx should cause this tx // to get flagged as problematic let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -4135,7 +4101,6 @@ fn test_is_tx_problematic() { // attempting to build an anchored block with this tx should cause this tx // to get flagged as problematic let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -4187,7 +4152,6 @@ fn test_is_tx_problematic() { // attempting to build an anchored block with this tx should cause this tx // to get flagged as problematic let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -4238,7 +4202,6 @@ fn test_is_tx_problematic() { &coinbase_tx, BlockBuilderSettings::limited(), None, - &burnchain, ) .unwrap(); @@ -4308,7 +4271,6 @@ fn mempool_incorporate_pox_unlocks() { peer_config.burnchain.pox_constants.v1_unlock_height = peer_config.epochs.as_ref().unwrap()[1].end_height as u32 + 1; let pox_constants = peer_config.burnchain.pox_constants.clone(); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -4481,7 +4443,6 @@ fn mempool_incorporate_pox_unlocks() { &coinbase_tx, BlockBuilderSettings::limited(), None, - &burnchain, ) .unwrap(); @@ -4523,7 +4484,6 @@ fn test_fee_order_mismatch_nonce_order() { let mut peer_config = TestPeerConfig::new(function_name!(), 2002, 2003); peer_config.initial_balances = vec![(addr.to_account_principal(), 1000000000)]; - let 
burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -4621,7 +4581,6 @@ fn test_fee_order_mismatch_nonce_order() { &coinbase_tx, BlockBuilderSettings::max_value(), None, - &burnchain, ) .unwrap(); (anchored_block.0, vec![]) diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index cc2fe940b14..fae7a66b42f 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -152,7 +152,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -338,7 +338,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -485,7 +485,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -533,7 +533,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -822,7 +822,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -870,7 +870,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -1087,7 +1087,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -1136,7 +1136,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -1435,7 +1435,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -1480,7 +1480,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ 
-1682,7 +1682,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -1730,7 +1730,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -1990,7 +1990,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -2035,7 +2035,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -2237,7 +2237,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) @@ -2285,7 +2285,7 @@ where let sort_iconn = sortdb.index_conn(); let mut miner_epoch_info = builder - .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true) + .pre_epoch_begin(&mut miner_chainstate, &sort_iconn) .unwrap(); let mut epoch = builder .epoch_begin(&sort_iconn, &mut miner_epoch_info) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index c14e14aad40..00a067408ee 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -144,7 +144,7 @@ fn friendly_expect_opt(input: Option, msg: &str) -> A { }) } -pub const DEFAULT_CLI_EPOCH: StacksEpochId = StacksEpochId::Epoch25; +pub const DEFAULT_CLI_EPOCH: StacksEpochId = StacksEpochId::Epoch21; struct EvalInput { marf_kv: MarfedKV, @@ -221,8 +221,6 @@ fn run_analysis_free( LimitedCostTracker::new_free(), DEFAULT_CLI_EPOCH, clarity_version, - // no type map data is used in the clarity_cli - false, ) } @@ -255,8 +253,6 @@ fn run_analysis( cost_track, DEFAULT_CLI_EPOCH, clarity_version, - // no type map data is used in the clarity_cli - false, ) } diff --git a/stackslib/src/clarity_vm/database/marf.rs b/stackslib/src/clarity_vm/database/marf.rs index 3e4088b6eb9..876168d878c 100644 --- a/stackslib/src/clarity_vm/database/marf.rs +++ b/stackslib/src/clarity_vm/database/marf.rs @@ -395,7 +395,7 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .expect("Attempted to get the open chain tip from an unopened context.") } - fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { + fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> { self.marf .get_with_proof(&self.chain_tip, key) .or_else(|e| match e { @@ -417,7 +417,7 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .transpose() } - fn get_data(&mut self, key: &str) -> InterpreterResult> { + fn get(&mut self, key: &str) -> InterpreterResult> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf .get(&self.chain_tip, key) @@ -447,7 +447,7 @@ impl<'a> ClarityBackingStore for ReadOnlyMarfStore<'a> { .transpose() } 
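// --- Editorial aside (not part of the patch): the marf.rs and database/mod.rs
// hunks here rename the ClarityBackingStore accessors: `get_data` -> `get`,
// `get_data_with_proof` -> `get_with_proof`, and `put_all_data` -> `put_all`.
// Below is a minimal std-only sketch of the renamed surface; `MockStore` and
// the String-error alias are illustrative stand-ins, not stackslib types.
use std::collections::HashMap;

type InterpreterResult<T> = Result<T, String>; // stand-in error type

#[derive(Default)]
struct MockStore {
    data: HashMap<String, String>,
}

impl MockStore {
    // was `get_data`
    fn get(&mut self, key: &str) -> InterpreterResult<Option<String>> {
        Ok(self.data.get(key).cloned())
    }

    // was `get_data_with_proof`; a non-MARF store returns an empty proof,
    // mirroring MemoryBackingStore in this patch
    fn get_with_proof(&mut self, key: &str) -> InterpreterResult<Option<(String, Vec<u8>)>> {
        Ok(self.data.get(key).cloned().map(|v| (v, vec![])))
    }

    // was `put_all_data`
    fn put_all(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> {
        for (k, v) in items {
            self.data.insert(k, v);
        }
        Ok(())
    }
}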
- fn put_all_data(&mut self, _items: Vec<(String, String)>) -> InterpreterResult<()> { + fn put_all(&mut self, _items: Vec<(String, String)>) -> InterpreterResult<()> { error!("Attempted to commit changes to read-only MARF"); panic!("BUG: attempted commit to read-only MARF"); } @@ -563,7 +563,7 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { Some(&handle_contract_call_special_cases) } - fn get_data(&mut self, key: &str) -> InterpreterResult> { + fn get(&mut self, key: &str) -> InterpreterResult> { trace!("MarfedKV get: {:?} tip={}", key, &self.chain_tip); self.marf .get(&self.chain_tip, key) @@ -593,7 +593,7 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { .transpose() } - fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { + fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> { self.marf .get_with_proof(&self.chain_tip, key) .or_else(|e| match e { @@ -678,7 +678,7 @@ impl<'a> ClarityBackingStore for WritableMarfStore<'a> { } } - fn put_all_data(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { + fn put_all(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { let mut keys = Vec::new(); let mut values = Vec::new(); for (key, value) in items.into_iter() { diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index c9c21957f3b..fdf45f3d21c 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -728,11 +728,11 @@ impl ClarityBackingStore for MemoryBackingStore { Err(RuntimeErrorType::UnknownBlockHeaderHash(BlockHeaderHash(bhh.0)).into()) } - fn get_data(&mut self, key: &str) -> InterpreterResult> { + fn get(&mut self, key: &str) -> InterpreterResult> { SqliteConnection::get(self.get_side_store(), key) } - fn get_data_with_proof(&mut self, key: &str) -> InterpreterResult)>> { + fn get_with_proof(&mut self, key: &str) -> InterpreterResult)>> { Ok(SqliteConnection::get(self.get_side_store(), key)?.map(|x| (x, vec![]))) } @@ -764,7 +764,7 @@ impl ClarityBackingStore for MemoryBackingStore { Some(&handle_contract_call_special_cases) } - fn put_all_data(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { + fn put_all(&mut self, items: Vec<(String, String)>) -> InterpreterResult<()> { for (key, value) in items.into_iter() { SqliteConnection::put(self.get_side_store(), &key, &value)?; } diff --git a/stackslib/src/clarity_vm/tests/costs.rs b/stackslib/src/clarity_vm/tests/costs.rs index a4d9f9294a8..8f9bcd94144 100644 --- a/stackslib/src/clarity_vm/tests/costs.rs +++ b/stackslib/src/clarity_vm/tests/costs.rs @@ -1537,7 +1537,7 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi tracker.contract_call_circuits().is_empty(), "No contract call circuits should have been processed" ); - for (target, referenced_function) in tracker.cost_function_references().into_iter() { + for (target, referenced_function) in tracker.cost_function_references().iter() { assert_eq!( &referenced_function.contract_id, &boot_code_id("costs", use_mainnet), @@ -1652,8 +1652,8 @@ fn test_cost_voting_integration(use_mainnet: bool, clarity_version: ClarityVersi assert_eq!(circuit2.unwrap().contract_id, cost_definer); assert_eq!(circuit2.unwrap().function_name, "cost-definition-multi-arg"); - for (target, referenced_function) in tracker.cost_function_references().into_iter() { - if target == &ClarityCostFunction::Le { + for (target, referenced_function) in tracker.cost_function_references().iter() { + if **target 
== ClarityCostFunction::Le { assert_eq!(&referenced_function.contract_id, &cost_definer); assert_eq!(&referenced_function.function_name, "cost-definition-le"); } else { diff --git a/stackslib/src/core/mod.rs b/stackslib/src/core/mod.rs index 629cb02c9a0..0f44d7af9af 100644 --- a/stackslib/src/core/mod.rs +++ b/stackslib/src/core/mod.rs @@ -134,7 +134,7 @@ pub const BITCOIN_TESTNET_STACKS_21_BURN_HEIGHT: u64 = 2_422_101; pub const BITCOIN_TESTNET_STACKS_22_BURN_HEIGHT: u64 = 2_431_300; pub const BITCOIN_TESTNET_STACKS_23_BURN_HEIGHT: u64 = 2_431_633; pub const BITCOIN_TESTNET_STACKS_24_BURN_HEIGHT: u64 = 2_432_545; -pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 2_583_893; +pub const BITCOIN_TESTNET_STACKS_25_BURN_HEIGHT: u64 = 20_000_000; pub const BITCOIN_TESTNET_STACKS_30_BURN_HEIGHT: u64 = 30_000_000; pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; @@ -443,13 +443,13 @@ lazy_static! { StacksEpoch { epoch_id: StacksEpochId::Epoch25, start_height: 6000, - end_height: 7001, + end_height: 7000, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_2_5 }, StacksEpoch { epoch_id: StacksEpochId::Epoch30, - start_height: 7001, + start_height: 7000, end_height: STACKS_EPOCH_MAX, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), network_epoch: PEER_VERSION_EPOCH_3_0 diff --git a/stackslib/src/lib.rs b/stackslib/src/lib.rs index bd634cef647..7cc7922c555 100644 --- a/stackslib/src/lib.rs +++ b/stackslib/src/lib.rs @@ -71,6 +71,10 @@ pub mod cost_estimates; pub mod clarity_cli; +#[cfg(any(test, feature = "testing"))] +#[macro_use] +pub mod proptesting; + // set via _compile-time_ envars const GIT_BRANCH: Option<&'static str> = option_env!("GIT_BRANCH"); const GIT_COMMIT: Option<&'static str> = option_env!("GIT_COMMIT"); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 1040f867c61..fa4833cdcf6 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -26,10 +26,10 @@ extern crate stacks_common; #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; -#[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] +#[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; -#[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] +#[cfg(not(target_env = "msvc"))] #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; @@ -577,7 +577,6 @@ simulating a miner. } let start = get_epoch_time_ms(); - let burnchain_path = format!("{}/mainnet/burnchain", &argv[2]); let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); @@ -646,7 +645,6 @@ simulating a miner. &coinbase_tx, settings, None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), ); let stop = get_epoch_time_ms(); @@ -1338,7 +1336,6 @@ simulating a miner. process::exit(1); } - let burnchain_path = format!("{}/mainnet/burnchain", &argv[2]); let sort_db_path = format!("{}/mainnet/burnchain/sortition", &argv[2]); let chain_state_path = format!("{}/mainnet/chainstate/", &argv[2]); @@ -1521,7 +1518,6 @@ simulating a miner. 
&coinbase_tx, settings, None, - &Burnchain::new(&burnchain_path, "bitcoin", "main").unwrap(), ); let stop = get_epoch_time_ms(); diff --git a/stackslib/src/net/api/getaccount.rs b/stackslib/src/net/api/getaccount.rs index 9aa5ef260c8..f29f62cb9b1 100644 --- a/stackslib/src/net/api/getaccount.rs +++ b/stackslib/src/net/api/getaccount.rs @@ -152,14 +152,14 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let v3_unlock_height = clarity_db.get_v3_unlock_height().ok()?; let (balance, balance_proof) = if with_proof { clarity_db - .get_data_with_proof::(&key) + .get_with_proof::(&key) .ok() .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| (STXBalance::zero(), Some("".into()))) } else { clarity_db - .get_data::(&key) + .get::(&key) .ok() .flatten() .map(|a| (a, None)) @@ -169,14 +169,14 @@ impl RPCRequestHandler for RPCGetAccountRequestHandler { let key = ClarityDatabase::make_key_for_account_nonce(&account); let (nonce, nonce_proof) = if with_proof { clarity_db - .get_data_with_proof(&key) + .get_with_proof(&key) .ok() .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) .unwrap_or_else(|| (0, Some("".into()))) } else { clarity_db - .get_data(&key) + .get(&key) .ok() .flatten() .map(|a| (a, None)) diff --git a/stackslib/src/net/api/getblock_v3.rs b/stackslib/src/net/api/getblock_v3.rs deleted file mode 100644 index 090afec04cd..00000000000 --- a/stackslib/src/net/api/getblock_v3.rs +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::io::{Read, Seek, SeekFrom, Write}; -use std::{fs, io}; - -use regex::{Captures, Regex}; -use rusqlite::Connection; -use serde::de::Error as de_Error; -use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; -use stacks_common::types::net::PeerHost; -use stacks_common::util::hash::to_hex; -use {serde, serde_json}; - -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::Error as ChainError; -use crate::net::http::{ - parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, - HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, - HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, -}; -use crate::net::httpcore::{ - HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, - StacksHttpResponse, -}; -use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -#[derive(Clone)] -pub struct RPCNakamotoBlockRequestHandler { - pub block_id: Option, -} - -impl RPCNakamotoBlockRequestHandler { - pub fn new() -> Self { - Self { block_id: None } - } -} - -pub struct NakamotoBlockStream { - /// index block hash of the block to download - pub index_block_hash: StacksBlockId, - /// consensus hash of this block (identifies its tenure; used by the tenure stream) - pub consensus_hash: ConsensusHash, - /// parent index block hash of the block to download (used by the tenure stream) - pub parent_block_id: StacksBlockId, - /// offset into the blob - pub offset: u64, - /// total number of bytes read. - pub total_bytes: u64, - /// Connection to the staging DB - pub staging_db_conn: NakamotoStagingBlocksConn, - /// rowid of the block - pub rowid: i64, -} - -impl NakamotoBlockStream { - pub fn new( - chainstate: &StacksChainState, - block_id: StacksBlockId, - consensus_hash: ConsensusHash, - parent_block_id: StacksBlockId, - ) -> Result { - let staging_db_path = chainstate.get_nakamoto_staging_blocks_path()?; - let db_conn = StacksChainState::open_nakamoto_staging_blocks(&staging_db_path, false)?; - let rowid = db_conn - .conn() - .get_nakamoto_block_rowid(&block_id)? - .ok_or(ChainError::NoSuchBlockError)?; - - Ok(NakamotoBlockStream { - index_block_hash: block_id, - consensus_hash, - parent_block_id, - offset: 0, - total_bytes: 0, - staging_db_conn: db_conn, - rowid, - }) - } - - /// reset the stream to send another block. - /// Does not change the DB connection or consensus hash. - pub fn reset( - &mut self, - block_id: StacksBlockId, - parent_block_id: StacksBlockId, - ) -> Result<(), ChainError> { - let rowid = self - .staging_db_conn - .conn() - .get_nakamoto_block_rowid(&block_id)? - .ok_or(ChainError::NoSuchBlockError)?; - - self.index_block_hash = block_id; - self.parent_block_id = parent_block_id; - self.offset = 0; - self.total_bytes = 0; - self.rowid = rowid; - Ok(()) - } -} - -/// Decode the HTTP request -impl HttpRequest for RPCNakamotoBlockRequestHandler { - fn verb(&self) -> &'static str { - "GET" - } - - fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/blocks/(?P[0-9a-f]{64})$"#).unwrap() - } - - /// Try to decode this request. - /// There's nothing to load here, so just make sure the request is well-formed. 
- fn try_parse_request( - &mut self, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _body: &[u8], - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(Error::DecodeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let block_id_str = captures - .name("block_id") - .ok_or_else(|| { - Error::DecodeError("Failed to match path to block ID group".to_string()) - })? - .as_str(); - - let block_id = StacksBlockId::from_hex(block_id_str).map_err(|_| { - Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) - })?; - self.block_id = Some(block_id); - - Ok(HttpRequestContents::new().query_string(query)) - } -} - -impl RPCRequestHandler for RPCNakamotoBlockRequestHandler { - /// Reset internal state - fn restart(&mut self) { - self.block_id = None; - } - - /// Make the response - fn try_handle_request( - &mut self, - preamble: HttpRequestPreamble, - _contents: HttpRequestContents, - node: &mut StacksNodeState, - ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let block_id = self - .block_id - .take() - .ok_or(NetError::SendError("Missing `block_id`".into()))?; - - let stream_res = - node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { - let Some(header) = - NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &block_id)? - else { - return Err(ChainError::NoSuchBlockError); - }; - let Some(nakamoto_header) = header.anchored_header.as_stacks_nakamoto() else { - return Err(ChainError::NoSuchBlockError); - }; - NakamotoBlockStream::new( - chainstate, - block_id.clone(), - nakamoto_header.consensus_hash.clone(), - nakamoto_header.parent_block_id.clone(), - ) - }); - - // start loading up the block - let stream = match stream_res { - Ok(stream) => stream, - Err(ChainError::NoSuchBlockError) => { - return StacksHttpResponse::new_error( - &preamble, - &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), - ) - .try_into_contents() - .map_err(NetError::from) - } - Err(e) => { - // nope -- error trying to check - let msg = format!("Failed to load block {}: {:?}\n", &block_id, &e); - warn!("{}", &msg); - return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) - .try_into_contents() - .map_err(NetError::from); - } - }; - - let resp_preamble = HttpResponsePreamble::from_http_request_preamble( - &preamble, - 200, - "OK", - None, - HttpContentType::Bytes, - ); - - Ok(( - resp_preamble, - HttpResponseContents::from_stream(Box::new(stream)), - )) - } -} - -/// Decode the HTTP response -impl HttpResponse for RPCNakamotoBlockRequestHandler { - /// Decode this response from a byte stream. 
This is called by the client to decode this - /// message - fn try_parse_response( - &self, - preamble: &HttpResponsePreamble, - body: &[u8], - ) -> Result { - let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; - Ok(HttpResponsePayload::Bytes(bytes)) - } -} - -/// Stream implementation for a Nakamoto block -impl HttpChunkGenerator for NakamotoBlockStream { - #[cfg(test)] - fn hint_chunk_size(&self) -> usize { - // make this hurt - 32 - } - - #[cfg(not(test))] - fn hint_chunk_size(&self) -> usize { - 4096 - } - - fn generate_next_chunk(&mut self) -> Result, String> { - let mut blob_fd = self - .staging_db_conn - .open_nakamoto_block(self.rowid, false) - .map_err(|e| { - let msg = format!( - "Failed to open Nakamoto block {}: {:?}", - &self.index_block_hash, &e - ); - warn!("{}", &msg); - msg - })?; - - blob_fd.seek(SeekFrom::Start(self.offset)).map_err(|e| { - let msg = format!( - "Failed to read Nakamoto block {}: {:?}", - &self.index_block_hash, &e - ); - warn!("{}", &msg); - msg - })?; - - let mut buf = vec![0u8; self.hint_chunk_size()]; - let num_read = blob_fd.read(&mut buf).map_err(|e| { - let msg = format!( - "Failed to read Nakamoto block {}: {:?}", - &self.index_block_hash, &e - ); - warn!("{}", &msg); - msg - })?; - - buf.truncate(num_read); - - self.offset += num_read as u64; - self.total_bytes += num_read as u64; - - Ok(buf) - } -} - -impl StacksHttpRequest { - pub fn new_get_nakamoto_block(host: PeerHost, block_id: StacksBlockId) -> StacksHttpRequest { - StacksHttpRequest::new_for_peer( - host, - "GET".into(), - format!("/v3/blocks/{}", &block_id), - HttpRequestContents::new(), - ) - .expect("FATAL: failed to construct request from infallible data") - } -} - -impl StacksHttpResponse { - /// Decode an HTTP response into a block. - /// If it fails, return Self::Error(..) - pub fn decode_nakamoto_block(self) -> Result { - let contents = self.get_http_payload_ok()?; - - // contents will be raw bytes - let block_bytes: Vec = contents.try_into()?; - let block = NakamotoBlock::consensus_deserialize(&mut &block_bytes[..])?; - - Ok(block) - } -} diff --git a/stackslib/src/net/api/getcontractsrc.rs b/stackslib/src/net/api/getcontractsrc.rs index ab97d45eb43..505299d769a 100644 --- a/stackslib/src/net/api/getcontractsrc.rs +++ b/stackslib/src/net/api/getcontractsrc.rs @@ -141,12 +141,12 @@ impl RPCRequestHandler for RPCGetContractSrcRequestHandler { let source = db.get_contract_src(&contract_identifier)?; let contract_commit_key = make_contract_hash_key(&contract_identifier); let (contract_commit, proof) = if with_proof { - db.get_data_with_proof::(&contract_commit_key) + db.get_with_proof::(&contract_commit_key) .ok() .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { - db.get_data::(&contract_commit_key) + db.get::(&contract_commit_key) .ok() .flatten() .map(|a| (a, None))? diff --git a/stackslib/src/net/api/getdatavar.rs b/stackslib/src/net/api/getdatavar.rs index d6ef8f0dec7..aa1c1116af1 100644 --- a/stackslib/src/net/api/getdatavar.rs +++ b/stackslib/src/net/api/getdatavar.rs @@ -154,16 +154,12 @@ impl RPCRequestHandler for RPCGetDataVarRequestHandler { clarity_tx.with_clarity_db_readonly(|clarity_db| { let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_data_with_proof(&key) + .get_with_proof(&key) .ok() .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b)))))? } else { - clarity_db - .get_data(&key) - .ok() - .flatten() - .map(|a| (a, None))? + clarity_db.get(&key).ok().flatten().map(|a| (a, None))? 
}; let data = format!("0x{}", value_hex); diff --git a/stackslib/src/net/api/getmapentry.rs b/stackslib/src/net/api/getmapentry.rs index 1265d8e07f0..099ae260bd3 100644 --- a/stackslib/src/net/api/getmapentry.rs +++ b/stackslib/src/net/api/getmapentry.rs @@ -183,7 +183,7 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { clarity_tx.with_clarity_db_readonly(|clarity_db| { let (value_hex, marf_proof): (String, _) = if with_proof { clarity_db - .get_data_with_proof(&key) + .get_with_proof(&key) .ok() .flatten() .map(|(a, b)| (a, Some(format!("0x{}", to_hex(&b))))) @@ -193,7 +193,7 @@ impl RPCRequestHandler for RPCGetMapEntryRequestHandler { }) } else { clarity_db - .get_data(&key) + .get(&key) .ok() .flatten() .map(|a| (a, None)) diff --git a/stackslib/src/net/api/gettenure.rs b/stackslib/src/net/api/gettenure.rs deleted file mode 100644 index 58ff2f96f06..00000000000 --- a/stackslib/src/net/api/gettenure.rs +++ /dev/null @@ -1,364 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. - -use std::io::{Read, Seek, SeekFrom, Write}; -use std::{fs, io}; - -use regex::{Captures, Regex}; -use serde::de::Error as de_Error; -use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; -use stacks_common::types::net::PeerHost; -use stacks_common::util::hash::to_hex; -use {serde, serde_json}; - -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::Error as ChainError; -use crate::net::api::getblock_v3::NakamotoBlockStream; -use crate::net::http::{ - parse_bytes, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, HttpNotFound, - HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, HttpResponseContents, - HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, -}; -use crate::net::httpcore::{ - HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, - StacksHttpResponse, -}; -use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -#[derive(Clone)] -pub struct RPCNakamotoTenureRequestHandler { - /// Block to start streaming from. It and its ancestors will be incrementally streamed until one of - /// the following happens: - /// * we reach the first block in the tenure - /// * we would exceed MAX_MESSAGE_LEN bytes transmitted if we started sending the next block - pub block_id: Option<StacksBlockId>, - /// What's the final block ID to stream from?
- /// Passed as `stop=` query parameter - pub last_block_id: Option<StacksBlockId>, -} - -impl RPCNakamotoTenureRequestHandler { - pub fn new() -> Self { - Self { - block_id: None, - last_block_id: None, - } - } -} - -pub struct NakamotoTenureStream { - /// stream for the current block - pub block_stream: NakamotoBlockStream, - /// connection to the headers DB - pub headers_conn: DBConn, - /// total bytes sent so far - pub total_sent: u64, - /// stop streaming if we reach this block - pub last_block_id: Option<StacksBlockId>, -} - -impl NakamotoTenureStream { - pub fn new( - chainstate: &StacksChainState, - block_id: StacksBlockId, - consensus_hash: ConsensusHash, - parent_block_id: StacksBlockId, - last_block_id: Option<StacksBlockId>, - ) -> Result<Self, ChainError> { - let block_stream = - NakamotoBlockStream::new(chainstate, block_id, consensus_hash, parent_block_id)?; - let headers_conn = chainstate.reopen_db()?; - Ok(NakamotoTenureStream { - block_stream, - headers_conn, - total_sent: 0, - last_block_id, - }) - } - - /// Start streaming the next block (i.e. the parent of the block we last streamed). - /// Return Ok(true) if we can fit the block into the stream. - /// Return Ok(false) if not. The caller will need to call this RPC method again with the block - /// ID of the last block it received. - /// Return Err(..) on DB error - pub fn next_block(&mut self) -> Result<bool, ChainError> { - let parent_header = NakamotoChainState::get_block_header( - &self.headers_conn, - &self.block_stream.parent_block_id, - )? - .ok_or(ChainError::NoSuchBlockError)?; - - // stop sending if the parent is an epoch2 block - let Some(parent_nakamoto_header) = parent_header.anchored_header.as_stacks_nakamoto() - else { - return Ok(false); - }; - - if let Some(last_block_id) = self.last_block_id.as_ref() { - if &parent_nakamoto_header.block_id() == last_block_id { - // asked to stop - return Ok(false); - } - } - - // stop sending if the parent is in a different tenure - if parent_nakamoto_header.consensus_hash != self.block_stream.consensus_hash { - return Ok(false); - } - - let parent_size = self - .block_stream - .staging_db_conn - .conn() - .get_nakamoto_block_size(&self.block_stream.parent_block_id)? - .ok_or(ChainError::NoSuchBlockError)?; - - self.total_sent = self - .total_sent - .saturating_add(self.block_stream.total_bytes); - if self.total_sent.saturating_add(parent_size) > MAX_MESSAGE_LEN.into() { - // out of space to send this - return Ok(false); - } - - self.block_stream.reset( - parent_nakamoto_header.block_id(), - parent_nakamoto_header.parent_block_id.clone(), - )?; - Ok(true) - } -} - -/// Decode the HTTP request -impl HttpRequest for RPCNakamotoTenureRequestHandler { - fn verb(&self) -> &'static str { - "GET" - } - - fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/tenures/(?P<block_id>[0-9a-f]{64})$"#).unwrap() - } - - /// Try to decode this request. - /// There's nothing to load here, so just make sure the request is well-formed. - fn try_parse_request( - &mut self, - preamble: &HttpRequestPreamble, - captures: &Captures, - query: Option<&str>, - _body: &[u8], - ) -> Result<HttpRequestContents, Error> { - if preamble.get_content_length() != 0 { - return Err(Error::DecodeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - - let block_id_str = captures - .name("block_id") - .ok_or_else(|| { - Error::DecodeError("Failed to match path to block ID group".to_string()) - })?
- .as_str(); - - let block_id = StacksBlockId::from_hex(block_id_str).map_err(|_| { - Error::DecodeError("Invalid path: unparseable consensus hash".to_string()) - })?; - - let req_contents = HttpRequestContents::new().query_string(query); - let last_block_id = req_contents - .get_query_arg("stop") - .map(|last_block_id_hex| StacksBlockId::from_hex(&last_block_id_hex)) - .transpose() - .map_err(|e| { - Error::DecodeError(format!("Failed to parse stop= query parameter: {:?}", &e)) - })?; - - self.last_block_id = last_block_id; - self.block_id = Some(block_id); - - Ok(req_contents) - } -} - -impl RPCRequestHandler for RPCNakamotoTenureRequestHandler { - /// Reset internal state - fn restart(&mut self) { - self.block_id = None; - self.last_block_id = None; - } - - /// Make the response - fn try_handle_request( - &mut self, - preamble: HttpRequestPreamble, - _contents: HttpRequestContents, - node: &mut StacksNodeState, - ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let block_id = self - .block_id - .take() - .ok_or(NetError::SendError("Missing `block_id`".into()))?; - - let stream_res = - node.with_node_state(|_network, _sortdb, chainstate, _mempool, _rpc_args| { - let Some(header) = - NakamotoChainState::get_block_header_nakamoto(chainstate.db(), &block_id)? - else { - return Err(ChainError::NoSuchBlockError); - }; - let Some(nakamoto_header) = header.anchored_header.as_stacks_nakamoto() else { - return Err(ChainError::NoSuchBlockError); - }; - NakamotoTenureStream::new( - chainstate, - block_id, - nakamoto_header.consensus_hash.clone(), - nakamoto_header.parent_block_id.clone(), - self.last_block_id.clone(), - ) - }); - - // start loading up the block - let stream = match stream_res { - Ok(stream) => stream, - Err(ChainError::NoSuchBlockError) => { - return StacksHttpResponse::new_error( - &preamble, - &HttpNotFound::new(format!("No such block {:?}\n", &block_id)), - ) - .try_into_contents() - .map_err(NetError::from) - } - Err(e) => { - // nope -- error trying to check - let msg = format!("Failed to load block {}: {:?}\n", &block_id, &e); - warn!("{}", &msg); - return StacksHttpResponse::new_error(&preamble, &HttpServerError::new(msg)) - .try_into_contents() - .map_err(NetError::from); - } - }; - - let resp_preamble = HttpResponsePreamble::from_http_request_preamble( - &preamble, - 200, - "OK", - None, - HttpContentType::Bytes, - ); - - Ok(( - resp_preamble, - HttpResponseContents::from_stream(Box::new(stream)), - )) - } -} - -/// Decode the HTTP response -impl HttpResponse for RPCNakamotoTenureRequestHandler { - /// Decode this response from a byte stream. 
This is called by the client to decode this - /// message - fn try_parse_response( - &self, - preamble: &HttpResponsePreamble, - body: &[u8], - ) -> Result { - let bytes = parse_bytes(preamble, body, MAX_MESSAGE_LEN.into())?; - Ok(HttpResponsePayload::Bytes(bytes)) - } -} - -/// Stream implementation for a Nakamoto block -impl HttpChunkGenerator for NakamotoTenureStream { - #[cfg(test)] - fn hint_chunk_size(&self) -> usize { - // make this hurt - 32 - } - - #[cfg(not(test))] - fn hint_chunk_size(&self) -> usize { - 4096 - } - - fn generate_next_chunk(&mut self) -> Result, String> { - let next_block_chunk = self.block_stream.generate_next_chunk()?; - if next_block_chunk.len() > 0 { - // have block data to send - return Ok(next_block_chunk); - } - - // load up next block - let send_more = self.next_block().map_err(|e| { - let msg = format!("Failed to load next block in this tenure: {:?}", &e); - warn!("{}", &msg); - msg - })?; - - if !send_more { - return Ok(vec![]); - } - - self.block_stream.generate_next_chunk() - } -} - -impl StacksHttpRequest { - pub fn new_get_nakamoto_tenure( - host: PeerHost, - block_id: StacksBlockId, - last_block_id: Option, - ) -> StacksHttpRequest { - StacksHttpRequest::new_for_peer( - host, - "GET".into(), - format!( - "/v3/tenures/{}{}", - &block_id, - last_block_id - .map(|block_id| format!("?stop={}", &block_id)) - .unwrap_or("".to_string()) - ), - HttpRequestContents::new(), - ) - .expect("FATAL: failed to construct request from infallible data") - } -} - -impl StacksHttpResponse { - /// Decode an HTTP response into a tenure. - /// The bytes are a concatenation of Nakamoto blocks, with no length prefix. - /// If it fails, return Self::Error(..) - pub fn decode_nakamoto_tenure(self) -> Result, NetError> { - let contents = self.get_http_payload_ok()?; - - // contents will be raw bytes - let tenure_bytes: Vec = contents.try_into()?; - let ptr = &mut tenure_bytes.as_slice(); - - let mut blocks = vec![]; - while ptr.len() > 0 { - let block = NakamotoBlock::consensus_deserialize(ptr)?; - blocks.push(block); - } - - Ok(blocks) - } -} diff --git a/stackslib/src/net/api/gettenureinfo.rs b/stackslib/src/net/api/gettenureinfo.rs deleted file mode 100644 index 1d690d47eee..00000000000 --- a/stackslib/src/net/api/gettenureinfo.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::io::{Read, Seek, SeekFrom, Write}; -use std::{fs, io}; - -use regex::{Captures, Regex}; -use serde::de::Error as de_Error; -use stacks_common::codec::{StacksMessageCodec, MAX_MESSAGE_LEN}; -use stacks_common::types::chainstate::{ConsensusHash, StacksBlockId}; -use stacks_common::types::net::PeerHost; -use stacks_common::util::hash::to_hex; -use {serde, serde_json}; - -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState, NakamotoStagingBlocksConn}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::Error as ChainError; -use crate::net::api::getblock_v3::NakamotoBlockStream; -use crate::net::http::{ - parse_bytes, parse_json, Error, HttpBadRequest, HttpChunkGenerator, HttpContentType, - HttpNotFound, HttpRequest, HttpRequestContents, HttpRequestPreamble, HttpResponse, - HttpResponseContents, HttpResponsePayload, HttpResponsePreamble, HttpServerError, HttpVersion, -}; -use crate::net::httpcore::{ - HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, StacksHttpRequest, - StacksHttpResponse, -}; -use crate::net::{Error as NetError, StacksNodeState, TipRequest, MAX_HEADERS}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -#[derive(Clone)] -pub struct RPCNakamotoTenureInfoRequestHandler {} - -impl RPCNakamotoTenureInfoRequestHandler { - pub fn new() -> Self { - Self {} - } -} - -/// The view of this node's current tenure. -/// All of this information can be found from the PeerNetwork struct, so loading this up should -/// incur zero disk I/O. -#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] -pub struct RPCGetTenureInfo { - /// The highest known consensus hash (identifies the current tenure) - pub consensus_hash: ConsensusHash, - /// The tenure-start block ID of the current tenure - pub tenure_start_block_id: StacksBlockId, - /// The consensus hash of the parent tenure - pub parent_consensus_hash: ConsensusHash, - /// The block hash of the parent tenure's start block - pub parent_tenure_start_block_id: StacksBlockId, - /// The highest Stacks block ID in the current tenure - pub tip_block_id: StacksBlockId, - /// The height of this tip - pub tip_height: u64, - /// Which reward cycle we're in - pub reward_cycle: u64, -} - -/// Decode the HTTP request -impl HttpRequest for RPCNakamotoTenureInfoRequestHandler { - fn verb(&self) -> &'static str { - "GET" - } - - fn path_regex(&self) -> Regex { - Regex::new(r#"^/v3/tenures/info"#).unwrap() - } - - /// Try to decode this request. - /// There's nothing to load here, so just make sure the request is well-formed. 
- fn try_parse_request( - &mut self, - preamble: &HttpRequestPreamble, - _captures: &Captures, - query: Option<&str>, - _body: &[u8], - ) -> Result { - if preamble.get_content_length() != 0 { - return Err(Error::DecodeError( - "Invalid Http request: expected 0-length body".to_string(), - )); - } - Ok(HttpRequestContents::new().query_string(query)) - } -} - -impl RPCRequestHandler for RPCNakamotoTenureInfoRequestHandler { - /// Reset internal state - fn restart(&mut self) {} - - /// Make the response - fn try_handle_request( - &mut self, - preamble: HttpRequestPreamble, - _contents: HttpRequestContents, - node: &mut StacksNodeState, - ) -> Result<(HttpResponsePreamble, HttpResponseContents), NetError> { - let info = node.with_node_state(|network, _sortdb, _chainstate, _mempool, _rpc_args| { - RPCGetTenureInfo { - consensus_hash: network.stacks_tip.0.clone(), - tenure_start_block_id: network.tenure_start_block_id.clone(), - parent_consensus_hash: network.parent_stacks_tip.0.clone(), - parent_tenure_start_block_id: StacksBlockId::new( - &network.parent_stacks_tip.0, - &network.parent_stacks_tip.1, - ), - tip_block_id: StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1), - tip_height: network.stacks_tip.2, - reward_cycle: network - .burnchain - .block_height_to_reward_cycle(network.burnchain_tip.block_height) - .expect("FATAL: burnchain tip before system start"), - } - }); - - let preamble = HttpResponsePreamble::ok_json(&preamble); - let body = HttpResponseContents::try_from_json(&info)?; - Ok((preamble, body)) - } -} - -/// Decode the HTTP response -impl HttpResponse for RPCNakamotoTenureInfoRequestHandler { - fn try_parse_response( - &self, - preamble: &HttpResponsePreamble, - body: &[u8], - ) -> Result { - let peer_info: RPCGetTenureInfo = parse_json(preamble, body)?; - Ok(HttpResponsePayload::try_from_json(peer_info)?) 
- } -} - -impl StacksHttpRequest { - /// Make a new getinfo request to this endpoint - pub fn new_get_nakamoto_tenure_info(host: PeerHost) -> StacksHttpRequest { - StacksHttpRequest::new_for_peer( - host, - "GET".into(), - "/v3/tenures/info".into(), - HttpRequestContents::new(), - ) - .expect("FATAL: failed to construct request from infallible data") - } -} - -impl StacksHttpResponse { - pub fn decode_nakamoto_tenure_info(self) -> Result { - let contents = self.get_http_payload_ok()?; - let response_json: serde_json::Value = contents.try_into()?; - let tenure_info: RPCGetTenureInfo = serde_json::from_value(response_json) - .map_err(|_e| Error::DecodeError("Failed to decode JSON".to_string()))?; - Ok(tenure_info) - } -} diff --git a/stackslib/src/net/api/mod.rs b/stackslib/src/net/api/mod.rs index f1af0a9e604..c1a042aef41 100644 --- a/stackslib/src/net/api/mod.rs +++ b/stackslib/src/net/api/mod.rs @@ -36,7 +36,6 @@ pub mod getaccount; pub mod getattachment; pub mod getattachmentsinv; pub mod getblock; -pub mod getblock_v3; pub mod getconstantval; pub mod getcontractabi; pub mod getcontractsrc; @@ -54,8 +53,6 @@ pub mod getstackerdbchunk; pub mod getstackerdbmetadata; pub mod getstackers; pub mod getstxtransfercost; -pub mod gettenure; -pub mod gettenureinfo; pub mod gettransaction_unconfirmed; pub mod liststackerdbreplicas; pub mod postblock; @@ -81,7 +78,6 @@ impl StacksHttp { self.register_rpc_endpoint(getattachment::RPCGetAttachmentRequestHandler::new()); self.register_rpc_endpoint(getattachmentsinv::RPCGetAttachmentsInvRequestHandler::new()); self.register_rpc_endpoint(getblock::RPCBlocksRequestHandler::new()); - self.register_rpc_endpoint(getblock_v3::RPCNakamotoBlockRequestHandler::new()); self.register_rpc_endpoint(getconstantval::RPCGetConstantValRequestHandler::new()); self.register_rpc_endpoint(getcontractabi::RPCGetContractAbiRequestHandler::new()); self.register_rpc_endpoint(getcontractsrc::RPCGetContractSrcRequestHandler::new()); @@ -109,8 +105,6 @@ impl StacksHttp { getstackerdbmetadata::RPCGetStackerDBMetadataRequestHandler::new(), ); self.register_rpc_endpoint(getstackers::GetStackersRequestHandler::default()); - self.register_rpc_endpoint(gettenure::RPCNakamotoTenureRequestHandler::new()); - self.register_rpc_endpoint(gettenureinfo::RPCNakamotoTenureInfoRequestHandler::new()); self.register_rpc_endpoint( gettransaction_unconfirmed::RPCGetTransactionUnconfirmedRequestHandler::new(), ); diff --git a/stackslib/src/net/api/postblock_proposal.rs b/stackslib/src/net/api/postblock_proposal.rs index 91de89a1739..50b04152978 100644 --- a/stackslib/src/net/api/postblock_proposal.rs +++ b/stackslib/src/net/api/postblock_proposal.rs @@ -209,19 +209,13 @@ impl NakamotoBlockProposal { let burn_dbconn = sortdb.index_conn(); let sort_tip = SortitionDB::get_canonical_sortition_tip(sortdb.conn())?; let mut db_handle = sortdb.index_handle(&sort_tip); - let expected_burn_opt = + let expected_burn = NakamotoChainState::get_expected_burns(&mut db_handle, chainstate.db(), &self.block)?; - if expected_burn_opt.is_none() { - return Err(BlockValidateRejectReason { - reason_code: ValidateRejectCode::UnknownParent, - reason: "Failed to find parent expected burns".into(), - }); - }; // Static validation checks NakamotoChainState::validate_nakamoto_block_burnchain( &db_handle, - expected_burn_opt, + expected_burn, &self.block, mainnet, self.chain_id, diff --git a/stackslib/src/net/api/poststackerdbchunk.rs b/stackslib/src/net/api/poststackerdbchunk.rs index 2f28dd3f2dc..d7901534e09 100644 --- 
a/stackslib/src/net/api/poststackerdbchunk.rs +++ b/stackslib/src/net/api/poststackerdbchunk.rs @@ -145,16 +145,6 @@ impl StackerDBErrorCodes { "reason": self.reason() }) } - - #[cfg_attr(test, mutants::skip)] - pub fn from_code(code: u32) -> Option { - match code { - 0 => Some(Self::DataAlreadyExists), - 1 => Some(Self::NoSuchSlot), - 2 => Some(Self::BadSigner), - _ => None, - } - } } impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler { @@ -229,23 +219,31 @@ impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler { } }; - let err_code = if slot_metadata_opt.is_some() { - if let NetError::BadSlotSigner(..) = e { + let (reason, slot_metadata_opt) = if let Some(slot_metadata) = slot_metadata_opt + { + let code = if let NetError::BadSlotSigner(..) = e { StackerDBErrorCodes::BadSigner } else { StackerDBErrorCodes::DataAlreadyExists - } + }; + + ( + serde_json::to_string(&code.into_json()) + .unwrap_or("(unable to encode JSON)".to_string()), + Some(slot_metadata), + ) } else { - StackerDBErrorCodes::NoSuchSlot + ( + serde_json::to_string(&StackerDBErrorCodes::NoSuchSlot.into_json()) + .unwrap_or("(unable to encode JSON)".to_string()), + None, + ) }; - let reason = serde_json::to_string(&err_code.clone().into_json()) - .unwrap_or("(unable to encode JSON)".to_string()); let ack = StackerDBChunkAckData { accepted: false, reason: Some(reason), metadata: slot_metadata_opt, - code: Some(err_code.code()), }; return Ok(ack); } @@ -283,7 +281,6 @@ impl RPCRequestHandler for RPCPostStackerDBChunkRequestHandler { accepted: true, reason: None, metadata: Some(slot_metadata), - code: None, }; return Ok(ack); diff --git a/stackslib/src/net/api/tests/getblock_v3.rs b/stackslib/src/net/api/tests/getblock_v3.rs deleted file mode 100644 index de1a76f7481..00000000000 --- a/stackslib/src/net/api/tests/getblock_v3.rs +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
- -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; -use clarity::vm::{ClarityName, ContractName}; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, -}; -use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; - -use super::TestRPC; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use crate::chainstate::stacks::db::blocks::test::*; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, -}; -use crate::net::api::getblock_v3::NakamotoBlockStream; -use crate::net::api::*; -use crate::net::connection::ConnectionOptions; -use crate::net::http::HttpChunkGenerator; -use crate::net::httpcore::{ - HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, - StacksHttpRequest, -}; -use crate::net::test::TestEventObserver; -use crate::net::tests::inv::nakamoto::make_nakamoto_peer_from_invs; -use crate::net::{ProtocolFamily, TipRequest}; -use crate::util_lib::db::DBConn; - -#[test] -fn test_try_parse_request() { - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); - let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - - let request = StacksHttpRequest::new_get_nakamoto_block(addr.into(), StacksBlockId([0x11; 32])); - let bytes = request.try_serialize().unwrap(); - - debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); - - let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = getblock_v3::RPCNakamotoBlockRequestHandler::new(); - let mut parsed_request = http - .handle_try_parse_request( - &mut handler, - &parsed_preamble.expect_request(), - &bytes[offset..], - ) - .unwrap(); - - // parsed request consumes headers that would not be in a constructed request - parsed_request.clear_headers(); - let (preamble, contents) = parsed_request.destruct(); - - // consumed path args - assert_eq!(handler.block_id, Some(StacksBlockId([0x11; 32]))); - - assert_eq!(&preamble, request.preamble()); - - handler.restart(); - assert!(handler.block_id.is_none()); -} - -#[test] -fn test_try_make_response() { - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); - - let test_observer = TestEventObserver::new(); - let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); - - let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); - let consensus_hash = rpc_test.consensus_hash.clone(); - - let mut requests = vec![]; - - // query existing block - let request = - StacksHttpRequest::new_get_nakamoto_block(addr.into(), nakamoto_chain_tip.clone()); - requests.push(request); - - // query non-existent block - let request = StacksHttpRequest::new_get_nakamoto_block(addr.into(), StacksBlockId([0x11; 32])); - requests.push(request); - - let mut responses = rpc_test.run(requests); - - // got the block - let response = responses.remove(0); - let resp = response.decode_nakamoto_block().unwrap(); - - assert_eq!( - StacksBlockHeader::make_index_block_hash(&consensus_hash, &resp.header.block_hash()), - nakamoto_chain_tip - ); - - // no block - let response = responses.remove(0); - let (preamble, body) = response.destruct(); - - assert_eq!(preamble.status_code, 404); } -
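// --- Editorial aside (not part of the patch): the deleted
// test_stream_nakamoto_blocks below drains a NakamotoBlockStream by calling
// generate_next_chunk() until it yields an empty buffer. A std-only sketch of
// that drain loop; `drain_chunks` and the closure standing in for the stream
// are hypothetical names for illustration:
fn drain_chunks(
    mut next_chunk: impl FnMut() -> Result<Vec<u8>, String>,
) -> Result<Vec<u8>, String> {
    let mut all_bytes = Vec::new();
    loop {
        let mut chunk = next_chunk()?;
        if chunk.is_empty() {
            break; // an empty chunk signals end-of-stream, as in the test's loop
        }
        all_bytes.append(&mut chunk);
    }
    Ok(all_bytes)
}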
-#[test] -fn test_stream_nakamoto_blocks() { - let test_observer = TestEventObserver::new(); - let bitvecs = vec![vec![ - true, true, true, true, true, true, true, true, true, true, - ]]; - - let mut peer = - make_nakamoto_peer_from_invs(function_name!(), &test_observer, 10, 3, bitvecs.clone()); - - // can't stream a nonexistant block - assert!(NakamotoBlockStream::new( - peer.chainstate(), - StacksBlockId([0x11; 32]), - ConsensusHash([0x22; 20]), - StacksBlockId([0x33; 32]) - ) - .is_err()); - - let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let ih = sortdb.index_handle(&tip.sortition_id); - let nakamoto_tip = ih.get_nakamoto_tip().unwrap().unwrap(); - peer.sortdb = Some(sortdb); - nakamoto_tip - }; - - let nakamoto_tip_block_id = StacksBlockId::new(&nakamoto_tip.0, &nakamoto_tip.1); - let nakamoto_header = { - let header_info = NakamotoChainState::get_block_header_nakamoto( - peer.chainstate().db(), - &nakamoto_tip_block_id, - ) - .unwrap() - .unwrap(); - header_info - .anchored_header - .as_stacks_nakamoto() - .cloned() - .unwrap() - }; - - let mut stream = NakamotoBlockStream::new( - peer.chainstate(), - nakamoto_tip_block_id, - nakamoto_tip.0.clone(), - nakamoto_header.parent_block_id.clone(), - ) - .unwrap(); - let mut all_block_bytes = vec![]; - - loop { - let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.is_empty() { - break; - } - test_debug!( - "Got {} more bytes from staging; add to {} total", - next_bytes.len(), - all_block_bytes.len() - ); - all_block_bytes.append(&mut next_bytes); - } - - let staging_block = NakamotoBlock::consensus_deserialize(&mut &all_block_bytes[..]).unwrap(); - assert_eq!(staging_block.header.block_id(), nakamoto_tip_block_id); -} diff --git a/stackslib/src/net/api/tests/gettenure.rs b/stackslib/src/net/api/tests/gettenure.rs deleted file mode 100644 index c4f179acc94..00000000000 --- a/stackslib/src/net/api/tests/gettenure.rs +++ /dev/null @@ -1,205 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
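Note: test_stream_nakamoto_blocks above exercised the chunked-stream contract shared by the block and tenure streamers: keep calling generate_next_chunk() until it yields an empty chunk, then decode the accumulated bytes. The consumer loop looks roughly like this, with a hypothetical next_chunk closure standing in for HttpChunkGenerator::generate_next_chunk:

// Pull chunks until an empty one signals end-of-stream, then hand the
// concatenated bytes to the decoder. Types here are illustrative stand-ins.
fn drain_stream<E>(mut next_chunk: impl FnMut() -> Result<Vec<u8>, E>) -> Result<Vec<u8>, E> {
    let mut all_bytes = Vec::new();
    loop {
        let mut chunk = next_chunk()?;
        if chunk.is_empty() {
            break; // empty chunk terminates the stream
        }
        all_bytes.append(&mut chunk);
    }
    Ok(all_bytes)
}

fn main() {
    let mut chunks = vec![vec![], vec![3, 4], vec![1, 2]];
    let next = || -> Result<Vec<u8>, String> { Ok(chunks.pop().unwrap()) };
    let bytes = drain_stream(next).unwrap();
    assert_eq!(bytes, vec![1, 2, 3, 4]); // chunks arrive in order; empty ends it
}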
- -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; -use clarity::vm::{ClarityName, ContractName}; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, -}; -use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; - -use super::TestRPC; -use crate::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandle}; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use crate::chainstate::stacks::db::blocks::test::*; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlock, StacksBlockHeader, StacksMicroblock, -}; -use crate::net::api::gettenure::NakamotoTenureStream; -use crate::net::api::*; -use crate::net::connection::ConnectionOptions; -use crate::net::http::HttpChunkGenerator; -use crate::net::httpcore::{ - HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, - StacksHttpRequest, -}; -use crate::net::test::TestEventObserver; -use crate::net::tests::inv::nakamoto::make_nakamoto_peer_from_invs; -use crate::net::{ProtocolFamily, TipRequest}; -use crate::util_lib::db::DBConn; - -#[test] -fn test_try_parse_request() { - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); - let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - - let request = - StacksHttpRequest::new_get_nakamoto_tenure(addr.into(), StacksBlockId([0x11; 32]), None); - let bytes = request.try_serialize().unwrap(); - - debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); - - let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut handler = gettenure::RPCNakamotoTenureRequestHandler::new(); - let mut parsed_request = http - .handle_try_parse_request( - &mut handler, - &parsed_preamble.expect_request(), - &bytes[offset..], - ) - .unwrap(); - - // parsed request consumes headers that would not be in a constructed reqeuest - parsed_request.clear_headers(); - let (preamble, contents) = parsed_request.destruct(); - - // consumed path args - assert_eq!(handler.block_id, Some(StacksBlockId([0x11; 32]))); - - assert_eq!(&preamble, request.preamble()); - - handler.restart(); - assert!(handler.block_id.is_none()); -} - -#[test] -fn test_try_make_response() { - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); - - let test_observer = TestEventObserver::new(); - let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); - - let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); - let consensus_hash = rpc_test.consensus_hash.clone(); - - let mut requests = vec![]; - - // query existing tenure - let request = - StacksHttpRequest::new_get_nakamoto_tenure(addr.into(), nakamoto_chain_tip.clone(), None); - requests.push(request); - - // TODO: mid-tenure? - // TODO: just the start of the tenure? 
- - // query non-existant block - let request = - StacksHttpRequest::new_get_nakamoto_tenure(addr.into(), StacksBlockId([0x11; 32]), None); - requests.push(request); - - let mut responses = rpc_test.run(requests); - - // got the block - let response = responses.remove(0); - let resp = response.decode_nakamoto_tenure().unwrap(); - - info!("response: {:?}", &resp); - assert_eq!(resp.len(), 10); - assert_eq!(resp.first().unwrap().header.block_id(), nakamoto_chain_tip); - - // no block - let response = responses.remove(0); - let (preamble, body) = response.destruct(); - - assert_eq!(preamble.status_code, 404); -} - -#[test] -fn test_stream_nakamoto_tenure() { - let test_observer = TestEventObserver::new(); - let bitvecs = vec![vec![ - true, true, true, true, true, true, true, true, true, true, - ]]; - - let mut peer = - make_nakamoto_peer_from_invs(function_name!(), &test_observer, 10, 3, bitvecs.clone()); - - // can't stream a nonexistant tenure - assert!(NakamotoTenureStream::new( - peer.chainstate(), - StacksBlockId([0x11; 32]), - ConsensusHash([0x22; 20]), - StacksBlockId([0x33; 32]), - None - ) - .is_err()); - - let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - let ih = sortdb.index_handle(&tip.sortition_id); - let nakamoto_tip = ih.get_nakamoto_tip().unwrap().unwrap(); - peer.sortdb = Some(sortdb); - nakamoto_tip - }; - - let nakamoto_tip_block_id = StacksBlockId::new(&nakamoto_tip.0, &nakamoto_tip.1); - let nakamoto_header = { - let header_info = NakamotoChainState::get_block_header_nakamoto( - peer.chainstate().db(), - &nakamoto_tip_block_id, - ) - .unwrap() - .unwrap(); - header_info - .anchored_header - .as_stacks_nakamoto() - .cloned() - .unwrap() - }; - - let mut stream = NakamotoTenureStream::new( - peer.chainstate(), - nakamoto_tip_block_id.clone(), - nakamoto_header.consensus_hash.clone(), - nakamoto_header.parent_block_id.clone(), - None, - ) - .unwrap(); - let mut all_block_bytes = vec![]; - - loop { - let mut next_bytes = stream.generate_next_chunk().unwrap(); - if next_bytes.is_empty() { - break; - } - test_debug!( - "Got {} more bytes from staging; add to {} total", - next_bytes.len(), - all_block_bytes.len() - ); - all_block_bytes.append(&mut next_bytes); - } - - let ptr = &mut all_block_bytes.as_slice(); - let mut blocks = vec![]; - while ptr.len() > 0 { - let block = NakamotoBlock::consensus_deserialize(ptr).unwrap(); - blocks.push(block); - } - - info!("blocks = {:?}", &blocks); - assert_eq!(blocks.len(), 10); - assert_eq!( - blocks.first().unwrap().header.block_id(), - nakamoto_tip_block_id - ); -} diff --git a/stackslib/src/net/api/tests/gettenureinfo.rs b/stackslib/src/net/api/tests/gettenureinfo.rs deleted file mode 100644 index db53a5daca2..00000000000 --- a/stackslib/src/net/api/tests/gettenureinfo.rs +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2023 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. 
-// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; - -use clarity::vm::types::{QualifiedContractIdentifier, StacksAddressExtensions}; -use clarity::vm::{ClarityName, ContractName}; -use serde_json; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::StacksAddress; -use stacks_common::types::net::PeerHost; -use stacks_common::types::Address; - -use super::test_rpc; -use crate::net::api::getinfo::RPCPeerInfoData; -use crate::net::api::tests::TestRPC; -use crate::net::api::*; -use crate::net::connection::ConnectionOptions; -use crate::net::httpcore::{ - HttpPreambleExtensions, HttpRequestContentsExtensions, RPCRequestHandler, StacksHttp, - StacksHttpRequest, -}; -use crate::net::test::TestEventObserver; -use crate::net::{ProtocolFamily, TipRequest}; - -#[test] -fn test_try_parse_request() { - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); - let mut http = StacksHttp::new(addr.clone(), &ConnectionOptions::default()); - - let request = StacksHttpRequest::new_get_nakamoto_tenure_info(addr.into()); - - let bytes = request.try_serialize().unwrap(); - - debug!("Request:\n{}\n", std::str::from_utf8(&bytes).unwrap()); - - let (parsed_preamble, offset) = http.read_preamble(&bytes).unwrap(); - let mut parsed_request = http - .try_parse_request(&parsed_preamble.expect_request(), &bytes[offset..]) - .unwrap(); - - // parsed request consumes headers that would not be in a constructed reqeuest - parsed_request.clear_headers(); - let (preamble, contents) = parsed_request.destruct(); - - assert_eq!(&preamble, request.preamble()); -} - -#[test] -fn test_try_make_response() { - let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); - - let test_observer = TestEventObserver::new(); - let rpc_test = TestRPC::setup_nakamoto(function_name!(), &test_observer); - - let nakamoto_chain_tip = rpc_test.canonical_tip.clone(); - let consensus_hash = rpc_test.consensus_hash.clone(); - - let mut requests = vec![]; - - // query existing account - let request = StacksHttpRequest::new_get_nakamoto_tenure_info(addr.into()); - requests.push(request); - - let mut responses = rpc_test.run(requests); - - let response = responses.remove(0); - debug!( - "Response:\n{}\n", - std::str::from_utf8(&response.try_serialize().unwrap()).unwrap() - ); - - let resp = response.decode_nakamoto_tenure_info().unwrap(); - assert_eq!(resp.consensus_hash, consensus_hash); - assert_eq!(resp.tip_block_id, nakamoto_chain_tip); -} diff --git a/stackslib/src/net/api/tests/mod.rs b/stackslib/src/net/api/tests/mod.rs index ce67147a9e4..e58c56562e7 100644 --- a/stackslib/src/net/api/tests/mod.rs +++ b/stackslib/src/net/api/tests/mod.rs @@ -31,7 +31,6 @@ use stacks_common::util::pipe::Pipe; use crate::burnchains::bitcoin::indexer::BitcoinIndexer; use crate::burnchains::Txid; use crate::chainstate::burn::db::sortdb::SortitionDB; -use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::miner::{BlockBuilderSettings, StacksMicroblockBuilder}; use crate::chainstate::stacks::{ @@ -44,8 +43,7 @@ use crate::net::db::PeerDB; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; use crate::net::relay::Relayer; use crate::net::rpc::ConversationHttp; -use crate::net::test::{TestEventObserver, TestPeer, TestPeerConfig}; -use 
crate::net::tests::inv::nakamoto::make_nakamoto_peers_from_invs; +use crate::net::test::{TestPeer, TestPeerConfig}; use crate::net::{ Attachment, AttachmentInstance, RPCHandlerArgs, StackerDBConfig, StacksNodeState, UrlString, }; @@ -55,7 +53,6 @@ mod getaccount; mod getattachment; mod getattachmentsinv; mod getblock; -mod getblock_v3; mod getconstantval; mod getcontractabi; mod getcontractsrc; @@ -72,8 +69,6 @@ mod getpoxinfo; mod getstackerdbchunk; mod getstackerdbmetadata; mod getstxtransfercost; -mod gettenure; -mod gettenureinfo; mod gettransaction_unconfirmed; mod liststackerdbreplicas; mod postblock; @@ -199,13 +194,11 @@ pub struct TestRPC<'a> { /// list of microblock transactions pub microblock_txids: Vec, /// next block to post, and its consensus hash - pub next_block: Option<(ConsensusHash, StacksBlock)>, + pub next_block: (ConsensusHash, StacksBlock), /// next microblock to post (may already be posted) - pub next_microblock: Option, + pub next_microblock: StacksMicroblock, /// transactions that can be posted to the mempool pub sendable_txs: Vec, - /// whether or not to maintain unconfirmed microblocks (e.g. this is false for nakamoto) - pub unconfirmed_state: bool, } impl<'a> TestRPC<'a> { @@ -294,8 +287,6 @@ impl<'a> TestRPC<'a> { peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - let burnchain = peer_1_config.burnchain.clone(); - let mut peer_1 = TestPeer::new(peer_1_config); let mut peer_2 = TestPeer::new(peer_2_config); @@ -443,7 +434,6 @@ impl<'a> TestRPC<'a> { }; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -721,7 +711,6 @@ impl<'a> TestRPC<'a> { }; let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof, tip.total_burn, @@ -814,96 +803,9 @@ impl<'a> TestRPC<'a> { microblock_tip_hash: microblock.block_hash(), mempool_txids, microblock_txids, - next_block: Some((next_consensus_hash, next_stacks_block)), - next_microblock: Some(microblock), + next_block: (next_consensus_hash, next_stacks_block), + next_microblock: microblock, sendable_txs, - unconfirmed_state: true, - } - } - - /// Set up the peers as Nakamoto nodes - pub fn setup_nakamoto(test_name: &str, observer: &'a TestEventObserver) -> TestRPC<'a> { - let bitvecs = vec![vec![ - true, true, true, true, true, true, true, true, true, true, - ]]; - - let (mut peer, mut other_peers) = - make_nakamoto_peers_from_invs(function_name!(), observer, 10, 3, bitvecs.clone(), 1); - let mut other_peer = other_peers.pop().unwrap(); - - let peer_1_indexer = BitcoinIndexer::new_unit_test(&peer.config.burnchain.working_dir); - let peer_2_indexer = - BitcoinIndexer::new_unit_test(&other_peer.config.burnchain.working_dir); - - let convo_1 = ConversationHttp::new( - format!("127.0.0.1:{}", peer.config.http_port) - .parse::() - .unwrap(), - Some(UrlString::try_from(format!("http://peer1.com")).unwrap()), - peer.to_peer_host(), - &peer.config.connection_opts, - 0, - 32, - ); - - let convo_2 = ConversationHttp::new( - format!("127.0.0.1:{}", other_peer.config.http_port) - .parse::() - .unwrap(), - Some(UrlString::try_from(format!("http://peer2.com")).unwrap()), - other_peer.to_peer_host(), - &other_peer.config.connection_opts, - 1, - 32, - ); - - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); - let nakamoto_tip = { - let sortdb = peer.sortdb.take().unwrap(); - let tip = - 
NakamotoChainState::get_canonical_block_header(peer.chainstate().db(), &sortdb) - .unwrap() - .unwrap(); - peer.sortdb = Some(sortdb); - tip - }; - - // sanity check - let other_tip = - SortitionDB::get_canonical_burn_chain_tip(other_peer.sortdb().conn()).unwrap(); - let other_nakamoto_tip = { - let sortdb = other_peer.sortdb.take().unwrap(); - let tip = NakamotoChainState::get_canonical_block_header( - other_peer.chainstate().db(), - &sortdb, - ) - .unwrap() - .unwrap(); - other_peer.sortdb = Some(sortdb); - tip - }; - - assert_eq!(tip, other_tip); - assert_eq!(nakamoto_tip, other_nakamoto_tip); - - TestRPC { - privk1: peer.config.private_key.clone(), - privk2: other_peer.config.private_key.clone(), - peer_1: peer, - peer_2: other_peer, - peer_1_indexer, - peer_2_indexer, - convo_1, - convo_2, - canonical_tip: nakamoto_tip.index_block_hash(), - consensus_hash: nakamoto_tip.consensus_hash.clone(), - microblock_tip_hash: BlockHeaderHash([0x00; 32]), - mempool_txids: vec![], - microblock_txids: vec![], - next_block: None, - next_microblock: None, - sendable_txs: vec![], - unconfirmed_state: false, } } @@ -916,13 +818,9 @@ impl<'a> TestRPC<'a> { let peer_2_indexer = self.peer_2_indexer; let mut convo_1 = self.convo_1; let mut convo_2 = self.convo_2; - let unconfirmed_state = self.unconfirmed_state; let mut responses = vec![]; for request in requests.into_iter() { - peer_1.refresh_burnchain_view(); - peer_2.refresh_burnchain_view(); - convo_1.send_request(request.clone()).unwrap(); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); let peer_2_mempool = peer_2.mempool.take().unwrap(); @@ -934,13 +832,8 @@ impl<'a> TestRPC<'a> { let peer_1_sortdb = peer_1.sortdb.take().unwrap(); let mut peer_1_stacks_node = peer_1.stacks_node.take().unwrap(); - if unconfirmed_state { - Relayer::setup_unconfirmed_state( - &mut peer_1_stacks_node.chainstate, - &peer_1_sortdb, - ) + Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) .unwrap(); - } { let rpc_args = RPCHandlerArgs::default(); @@ -976,13 +869,8 @@ impl<'a> TestRPC<'a> { ) .unwrap(); - if unconfirmed_state { - Relayer::setup_unconfirmed_state( - &mut peer_2_stacks_node.chainstate, - &peer_2_sortdb, - ) + Relayer::setup_unconfirmed_state(&mut peer_2_stacks_node.chainstate, &peer_2_sortdb) .unwrap(); - } { let rpc_args = RPCHandlerArgs::default(); @@ -1022,13 +910,8 @@ impl<'a> TestRPC<'a> { ) .unwrap(); - if unconfirmed_state { - Relayer::setup_unconfirmed_state( - &mut peer_1_stacks_node.chainstate, - &peer_1_sortdb, - ) + Relayer::setup_unconfirmed_state(&mut peer_1_stacks_node.chainstate, &peer_1_sortdb) .unwrap(); - } { let rpc_args = RPCHandlerArgs::default(); diff --git a/stackslib/src/net/api/tests/postblock.rs b/stackslib/src/net/api/tests/postblock.rs index 287e97f613b..c3d1f293595 100644 --- a/stackslib/src/net/api/tests/postblock.rs +++ b/stackslib/src/net/api/tests/postblock.rs @@ -96,26 +96,33 @@ fn test_try_make_response() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let rpc_test = TestRPC::setup(function_name!()); - let next_block = rpc_test.next_block.clone().unwrap(); - let stacks_block_id = - StacksBlockHeader::make_index_block_hash(&next_block.0, &next_block.1.block_hash()); + let stacks_block_id = StacksBlockHeader::make_index_block_hash( + &rpc_test.next_block.0, + &rpc_test.next_block.1.block_hash(), + ); let mut requests = vec![]; // post the block - let request = - StacksHttpRequest::new_post_block(addr.into(), next_block.0.clone(), next_block.1.clone()); + 
let request = StacksHttpRequest::new_post_block( + addr.into(), + rpc_test.next_block.0.clone(), + rpc_test.next_block.1.clone(), + ); requests.push(request); // idempotent - let request = - StacksHttpRequest::new_post_block(addr.into(), next_block.0.clone(), next_block.1.clone()); + let request = StacksHttpRequest::new_post_block( + addr.into(), + rpc_test.next_block.0.clone(), + rpc_test.next_block.1.clone(), + ); requests.push(request); // fails if the consensus hash is not recognized let request = StacksHttpRequest::new_post_block( addr.into(), ConsensusHash([0x11; 20]), - next_block.1.clone(), + rpc_test.next_block.1.clone(), ); requests.push(request); diff --git a/stackslib/src/net/api/tests/postmicroblock.rs b/stackslib/src/net/api/tests/postmicroblock.rs index 487e9c17c69..9688b4a3fc5 100644 --- a/stackslib/src/net/api/tests/postmicroblock.rs +++ b/stackslib/src/net/api/tests/postmicroblock.rs @@ -103,7 +103,7 @@ fn test_try_make_response() { let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 33333); let test_rpc = TestRPC::setup_ex(function_name!(), false); - let mblock = test_rpc.next_microblock.clone().unwrap(); + let mblock = test_rpc.next_microblock.clone(); let mut requests = vec![]; diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 2ebcb71316e..117ada9ba89 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -18,6 +18,7 @@ use std::collections::{BinaryHeap, HashMap, HashSet}; use std::{thread, time}; use clarity::vm::types::QualifiedContractIdentifier; +use proptest::prelude::*; use stacks_common::types::chainstate::{BlockHeaderHash, StacksBlockId}; use stacks_common::types::net::{PeerAddress, PeerHost}; use stacks_common::util::hash::Hash160; diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 267f9e71bcb..1b54241197c 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -27,7 +27,7 @@ use stacks_common::types::net::PeerAddress; use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::to_hex; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; +use stacks_common::util::{get_epoch_time_secs, log}; use crate::burnchains::{Burnchain, BurnchainView, PublicKey}; use crate::chainstate::burn::db::sortdb; @@ -344,12 +344,6 @@ pub struct ConversationP2P { /// where does this peer's data live? Set to a 0-length string if not known. pub data_url: UrlString, - /// Resolved IP address of the data URL - pub data_ip: Option, - /// Time to try DNS reesolution again - pub dns_deadline: u128, - /// Ongoing request to DNS resolver - pub dns_request: Option, /// what this peer believes is the height of the burnchain pub burnchain_tip_height: u64, @@ -569,9 +563,6 @@ impl ConversationP2P { peer_expire_block_height: 0, data_url: UrlString::try_from("".to_string()).unwrap(), - data_ip: None, - dns_deadline: 0, - dns_request: None, burnchain_tip_height: 0, burnchain_tip_burn_header_hash: BurnchainHeaderHash::zero(), @@ -1354,8 +1345,8 @@ impl ConversationP2P { self.update_from_stacker_db_handshake_data(stackerdb_accept); } else { // remote peer's burnchain view has diverged, so assume no longer replicating (we - // can't talk to it anyway). This can happen once per burnchain block for a few - // seconds as nodes begin processing the next Stacks blocks, but it's harmless -- at worst, it + // can't talk to it anyway). 
This can happen once per reward cycle for a few + // minutes as nodes begin the next reward cycle, but it's harmless -- at worst, it // just means that no stacker DB replication happens between this peer and // localhost during this time. self.clear_stacker_db_handshake_data(); @@ -1907,16 +1898,13 @@ impl ConversationP2P { let local_peer = network.get_local_peer(); let burnchain_view = network.get_chain_view(); - // remote peer's Stacks chain tip is different from ours, meaning it might have a different - // stackerdb configuration view (and we won't be able to authenticate their chunks, and - // vice versa) if burnchain_view.rc_consensus_hash != getchunkinv.rc_consensus_hash { debug!( "{:?}: NACK StackerDBGetChunkInv; {} != {}", local_peer, &burnchain_view.rc_consensus_hash, &getchunkinv.rc_consensus_hash ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleView, + NackErrorCodes::InvalidPoxFork, ))); } @@ -1958,7 +1946,7 @@ impl ConversationP2P { local_peer, &burnchain_view.rc_consensus_hash, &getchunk.rc_consensus_hash ); return Ok(StacksMessageType::Nack(NackData::new( - NackErrorCodes::StaleView, + NackErrorCodes::InvalidPoxFork, ))); } @@ -2699,148 +2687,6 @@ impl ConversationP2P { } } - /// Are we trying to resolve DNS? - pub fn waiting_for_dns(&self) -> bool { - self.dns_deadline < u128::MAX - } - - /// Try to get the IPv4 or IPv6 address out of a data URL. - fn try_decode_data_url_ipaddr(data_url: &UrlString) -> Option { - // need to begin resolution - // NOTE: should always succeed, since a UrlString shouldn't decode unless it's a valid URL or the empty string - let url = data_url.parse_to_block_url().ok()?; - let port = url.port_or_known_default()?; - let ip_addr_opt = match url.host() { - Some(url::Host::Ipv4(addr)) => { - // have IPv4 address already - Some(SocketAddr::new(IpAddr::V4(addr), port)) - } - Some(url::Host::Ipv6(addr)) => { - // have IPv6 address already - Some(SocketAddr::new(IpAddr::V6(addr), port)) - } - _ => None, - }; - ip_addr_opt - } - - /// Attempt to resolve the hostname of a conversation's data URL to its IP address. - fn try_resolve_data_url_host( - &mut self, - dns_client_opt: &mut Option<&mut DNSClient>, - dns_timeout: u128, - ) { - if self.data_ip.is_some() { - return; - } - if self.data_url.is_empty() { - return; - } - if let Some(ipaddr) = Self::try_decode_data_url_ipaddr(&self.data_url) { - // don't need to resolve! - debug!( - "{}: Resolved data URL {} to {}", - &self, &self.data_url, &ipaddr - ); - self.data_ip = Some(ipaddr); - return; - } - - let Some(dns_client) = dns_client_opt else { - return; - }; - if get_epoch_time_ms() < self.dns_deadline { - return; - } - if let Some(dns_request) = self.dns_request.take() { - // perhaps resolution completed? 
- match dns_client.poll_lookup(&dns_request.host, dns_request.port) { - Ok(query_result_opt) => { - // just take one of the addresses, if there are any - self.data_ip = query_result_opt - .map(|query_result| match query_result.result { - Ok(mut ips) => ips.pop(), - Err(e) => { - warn!( - "{}: Failed to resolve data URL {}: {:?}", - self, &self.data_url, &e - ); - - // don't try again - self.dns_deadline = u128::MAX; - None - } - }) - .flatten(); - if let Some(ip) = self.data_ip.as_ref() { - debug!("{}: Resolved data URL {} to {}", &self, &self.data_url, &ip); - } else { - info!( - "{}: Failed to resolve URL {}: no IP addresses found", - &self, &self.data_url - ); - } - // don't try again - self.dns_deadline = u128::MAX; - } - Err(e) => { - warn!("DNS lookup failed on {}: {:?}", &self.data_url, &e); - - // don't try again - self.dns_deadline = u128::MAX; - } - } - } - - // need to begin resolution - // NOTE: should always succeed, since a UrlString shouldn't decode unless it's a valid URL or the empty string - let Ok(url) = self.data_url.parse_to_block_url() else { - return; - }; - let port = match url.port_or_known_default() { - Some(p) => p, - None => { - warn!("Unsupported URL {:?}: unknown port", &url); - - // don't try again - self.dns_deadline = u128::MAX; - return; - } - }; - let ip_addr_opt = match url.host() { - Some(url::Host::Domain(domain)) => { - // need to resolve a DNS name - let deadline = get_epoch_time_ms().saturating_add(dns_timeout); - if let Err(e) = dns_client.queue_lookup(domain, port, deadline) { - debug!("Failed to queue DNS resolution of {}: {:?}", &url, &e); - return; - } - self.dns_request = Some(DNSRequest::new(domain.to_string(), port, 0)); - self.dns_deadline = deadline; - None - } - Some(url::Host::Ipv4(addr)) => { - // have IPv4 address already - Some(SocketAddr::new(IpAddr::V4(addr), port)) - } - Some(url::Host::Ipv6(addr)) => { - // have IPv6 address already - Some(SocketAddr::new(IpAddr::V6(addr), port)) - } - None => { - warn!("Unsupported URL {:?}", &url); - - // don't try again - self.dns_deadline = u128::MAX; - return; - } - }; - self.data_ip = ip_addr_opt; - if let Some(ip) = self.data_ip.as_ref() { - debug!("{}: Resolved data URL {} to {}", &self, &self.data_url, &ip); - } - } - /// Carry on a conversation with the remote peer. /// Called from the p2p network thread, so no need for a network handle. /// Attempts to fulfill requests in other threads as a result of processing a message. 
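Note: the DNS bookkeeping deleted above drives the whole data-URL resolution lifecycle off a single u128 deadline: a finite value means a lookup is pending or can still be started, and u128::MAX is the "finished or permanently failed, never retry" sentinel that every failure path sets (the removed waiting_for_dns() was just dns_deadline < u128::MAX). A minimal sketch of that idiom, with illustrative names rather than the node's actual fields:

use std::net::SocketAddr;

struct DnsState {
    deadline_ms: u128,
    resolved: Option<SocketAddr>,
}

impl DnsState {
    /// Still waiting on (or willing to start) a lookup?
    fn waiting(&self) -> bool {
        self.deadline_ms < u128::MAX
    }
    /// Record a terminal outcome and suppress all further retries.
    fn finish(&mut self, addr: Option<SocketAddr>) {
        self.resolved = addr;
        self.deadline_ms = u128::MAX; // sentinel: don't try again
    }
}

fn main() {
    let mut st = DnsState { deadline_ms: 0, resolved: None };
    assert!(st.waiting());
    st.finish(None); // lookup failed permanently
    assert!(!st.waiting());
}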
@@ -2851,7 +2697,6 @@ impl ConversationP2P {
         network: &mut PeerNetwork,
         sortdb: &SortitionDB,
         chainstate: &mut StacksChainState,
-        dns_client_opt: &mut Option<&mut DNSClient>,
         ibd: bool,
     ) -> Result<Vec<StacksMessage>, net_error> {
         let num_inbound = self.connection.inbox_len();
@@ -2951,9 +2796,6 @@ impl ConversationP2P {
             }
         }
 
-        // while we're at it, update our IP address if we have a pending DNS resolution (or start
-        // the process if we need it)
-        self.try_resolve_data_url_host(dns_client_opt, network.get_connection_opts().dns_timeout);
         Ok(unsolicited)
     }
 
@@ -3440,14 +3282,14 @@ mod test {
         test_debug!("send handshake");
         convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
         let unhandled_2 = convo_2
-            .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false)
+            .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false)
             .unwrap();
 
         // convo_1 has a handshakeaccept
         test_debug!("send handshake-accept");
         convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
         let unhandled_1 = convo_1
-            .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false)
+            .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false)
             .unwrap();
 
         let reply_1 = rh_1.recv(0).unwrap();
@@ -3721,14 +3563,14 @@ mod test {
         test_debug!("send handshake");
         convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2);
         let unhandled_2 = convo_2
-            .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false)
+            .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false)
            .unwrap();
 
         // convo_1 has a handshakeaccept
         test_debug!("send handshake-accept");
         convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1);
         let unhandled_1 = convo_1
-            .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false)
+            .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false)
             .unwrap();
 
         let reply_1 = rh_1.recv(0).unwrap();
@@ -3900,13 +3742,13 @@ mod test {
         // convo_2 receives it and automatically rejects it.
convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakreject convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -4048,13 +3890,12 @@ mod test { // convo_2 receives it and processes it, and barfs convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); - let unhandled_2_err = - convo_2.chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false); + let unhandled_2_err = convo_2.chat(&mut net_2, &sortdb_2, &mut chainstate_2, false); // convo_1 gets a nack and consumes it convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); // the waiting reply aborts on disconnect @@ -4207,13 +4048,13 @@ mod test { // convo_2 receives it and processes it, and rejects it convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 gets a handshake-reject and consumes it convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); // the waiting reply aborts on disconnect @@ -4342,13 +4183,13 @@ mod test { // convo_2 receives it convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakaccept convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -4396,13 +4237,13 @@ mod test { // convo_2 receives it convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakaccept convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -4539,13 +4380,13 @@ mod test { // convo_2 receives it and processes it automatically (consuming it), and give back a handshake reject convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); // convo_1 gets a handshake reject and consumes it convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut 
chainstate_2, false) .unwrap(); // get back handshake reject @@ -4700,7 +4541,7 @@ mod test { &mut convo_2, ); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakeaccept @@ -4712,7 +4553,7 @@ mod test { &mut convo_1, ); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_handshake_1 = rh_handshake_1.recv(0).unwrap(); @@ -4873,7 +4714,7 @@ mod test { &mut convo_2, ); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakeaccept @@ -4883,7 +4724,7 @@ mod test { &mut convo_1, ); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_handshake_1 = rh_handshake_1.recv(0).unwrap(); @@ -5083,13 +4924,13 @@ mod test { // convo_2 will reply with a nack since peer_1 hasn't authenticated yet convo_send_recv(&mut convo_1, vec![&mut rh_ping_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a nack convo_send_recv(&mut convo_2, vec![&mut rh_ping_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_ping_1.recv(0).unwrap(); @@ -5256,12 +5097,12 @@ mod test { convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); // connection should break off since nodes ignore unsolicited messages @@ -5402,14 +5243,14 @@ mod test { test_debug!("send handshake"); convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakeaccept test_debug!("send handshake-accept"); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -5485,14 +5326,14 @@ mod test { test_debug!("send getblocksinv"); convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 gets back a blocksinv message test_debug!("send blocksinv"); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -5538,14 +5379,14 @@ mod test { 
test_debug!("send getblocksinv (diverged)"); convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 gets back a nack message test_debug!("send nack (diverged)"); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -5679,14 +5520,14 @@ mod test { test_debug!("send handshake"); convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 has a handshakeaccept test_debug!("send handshake-accept"); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -5761,14 +5602,14 @@ mod test { test_debug!("send getnakamotoinv"); convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 gets back a nakamotoinv message test_debug!("send nakamotoinv"); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -5813,14 +5654,14 @@ mod test { test_debug!("send getnakamotoinv (diverged)"); convo_send_recv(&mut convo_1, vec![&mut rh_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 gets back a nack message test_debug!("send nack (diverged)"); convo_send_recv(&mut convo_2, vec![&mut rh_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let reply_1 = rh_1.recv(0).unwrap(); @@ -5953,14 +5794,14 @@ mod test { test_debug!("send natpunch {:?}", &natpunch_1); convo_send_recv(&mut convo_1, vec![&mut rh_natpunch_1], &mut convo_2); let unhandled_2 = convo_2 - .chat(&mut net_2, &sortdb_2, &mut chainstate_2, &mut None, false) + .chat(&mut net_2, &sortdb_2, &mut chainstate_2, false) .unwrap(); // convo_1 gets back a natpunch reply test_debug!("reply natpunch-reply"); convo_send_recv(&mut convo_2, vec![&mut rh_natpunch_1], &mut convo_1); let unhandled_1 = convo_1 - .chat(&mut net_1, &sortdb_1, &mut chainstate_1, &mut None, false) + .chat(&mut net_1, &sortdb_1, &mut chainstate_1, false) .unwrap(); let natpunch_reply_1 = rh_natpunch_1.recv(0).unwrap(); diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download.rs similarity index 62% rename from stackslib/src/net/download/epoch2x.rs rename to stackslib/src/net/download.rs index f5b4b44a3a6..f19d6f47d0e 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download.rs @@ -33,6 +33,7 @@ use stacks_common::util::{get_epoch_time_ms, 
get_epoch_time_secs, log}; use crate::burnchains::{Burnchain, BurnchainView}; use crate::chainstate::burn::db::sortdb::{BlockHeaderCache, SortitionDB, SortitionDBConn}; use crate::chainstate::burn::BlockSnapshot; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::db::StacksChainState; use crate::chainstate::stacks::{Error as chainstate_error, StacksBlockHeader}; use crate::core::{ @@ -46,7 +47,7 @@ use crate::net::db::{PeerDB, *}; use crate::net::dns::*; use crate::net::http::HttpRequestContents; use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; +use crate::net::inv::inv2x::InvState; use crate::net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY; use crate::net::p2p::PeerNetwork; use crate::net::rpc::*; @@ -231,11 +232,11 @@ pub struct BlockDownloader { microblocks: HashMap>, /// statistics on peers' data-plane endpoints - pub(crate) dead_peers: Vec, - pub(crate) broken_peers: Vec, + dead_peers: Vec, + broken_peers: Vec, broken_neighbors: Vec, // disconnect peers who report invalid block inventories too - pub(crate) blocked_urls: HashMap, // URLs that chronically don't work, and when we can try them again + blocked_urls: HashMap, // URLs that chronically don't work, and when we can try them again /// how often to download download_interval: u64, @@ -2501,3 +2502,1524 @@ impl PeerNetwork { )) } } + +#[cfg(test)] +pub mod test { + use std::collections::HashMap; + + use clarity::vm::clarity::ClarityConnection; + use clarity::vm::costs::ExecutionCost; + use clarity::vm::execute; + use clarity::vm::representations::*; + use rand::Rng; + use stacks_common::util::hash::*; + use stacks_common::util::sleep_ms; + use stacks_common::util::vrf::VRFProof; + + use super::*; + use crate::burnchains::tests::TestMiner; + use crate::chainstate::burn::db::sortdb::*; + use crate::chainstate::burn::operations::*; + use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; + use crate::chainstate::stacks::miner::*; + use crate::chainstate::stacks::tests::*; + use crate::chainstate::stacks::*; + use crate::net::codec::*; + use crate::net::inv::inv2x::*; + use crate::net::relay::*; + use crate::net::test::*; + use crate::net::*; + use crate::stacks_common::types::PublicKey; + use crate::util_lib::strings::*; + use crate::util_lib::test::*; + + fn get_peer_availability( + peer: &mut TestPeer, + start_height: u64, + end_height: u64, + ) -> Vec<(ConsensusHash, Option, Vec)> { + let inv_state = peer.network.inv_state.take().unwrap(); + let availability = peer + .with_network_state( + |ref mut sortdb, + ref mut _chainstate, + ref mut network, + ref mut _relayer, + ref mut _mempool| { + BlockDownloader::get_block_availability( + &network.local_peer, + &inv_state, + sortdb, + &mut network.header_cache, + start_height, + end_height, + ) + }, + ) + .unwrap(); + peer.network.inv_state = Some(inv_state); + availability + } + + #[test] + fn test_get_block_availability() { + with_timeout(600, || { + let mut peer_1_config = TestPeerConfig::new(function_name!(), 3210, 3211); + let mut peer_2_config = TestPeerConfig::new(function_name!(), 3212, 3213); + + // don't bother downloading blocks + peer_1_config.connection_opts.disable_block_download = true; + peer_2_config.connection_opts.disable_block_download = true; + + peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); + peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); + + let reward_cycle_length = + peer_1_config.burnchain.pox_constants.reward_cycle_length as u64; + + 
let mut peer_1 = TestPeer::new(peer_1_config); + let mut peer_2 = TestPeer::new(peer_2_config); + + let num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + }; + + let mut block_data = vec![]; + + for i in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peer_2.next_burnchain_block(burn_ops.clone()); + peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peer_1.next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peer_2.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push((sn.consensus_hash.clone(), stacks_block, microblocks)); + } + + let num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer_1.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height - peer_1.config.burnchain.first_block_height + }; + + let mut round = 0; + let mut inv_1_count = 0; + let mut inv_2_count = 0; + let mut all_blocks_available = false; + + // can only learn about 1 reward cycle's blocks at a time in PoX + while inv_1_count < reward_cycle_length + && inv_2_count < reward_cycle_length + && !all_blocks_available + { + let result_1 = peer_1.step(); + let result_2 = peer_2.step(); + + inv_1_count = match peer_1.network.inv_state { + Some(ref inv) => { + let mut count = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); + + // continue until peer 1 knows that peer 2 has blocks + let peer_1_availability = get_peer_availability( + &mut peer_1, + first_stacks_block_height, + first_stacks_block_height + reward_cycle_length, + ); + + let mut all_availability = true; + for (_, _, neighbors) in peer_1_availability.iter() { + if neighbors.len() != 1 { + // not done yet + count = 0; + all_availability = false; + break; + } + assert_eq!(neighbors[0], peer_2.config.to_neighbor().addr); + } + + all_blocks_available = all_availability; + + count + } + None => 0, + }; + + inv_2_count = match peer_2.network.inv_state { + Some(ref inv) => inv.get_inv_sortitions(&peer_1.to_neighbor().addr), + None => 0, + }; + + // nothing should break + match peer_1.network.inv_state { + Some(ref inv) => { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + } + None => {} + } + + match peer_2.network.inv_state { + Some(ref inv) => { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + } + None => {} + } + + round += 1; + } + + info!("Completed walk round {} step(s)", round); + + let availability = get_peer_availability( + &mut peer_1, + first_stacks_block_height, + first_stacks_block_height + reward_cycle_length, + ); + + eprintln!("availability.len() == {}", availability.len()); + eprintln!("block_data.len() == {}", block_data.len()); + + assert_eq!(availability.len() as u64, reward_cycle_length); + assert_eq!(block_data.len() as u64, num_blocks); + + for ( + (sn_consensus_hash, stacks_block, microblocks), + (consensus_hash, stacks_block_hash_opt, neighbors), + ) in block_data.iter().zip(availability.iter()) + { + assert_eq!(*consensus_hash, *sn_consensus_hash); + assert!(stacks_block_hash_opt.is_some()); + assert_eq!(*stacks_block_hash_opt, Some(stacks_block.block_hash())); + } + }) + } + + fn get_blocks_inventory( + peer: &mut TestPeer, + start_height: 
u64, + end_height: u64, + ) -> BlocksInvData { + let block_hashes = { + let num_headers = end_height - start_height; + let ic = peer.sortdb.as_mut().unwrap().index_conn(); + let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); + let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) + .unwrap() + .unwrap(); + ic.get_stacks_header_hashes( + num_headers + 1, + &ancestor.consensus_hash, + &mut BlockHeaderCache::new(), + ) + .unwrap() + }; + + let inv = peer + .chainstate() + .get_blocks_inventory(&block_hashes) + .unwrap(); + inv + } + + pub fn run_get_blocks_and_microblocks( + test_name: &str, + port_base: u16, + num_peers: usize, + make_topology: T, + block_generator: F, + mut peer_func: P, + mut check_breakage: C, + mut done_func: D, + ) -> Vec + where + T: FnOnce(&mut Vec) -> (), + F: FnOnce( + usize, + &mut Vec, + ) -> Vec<( + ConsensusHash, + Option, + Option>, + )>, + P: FnMut(&mut Vec) -> (), + C: FnMut(&mut TestPeer) -> bool, + D: FnMut(&mut Vec) -> bool, + { + assert!(num_peers > 0); + let first_sortition_height = 0; + + let mut peer_configs = vec![]; + for i in 0..num_peers { + let mut peer_config = TestPeerConfig::new( + test_name, + port_base + ((2 * i) as u16), + port_base + ((2 * i + 1) as u16), + ); + peer_config.burnchain.first_block_height = first_sortition_height; + + peer_configs.push(peer_config); + } + + make_topology(&mut peer_configs); + + let mut peers = vec![]; + for conf in peer_configs.drain(..) { + let peer = TestPeer::new(conf); + peers.push(peer); + } + + let mut num_blocks = 10; + let first_stacks_block_height = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + }; + + let block_data = block_generator(num_blocks, &mut peers); + num_blocks = block_data.len(); + + let num_burn_blocks = { + let sn = + SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) + .unwrap(); + sn.block_height + }; + + let mut dns_clients = vec![]; + let mut dns_threads = vec![]; + + for _ in 0..peers.len() { + let (dns_client, dns_thread_handle) = dns_thread_start(100); + dns_clients.push(dns_client); + dns_threads.push(dns_thread_handle); + } + + let mut round = 0; + let mut peer_invs = vec![BlocksInvData::empty(); num_peers]; + + let mut done = false; + + loop { + peer_func(&mut peers); + + let mut peers_behind_burnchain = false; + for i in 0..peers.len() { + let peer = &mut peers[i]; + + test_debug!("======= peer {} step begin =========", i); + let mut result = peer.step_dns(&mut dns_clients[i]).unwrap(); + + let lp = peer.network.local_peer.clone(); + peer.with_db_state(|sortdb, chainstate, relayer, mempool| { + relayer.process_network_result( + &lp, + &mut result, + sortdb, + chainstate, + mempool, + false, + None, + None, + ) + }) + .unwrap(); + + test_debug!( + "Peer {} processes {} blocks and {} microblock streams", + i, + result.blocks.len(), + result.confirmed_microblocks.len() + ); + + peer.with_peer_state(|peer, sortdb, chainstate, mempool| { + for i in 0..(result.blocks.len() + result.confirmed_microblocks.len() + 1) { + peer.coord.handle_new_stacks_block().unwrap(); + + let pox_id = { + let ic = sortdb.index_conn(); + let tip_sort_id = + SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); + let sortdb_reader = + SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); + sortdb_reader.get_pox_id().unwrap() + }; + + test_debug!( + "\n\n{:?}: after stacks block, new tip PoX ID is {:?}\n\n", + 
&peer.to_neighbor().addr, + &pox_id + ); + } + Ok(()) + }) + .unwrap(); + + assert!(check_breakage(peer)); + + let peer_num_burn_blocks = { + let sn = SortitionDB::get_canonical_burn_chain_tip( + peer.sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + sn.block_height + }; + + peer_invs[i] = get_blocks_inventory(peer, 0, peer_num_burn_blocks); + peers_behind_burnchain = + peer_num_burn_blocks != num_burn_blocks || peers_behind_burnchain; + + test_debug!("Peer {} block inventory: {:?}", i, &peer_invs[i]); + + if let Some(ref inv) = peer.network.inv_state { + test_debug!("Peer {} inventory stats: {:?}", i, &inv.block_stats); + } + + let (mut inbound, mut outbound) = peer.network.dump_peer_table(); + + inbound.sort(); + outbound.sort(); + + test_debug!( + "Peer {} outbound ({}): {}", + i, + outbound.len(), + outbound.join(", ") + ); + test_debug!( + "Peer {} inbound ({}): {}", + i, + inbound.len(), + inbound.join(", ") + ); + test_debug!("======= peer {} step end =========", i); + } + + if !done { + done = !peers_behind_burnchain; + + for i in 0..num_peers { + for b in 0..num_blocks { + if !peer_invs[i].has_ith_block( + ((b as u64) + first_stacks_block_height - first_sortition_height) + as u16, + ) { + if block_data[b].1.is_some() { + test_debug!( + "Peer {} is missing block {} at sortition height {} (between {} and {})", + i, + b, + (b as u64) + first_stacks_block_height - first_sortition_height, + first_stacks_block_height - first_sortition_height, + first_stacks_block_height - first_sortition_height + + (num_blocks as u64), + ); + done = false; + } + } + } + for b in 1..(num_blocks - 1) { + if !peer_invs[i].has_ith_microblock_stream( + ((b as u64) + first_stacks_block_height - first_sortition_height) + as u16, + ) { + if block_data[b].2.is_some() { + test_debug!( + "Peer {} is missing microblock stream {} (between {} and {})", + i, + (b as u64) + first_stacks_block_height - first_sortition_height, + first_stacks_block_height - first_sortition_height, + first_stacks_block_height - first_sortition_height + + ((num_blocks - 1) as u64), + ); + done = false; + } + } + } + } + } + for (i, peer) in peers.iter().enumerate() { + test_debug!( + "Peer {} has done {} p2p state-machine passes; {} inv syncs, {} download-syncs", + i, + peer.network.num_state_machine_passes, + peer.network.num_inv_sync_passes, + peer.network.num_downloader_passes + ); + } + + if done { + // all blocks obtained, now do custom check + if done_func(&mut peers) { + break; + } + } + + round += 1; + } + + info!("Completed walk round {} step(s)", round); + + let mut peer_invs = vec![]; + for peer in peers.iter_mut() { + let peer_inv = get_blocks_inventory(peer, 0, num_burn_blocks); + peer_invs.push(peer_inv); + + let availability = get_peer_availability( + peer, + first_stacks_block_height - first_sortition_height, + first_stacks_block_height - first_sortition_height + (num_blocks as u64), + ); + + assert_eq!(availability.len(), num_blocks); + assert_eq!(block_data.len(), num_blocks); + + for ( + (sn_consensus_hash, stacks_block_opt, microblocks_opt), + (consensus_hash, stacks_block_hash_opt, neighbors), + ) in block_data.iter().zip(availability.iter()) + { + assert_eq!(*consensus_hash, *sn_consensus_hash); + + if stacks_block_hash_opt.is_some() { + assert!(stacks_block_opt.is_some()); + assert_eq!( + *stacks_block_hash_opt, + Some(stacks_block_opt.as_ref().unwrap().block_hash()) + ); + } else { + assert!(stacks_block_opt.is_none()); + } + } + } + + drop(dns_clients); + for handle in dns_threads.drain(..) 
{ + handle.join().unwrap(); + } + + peers + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + function_name!(), + 3200, + 2, + |ref mut peer_configs| { + // build initial network topology + assert_eq!(peer_configs.len(), 2); + + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = + peers[1].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[1].next_burnchain_block(burn_ops.clone()); + peers[1].process_stacks_epoch_at_tip(&stacks_block, µblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peers[0].next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + + // no block advertisements (should be disabled) + let _ = peer.for_each_convo_p2p(|event_id, convo| { + let cnt = *(convo + .stats + .msg_rx_counts + .get(&StacksMessageID::BlocksAvailable) + .unwrap_or(&0)); + assert_eq!( + cnt, 0, + "neighbor event={} got {} BlocksAvailable messages", + event_id, cnt + ); + Ok(()) + }); + + true + }, + |_| true, + ); + }) + } + + fn make_contract_call_transaction( + miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + spending_account: &mut TestMiner, + contract_address: StacksAddress, + contract_name: &str, + function_name: &str, + args: Vec, + consensus_hash: &ConsensusHash, + block_hash: &BlockHeaderHash, + nonce_offset: u64, + ) -> StacksTransaction { + let tx_cc = { + let mut tx_cc = StacksTransaction::new( + TransactionVersion::Testnet, + spending_account.as_transaction_auth().unwrap().into(), + TransactionPayload::new_contract_call( + contract_address, + contract_name, + function_name, + args, + ) + .unwrap(), + ); + + let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); + let cur_nonce = chainstate + .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { + clarity_tx.with_clarity_db_readonly(|clarity_db| { + clarity_db + .get_account_nonce(&spending_account.origin_address().unwrap().into()) + .unwrap() + }) + }) + .unwrap() + + nonce_offset; + + test_debug!( + "Nonce of {:?} is {} (+{}) at {}/{}", + &spending_account.origin_address().unwrap(), + cur_nonce, + nonce_offset, + consensus_hash, + block_hash + ); + + tx_cc.chain_id = 0x80000000; + tx_cc.auth.set_origin_nonce(cur_nonce); + tx_cc.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); + + let mut tx_signer = StacksTransactionSigner::new(&tx_cc); + spending_account.sign_as_origin(&mut tx_signer); + + let tx_cc_signed = tx_signer.get_tx().unwrap(); + + test_debug!( + "make transaction {:?} off of {:?}/{:?}: {:?}", + 
&tx_cc_signed.txid(), + consensus_hash, + block_hash, + &tx_cc_signed + ); + + spending_account.set_nonce(cur_nonce + 1); + tx_cc_signed + }; + + tx_cc + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { + // 20 reward cycles + with_timeout(600, || { + run_get_blocks_and_microblocks( + "test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks", + 32100, + 2, + |ref mut peer_configs| { + // build initial network topology + assert_eq!(peer_configs.len(), 2); + + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + + // peer[1] has a big initial balance + let initial_balances = vec![( + PrincipalData::from( + peer_configs[1].spending_account.origin_address().unwrap(), + ), + 1_000_000_000_000_000, + )]; + + peer_configs[0].initial_balances = initial_balances.clone(); + peer_configs[1].initial_balances = initial_balances; + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + let spending_account = &mut peers[1].config.spending_account.clone(); + + // function to make a tenure in which the peer's miner stacks its STX + let mut make_stacking_tenure = |miner: &mut TestMiner, + sortdb: &mut SortitionDB, + chainstate: &mut StacksChainState, + vrfproof: VRFProof, + parent_opt: Option<&StacksBlock>, + microblock_parent_opt: Option< + &StacksMicroblockHeader, + >| { + let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + + let stacks_tip_opt = + NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) + .unwrap(); + let parent_tip = match stacks_tip_opt { + None => { + StacksChainState::get_genesis_header_info(chainstate.db()).unwrap() + } + Some(header) => { + let ic = sortdb.index_conn(); + let snapshot = + SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &header.anchored_header.block_hash(), + ) + .unwrap() + .unwrap(); // succeeds because we don't fork + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &snapshot.consensus_hash, + &snapshot.winning_stacks_block_hash, + ) + .unwrap() + .unwrap() + } + }; + + let parent_header_hash = parent_tip.anchored_header.block_hash(); + let parent_consensus_hash = parent_tip.consensus_hash.clone(); + let parent_index_hash = StacksBlockHeader::make_index_block_hash( + &parent_consensus_hash, + &parent_header_hash, + ); + + let coinbase_tx = make_coinbase_with_nonce( + miner, + parent_tip.stacks_block_height as usize, + miner.get_nonce(), + None, + ); + + let stack_tx = make_contract_call_transaction( + miner, + sortdb, + chainstate, + spending_account, + StacksAddress::burn_address(false), + "pox", + "stack-stx", + vec![ + Value::UInt(1_000_000_000_000_000 / 2), + execute("{ version: 0x00, hashbytes: 0x1000000010000000100000010000000100000001 }").unwrap().unwrap(), + Value::UInt((tip.block_height + 1) as u128), + Value::UInt(12) + ], + &parent_consensus_hash, + &parent_header_hash, + 0 + ); + + let mblock_tx = make_contract_call_transaction( + miner, + sortdb, + chainstate, + spending_account, + StacksAddress::burn_address(false), + "pox", + "get-pox-info", + vec![], + &parent_consensus_hash, + &parent_header_hash, + 4, + ); + + let mblock_privkey = StacksPrivateKey::new(); + + let
mblock_pubkey_hash_bytes = Hash160::from_data( + &StacksPublicKey::from_private(&mblock_privkey).to_bytes(), + ); + + let mut builder = StacksBlockBuilder::make_block_builder( + chainstate.mainnet, + &parent_tip, + vrfproof, + tip.total_burn, + mblock_pubkey_hash_bytes, + ) + .unwrap(); + builder.set_microblock_privkey(mblock_privkey); + + let (anchored_block, _size, _cost, microblock_opt) = + StacksBlockBuilder::make_anchored_block_and_microblock_from_txs( + builder, + chainstate, + &sortdb.index_conn(), + vec![coinbase_tx, stack_tx], + vec![mblock_tx], + ) + .unwrap(); + + (anchored_block, vec![microblock_opt.unwrap()]) + }; + + for i in 0..50 { + let (mut burn_ops, stacks_block, microblocks) = if i == 1 { + peers[1].make_tenure(&mut make_stacking_tenure) + } else { + peers[1].make_default_tenure() + }; + + let (_, burn_header_hash, consensus_hash) = + peers[1].next_burnchain_block(burn_ops.clone()); + peers[1].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peers[0].next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + + // no block advertisements (should be disabled) + let _ = peer.for_each_convo_p2p(|event_id, convo| { + let cnt = *(convo + .stats + .msg_rx_counts + .get(&StacksMessageID::BlocksAvailable) + .unwrap_or(&0)); + assert_eq!( + cnt, 0, + "neighbor event={} got {} BlocksAvailable messages", + event_id, cnt + ); + Ok(()) + }); + + true + }, + |_| true, + ); + }) + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_5_peers_star() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + function_name!(), + 3210, + 5, + |ref mut peer_configs| { + // build initial network topology -- a star with + // peers[0] at the center, with all the blocks + assert_eq!(peer_configs.len(), 5); + let mut neighbors = vec![]; + + for p in peer_configs.iter_mut() { + p.connection_opts.disable_block_advertisement = true; + p.connection_opts.max_clients_per_host = 30; + } + + let peer_0 = peer_configs[0].to_neighbor(); + for i in 1..peer_configs.len() { + neighbors.push(peer_configs[i].to_neighbor()); + peer_configs[i].add_neighbor(&peer_0); + } + + for n in neighbors.drain(..)
{ + peer_configs[0].add_neighbor(&n); + } + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = + peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + true + }, + |_| true, + ); + }) + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_5_peers_line() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + function_name!(), + 3220, + 5, + |ref mut peer_configs| { + // build initial network topology -- a line with + // peers[0] at the left, with all the blocks + assert_eq!(peer_configs.len(), 5); + let mut neighbors = vec![]; + + for p in peer_configs.iter_mut() { + p.connection_opts.disable_block_advertisement = true; + p.connection_opts.max_clients_per_host = 30; + } + + for i in 0..peer_configs.len() { + neighbors.push(peer_configs[i].to_neighbor()); + } + + for i in 0..peer_configs.len() - 1 { + peer_configs[i].add_neighbor(&neighbors[i + 1]); + peer_configs[i + 1].add_neighbor(&neighbors[i]); + } + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = + peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + true + }, + |_| true, + ); + }) + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + function_name!(), + 3230, + 5, + |ref mut peer_configs| { + // build initial network topology -- a star with + // peers[0] at the center, with all the blocks + assert_eq!(peer_configs.len(), 5); + let mut neighbors = vec![]; + + for p in peer_configs.iter_mut() { + p.connection_opts.disable_block_advertisement = true; + } + + let peer_0 = peer_configs[0].to_neighbor(); + + for i in 1..peer_configs.len() { + neighbors.push(peer_configs[i].to_neighbor()); + peer_configs[i].add_neighbor(&peer_0); + + //
severely restrict the number of allowed + // connections in each peer + peer_configs[i].connection_opts.max_clients_per_host = 1; + peer_configs[i].connection_opts.num_clients = 1; + peer_configs[i].connection_opts.idle_timeout = 1; + peer_configs[i].connection_opts.max_http_clients = 1; + } + + for n in neighbors.drain(..) { + peer_configs[0].add_neighbor(&n); + } + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = + peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + true + }, + |_| true, + ); + }) + } + + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { + // this one can go for a while + with_timeout(1200, || { + run_get_blocks_and_microblocks( + function_name!(), + 3240, + 5, + |ref mut peer_configs| { + // build initial network topology -- a star with + // peers[0] at the center, with all the blocks + assert_eq!(peer_configs.len(), 5); + let mut neighbors = vec![]; + + for p in peer_configs.iter_mut() { + p.connection_opts.disable_block_advertisement = true; + } + + let peer_0 = peer_configs[0].to_neighbor(); + + for i in 1..peer_configs.len() { + neighbors.push(peer_configs[i].to_neighbor()); + peer_configs[i].add_neighbor(&peer_0); + + // severely restrict the number of events + peer_configs[i].connection_opts.max_sockets = 10; + } + + for n in neighbors.drain(..)
{ + peer_configs[0].add_neighbor(&n); + } + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = + peers[0].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[0].next_burnchain_block(burn_ops.clone()); + peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + for i in 1..peers.len() { + peers[i].next_burnchain_block_raw(burn_ops.clone()); + } + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[0].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + true + }, + |_| true, + ); + }) + } + + #[test] + #[ignore] + #[should_panic(expected = "blocked URL")] + pub fn test_get_blocks_and_microblocks_ban_url() { + use std::net::TcpListener; + use std::thread; + + let listener_1 = TcpListener::bind("127.0.0.1:3260").unwrap(); + let listener_2 = TcpListener::bind("127.0.0.1:3262").unwrap(); + + let endpoint_thread_1 = thread::spawn(move || { + let (sock, addr) = listener_1.accept().unwrap(); + test_debug!("Accepted 1 {:?}", &addr); + sleep_ms(60_000); + }); + + let endpoint_thread_2 = thread::spawn(move || { + let (sock, addr) = listener_2.accept().unwrap(); + test_debug!("Accepted 2 {:?}", &addr); + sleep_ms(60_000); + }); + + run_get_blocks_and_microblocks( + function_name!(), + 3250, + 2, + |ref mut peer_configs| { + // build initial network topology + assert_eq!(peer_configs.len(), 2); + + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + // announce URLs to our fake handlers + peer_configs[0].data_url = + UrlString::try_from("http://127.0.0.1:3260".to_string()).unwrap(); + peer_configs[1].data_url = + UrlString::try_from("http://127.0.0.1:3262".to_string()).unwrap(); + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + // build up block data to replicate + let mut block_data = vec![]; + for _ in 0..num_blocks { + let (mut burn_ops, stacks_block, microblocks) = peers[1].make_default_tenure(); + + let (_, burn_header_hash, consensus_hash) = + peers[1].next_burnchain_block(burn_ops.clone()); + peers[1].process_stacks_epoch_at_tip(&stacks_block, &microblocks); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peers[0].next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } + block_data + }, + |_| {}, + |peer| { + let mut blocked = 0; + match peer.network.block_downloader { + Some(ref dl) => { + blocked = dl.blocked_urls.len(); + } + None => {} + } + if blocked >= 1 { + // NOTE: this is the success criterion + panic!("blocked URL"); + } + true + }, + |_| true, + ); + + endpoint_thread_1.join().unwrap(); + endpoint_thread_2.join().unwrap(); + }
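The ban-URL test above exercises the downloader's unhealthy-endpoint handling with real sockets: each peer's `data_url` points at a listener that accepts a connection and then just sleeps, so HTTP fetches stall until the downloader gives up and records the URL in `blocked_urls`. Below is a minimal, dependency-free sketch of that stalled-endpoint fixture pattern; the ephemeral port, the 500 ms timeout, and all names are illustrative assumptions, not code from this PR.

```rust
use std::io::Read;
use std::net::{TcpListener, TcpStream};
use std::thread;
use std::time::Duration;

fn main() {
    // Server side: accept one connection, then stall without ever writing,
    // like the listeners bound on 127.0.0.1:3260/3262 in the test above.
    let listener = TcpListener::bind("127.0.0.1:0").unwrap(); // port 0: let the OS pick
    let addr = listener.local_addr().unwrap();
    let stalled = thread::spawn(move || {
        let (_sock, peer) = listener.accept().unwrap();
        println!("accepted {:?}, now stalling", peer);
        thread::sleep(Duration::from_secs(2));
    });

    // Client side: a downloader with a read timeout observes the stall and
    // can then classify the URL behind this address as one to block.
    let mut conn = TcpStream::connect(addr).unwrap();
    conn.set_read_timeout(Some(Duration::from_millis(500))).unwrap();
    let mut buf = [0u8; 1];
    match conn.read(&mut buf) {
        Ok(0) | Err(_) => println!("endpoint stalled or closed; ban its URL"),
        Ok(_) => println!("unexpected data"),
    }
    stalled.join().unwrap();
}
```

Binding to port 0 and reading back `local_addr()` sidesteps the fixed-port collisions that hard-coded ports like 3260/3262 can hit when tests run concurrently.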
+ + #[test] + #[ignore] + pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_descendants() { + with_timeout(600, || { + run_get_blocks_and_microblocks( + function_name!(), + 3260, + 2, + |ref mut peer_configs| { + // build initial network topology + assert_eq!(peer_configs.len(), 2); + + peer_configs[0].connection_opts.disable_block_advertisement = true; + peer_configs[1].connection_opts.disable_block_advertisement = true; + + let peer_0 = peer_configs[0].to_neighbor(); + let peer_1 = peer_configs[1].to_neighbor(); + peer_configs[0].add_neighbor(&peer_1); + peer_configs[1].add_neighbor(&peer_0); + }, + |num_blocks, ref mut peers| { + // build up block data to replicate. + // chainstate looks like this: + // + // [tenure-1] <- [mblock] <- [mblock] <- [mblock] <- [mblock] <- ... + // \ \ \ \ + // \ \ \ \ + // [tenure-2] [tenure-3] [tenure-4] [tenure-5] ... + // + let mut block_data = vec![]; + let mut microblock_stream = vec![]; + let mut first_block_height = 0; + for i in 0..num_blocks { + if i == 0 { + let (mut burn_ops, stacks_block, mut microblocks) = + peers[1].make_default_tenure(); + + // extend to 10 microblocks + while microblocks.len() != num_blocks { + let next_microblock_payload = TransactionPayload::SmartContract( + TransactionSmartContract { + name: ContractName::try_from(format!( + "hello-world-{}", + thread_rng().gen::<u64>() + )) + .expect("FATAL: valid name"), + code_body: StacksString::from_str( + "(begin (print \"hello world\"))", + ) + .expect("FATAL: valid code"), + }, + None, + ); + let mut mblock = microblocks.last().unwrap().clone(); + let last_nonce = mblock + .txs + .last() + .as_ref() + .unwrap() + .auth() + .get_origin_nonce(); + let prev_block = mblock.block_hash(); + + let signed_tx = sign_standard_singlesig_tx( + next_microblock_payload, + &peers[1].miner.privks[0], + last_nonce + 1, + 0, + ); + let txids = vec![signed_tx.txid().as_bytes().to_vec()]; + let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txids); + let tx_merkle_root = merkle_tree.root(); + + mblock.txs = vec![signed_tx]; + mblock.header.tx_merkle_root = tx_merkle_root; + mblock.header.prev_block = prev_block; + mblock.header.sequence += 1; + mblock + .header + .sign(peers[1].miner.microblock_privks.last().as_ref().unwrap()) + .unwrap(); + + microblocks.push(mblock); + } + + let (_, burn_header_hash, consensus_hash) = + peers[1].next_burnchain_block(burn_ops.clone()); + + peers[1].process_stacks_epoch( + &stacks_block, + &consensus_hash, + &microblocks, + ); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peers[0].next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + + microblock_stream = microblocks.clone(); + first_block_height = sn.block_height as u32; + + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(microblocks), + )); + } else { + test_debug!("Build child block {}", i); + let tip = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + + let chainstate_path = peers[1].chainstate_path.clone(); + + let (mut burn_ops, stacks_block, _) = peers[1].make_tenure( + |ref mut miner, + ref mut sortdb, + ref mut chainstate, + vrf_proof, + ref parent_opt, + ref parent_microblock_header_opt| { + let mut parent_tip = + StacksChainState::get_anchored_block_header_info( + chainstate.db(), + &block_data[0].0, + &block_data[0].1.as_ref().unwrap().block_hash(), + ) + .unwrap() + .unwrap(); +
parent_tip.microblock_tail = + Some(microblock_stream[i - 1].header.clone()); + + let mut mempool = + MemPoolDB::open_test(false, 0x80000000, &chainstate_path) + .unwrap(); + let coinbase_tx = + make_coinbase_with_nonce(miner, i, (i + 2) as u64, None); + + let (anchored_block, block_size, block_execution_cost) = + StacksBlockBuilder::build_anchored_block( + chainstate, + &sortdb.index_conn(), + &mut mempool, + &parent_tip, + parent_tip + .anchored_header + .as_stacks_epoch2() + .unwrap() + .total_work + .burn + + 1000, + vrf_proof, + Hash160([i as u8; 20]), + &coinbase_tx, + BlockBuilderSettings::max_value(), + None, + ) + .unwrap(); + (anchored_block, vec![]) + }, + ); + + for burn_op in burn_ops.iter_mut() { + if let BlockstackOperationType::LeaderBlockCommit(ref mut op) = + burn_op + { + op.parent_block_ptr = first_block_height; + op.block_header_hash = stacks_block.block_hash(); + } + } + + let (_, burn_header_hash, consensus_hash) = + peers[1].next_burnchain_block(burn_ops.clone()); + + peers[1].process_stacks_epoch(&stacks_block, &consensus_hash, &vec![]); + + TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); + + peers[0].next_burnchain_block_raw(burn_ops); + + let sn = SortitionDB::get_canonical_burn_chain_tip( + &peers[1].sortdb.as_ref().unwrap().conn(), + ) + .unwrap(); + + block_data.push(( + sn.consensus_hash.clone(), + Some(stacks_block), + Some(vec![]), + )); + } + } + block_data + }, + |_| {}, + |peer| { + // check peer health + // nothing should break + match peer.network.block_downloader { + Some(ref dl) => { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); + } + None => {} + } + + // no block advertisements (should be disabled) + let _ = peer.for_each_convo_p2p(|event_id, convo| { + let cnt = *(convo + .stats + .msg_rx_counts + .get(&StacksMessageID::BlocksAvailable) + .unwrap_or(&0)); + assert_eq!( + cnt, 0, + "neighbor event={} got {} BlocksAvailable messages", + event_id, cnt + ); + Ok(()) + }); + + true + }, + |_| true, + ); + }) + } +} diff --git a/stackslib/src/net/download/mod.rs b/stackslib/src/net/download/mod.rs deleted file mode 100644 index 1c0bbb39e40..00000000000 --- a/stackslib/src/net/download/mod.rs +++ /dev/null @@ -1,19 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
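Stepping back to `test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_descendants` above: its while-loop grows the stream by cloning the last microblock, pointing `prev_block` at that block's hash, bumping `sequence`, recomputing the tx merkle root, and re-signing the header. The toy below models just that hash-chaining invariant; the struct, the `DefaultHasher` stand-in for a 32-byte block hash, and the counter-style merkle root are assumptions made for illustration.

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy microblock header; the real test also re-signs each header.
#[derive(Clone, Debug, Hash)]
struct ToyMicroblockHeader {
    sequence: u16,
    prev_block: u64,     // stand-in for a 32-byte block hash
    tx_merkle_root: u64, // stand-in for the recomputed merkle root
}

fn block_hash(h: &ToyMicroblockHeader) -> u64 {
    let mut hasher = DefaultHasher::new();
    h.hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // Extend a stream the same way the test's while-loop does.
    let mut stream = vec![ToyMicroblockHeader { sequence: 0, prev_block: 0, tx_merkle_root: 1 }];
    while stream.len() != 10 {
        let last = stream.last().unwrap().clone();
        stream.push(ToyMicroblockHeader {
            sequence: last.sequence + 1,             // mblock.header.sequence += 1
            prev_block: block_hash(&last),           // mblock.header.prev_block = prev hash
            tx_merkle_root: last.tx_merkle_root + 1, // new root for the new tx set
        });
    }
    // Every header links to its predecessor and sequences are contiguous.
    for w in stream.windows(2) {
        assert_eq!(w[1].prev_block, block_hash(&w[0]));
        assert_eq!(w[1].sequence, w[0].sequence + 1);
    }
    println!("linked {} toy microblocks", stream.len());
}
```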
- -pub mod epoch2x; -pub mod nakamoto; - -pub use epoch2x::{BlockDownloader, BLOCK_DOWNLOAD_INTERVAL}; diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs deleted file mode 100644 index 77cf64dba6b..00000000000 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ /dev/null @@ -1,1844 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// The overall downloader can operate in one of two states: -/// * it's doing IBD, in which case it's downloading tenures using neighbor inventories and -/// the start/end block ID hashes obtained from block-commits. 
This works up until the last two -/// tenures. -/// * it's in steady-state, in which case it's downloading the last two tenures from its neighbors. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoDownloadState { - /// confirmed tenure download (IBD) - Confirmed, - /// unconfirmed tenure download (steady-state) - Unconfirmed, -} - -impl fmt::Display for NakamotoDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// The top-level block download state machine -pub struct NakamotoDownloadStateMachine { - /// What's the start burn block height for Nakamoto? - nakamoto_start_height: u64, - /// What's the current reward cycle we're tracking? - pub(crate) reward_cycle: u64, - /// List of (possible) tenures in the current reward cycle - pub(crate) wanted_tenures: Vec, - /// List of (possible) tenures in the previous reward cycle. Will be None in the first reward - /// cycle of Nakamoto - pub(crate) prev_wanted_tenures: Option>, - /// Last burnchain tip we've seen - last_sort_tip: Option, - /// Download behavior we're in - state: NakamotoDownloadState, - /// Map a tenure ID to its tenure start-block and end-block for each of our neighbors' invs - tenure_block_ids: HashMap, - /// Who can serve a given tenure - pub(crate) available_tenures: HashMap>, - /// Confirmed tenure download schedule - pub(crate) tenure_download_schedule: VecDeque, - /// Unconfirmed tenure download schedule - unconfirmed_tenure_download_schedule: VecDeque, - /// Ongoing unconfirmed tenure downloads, prioritized in who announces the latest block - unconfirmed_tenure_downloads: HashMap, - /// Ongoing confirmed tenure downloads for when we know the start and end block hashes. - tenure_downloads: NakamotoTenureDownloaderSet, - /// resolved tenure-start blocks - tenure_start_blocks: HashMap, - /// comms to remote neighbors - pub(super) neighbor_rpc: NeighborRPC, -} - -impl NakamotoDownloadStateMachine { - pub fn new(nakamoto_start_height: u64) -> Self { - Self { - nakamoto_start_height, - reward_cycle: 0, // will be calculated at runtime - wanted_tenures: vec![], - prev_wanted_tenures: None, - last_sort_tip: None, - state: NakamotoDownloadState::Confirmed, - tenure_block_ids: HashMap::new(), - available_tenures: HashMap::new(), - tenure_download_schedule: VecDeque::new(), - unconfirmed_tenure_download_schedule: VecDeque::new(), - tenure_downloads: NakamotoTenureDownloaderSet::new(), - unconfirmed_tenure_downloads: HashMap::new(), - tenure_start_blocks: HashMap::new(), - neighbor_rpc: NeighborRPC::new(), - } - } - - /// Get a range of wanted tenures between two burnchain blocks. - /// Each wanted tenure's .processed flag will be set to false. - /// - /// Returns the tenures from first_block_height (inclusive) to last_block_height (exclusive) on - /// success. - /// - /// Returns Err(..) on DB error, or if one or both of these heights do not correspond to a - /// sortition. - pub(crate) fn load_wanted_tenures( - ih: &SortitionHandleConn, - first_block_height: u64, - last_block_height: u64, - ) -> Result, NetError> { - let mut wanted_tenures = Vec::with_capacity( - usize::try_from(last_block_height.saturating_sub(first_block_height)) - .expect("FATAL: infallible: usize can't old a reward cycle"), - ); - let mut cursor = ih - .get_block_snapshot_by_height(last_block_height.saturating_sub(1))? 
- .ok_or(DBError::NotFoundError)?; - while cursor.block_height >= first_block_height { - test_debug!( - "Load sortition {}/{} burn height {}", - &cursor.consensus_hash, - &cursor.winning_stacks_block_hash, - cursor.block_height - ); - wanted_tenures.push(WantedTenure::new( - cursor.consensus_hash, - StacksBlockId(cursor.winning_stacks_block_hash.0), - cursor.block_height, - )); - cursor = SortitionDB::get_block_snapshot(&ih, &cursor.parent_sortition_id)? - .ok_or(DBError::NotFoundError)?; - } - wanted_tenures.reverse(); - Ok(wanted_tenures) - } - - /// Update a given list of wanted tenures (`wanted_tenures`), which may already have wanted - /// tenures. Appends new tenures for the given reward cycle (`cur_rc`) to `wanted_tenures`. - /// - /// Returns Ok(()) on sucess, and appends new tenures in the given reward cycle (`cur_rc`) to - /// `wanted_tenures`. - /// Returns Err(..) on DB errors. - pub(crate) fn update_wanted_tenures_for_reward_cycle( - cur_rc: u64, - tip: &BlockSnapshot, - sortdb: &SortitionDB, - wanted_tenures: &mut Vec, - ) -> Result<(), NetError> { - let highest_tenure_height = wanted_tenures.last().map(|wt| wt.burn_height).unwrap_or(0); - - // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at - // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len - let first_block_height = sortdb - .pox_constants - .reward_cycle_to_block_height(sortdb.first_block_height, cur_rc) - .saturating_sub(1) - .max(highest_tenure_height.saturating_add(1)); - - let last_block_height = sortdb - .pox_constants - .reward_cycle_to_block_height(sortdb.first_block_height, cur_rc.saturating_add(1)) - .saturating_sub(1) - .min(tip.block_height.saturating_add(1)); - - if highest_tenure_height > last_block_height { - test_debug!( - "Will NOT update wanted tenures for reward cycle {}: {} > {}", - cur_rc, - highest_tenure_height, - last_block_height - ); - return Ok(()); - } - - test_debug!( - "Update reward cycle sortitions between {} and {} (rc is {})", - first_block_height, - last_block_height, - cur_rc - ); - - // find all sortitions in this reward cycle - let ih = sortdb.index_handle(&tip.sortition_id); - let mut new_tenures = - Self::load_wanted_tenures(&ih, first_block_height, last_block_height)?; - wanted_tenures.append(&mut new_tenures); - Ok(()) - } - - /// Given the last-considered sortition tip and the current sortition tip, and a list of wanted - /// tenures loaded so far, load up any new wanted tenure data _in the same reward cycle_. Used - /// during steady-state to load up new tenures after the sorittion DB advances. - /// - /// It may return zero tenures. - /// - /// Returns Ok(new-tenures) on success. - /// Returns Err(..) on error. - pub(crate) fn load_wanted_tenures_at_tip( - last_tip: Option<&BlockSnapshot>, - tip: &BlockSnapshot, - sortdb: &SortitionDB, - loaded_so_far: &[WantedTenure], - ) -> Result, NetError> { - let tip_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap_or(0); - - let first_block_height = if let Some(highest_wanted_tenure) = loaded_so_far.last() { - highest_wanted_tenure.burn_height.saturating_add(1) - } else if let Some(last_tip) = last_tip.as_ref() { - last_tip.block_height.saturating_add(1) - } else { - // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at - // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len. 
- sortdb - .pox_constants - .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc) - .saturating_sub(1) - }; - - // be extra careful with last_block_height -- we not only account for the above, but also - // we need to account for the fact that `load_wanted_tenures` does not load the sortition - // of the last block height (but we want this!) - let last_block_height = sortdb - .pox_constants - .reward_cycle_to_block_height(sortdb.first_block_height, tip_rc.saturating_add(1)) - .saturating_sub(1) - .min(tip.block_height) - .saturating_add(1); - - test_debug!( - "Load tip sortitions between {} and {} (loaded_so_far = {})", - first_block_height, - last_block_height, - loaded_so_far.len() - ); - if last_block_height < first_block_height { - return Ok(vec![]); - } - - let ih = sortdb.index_handle(&tip.sortition_id); - let wanted_tenures = Self::load_wanted_tenures(&ih, first_block_height, last_block_height)?; - - test_debug!( - "Loaded tip sortitions between {} and {} (loaded_so_far = {}): {:?}", - first_block_height, - last_block_height, - loaded_so_far.len(), - &wanted_tenures - ); - Ok(wanted_tenures) - } - - /// Update the .processed state for each given wanted tenure. - /// Set it to true if any of the following are true: - /// * the tenure is before the nakamoto start height - /// * we have processed the entire tenure - /// - /// This function exists as a static function for ease of testing. - /// - /// Returns Ok(()) on success - /// Returns Err(..) on DB error - pub(crate) fn inner_update_processed_wanted_tenures( - nakamoto_start: u64, - wanted_tenures: &mut [WantedTenure], - chainstate: &StacksChainState, - ) -> Result<(), NetError> { - for wt in wanted_tenures.iter_mut() { - test_debug!("update_processed_wanted_tenures: consider {:?}", &wt); - if wt.processed { - continue; - } - if wt.burn_height < nakamoto_start { - // not our problem - wt.processed = true; - continue; - } - if NakamotoChainState::has_processed_nakamoto_tenure( - chainstate.db(), - &wt.tenure_id_consensus_hash, - )? { - test_debug!("Tenure {} is now processed", &wt.tenure_id_consensus_hash); - wt.processed = true; - continue; - } - } - Ok(()) - } - - /// Update the .processed state for each wanted tenure in the `prev_wanted_tenures` and - /// `wanted_tenures` lists. - /// - /// Returns Ok(()) on success - /// Returns Err(..) on DB error - pub(crate) fn update_processed_tenures( - &mut self, - chainstate: &StacksChainState, - ) -> Result<(), NetError> { - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_mut() { - test_debug!("update_processed_wanted_tenures: update prev_tenures"); - Self::inner_update_processed_wanted_tenures( - self.nakamoto_start_height, - prev_wanted_tenures, - chainstate, - )?; - } - test_debug!("update_processed_wanted_tenures: update wanted_tenures"); - Self::inner_update_processed_wanted_tenures( - self.nakamoto_start_height, - &mut self.wanted_tenures, - chainstate, - ) - } - - /// Find all stored (but not necessarily processed) tenure-start blocks for a list - /// of wanted tenures that this node has locally. NOTE: these tenure-start blocks - /// do not correspond to the tenure; they correspond to the _parent_ tenure (since a - /// `WantedTenure` captures the tenure-start block hash of the parent tenure; the same data - /// captured by a sortition). - /// - /// This method is static to ease testing. - /// - /// Returns Ok(()) on success and fills in newly-discovered blocks into `tenure_start_blocks`. - /// Returns Err(..) on DB error. 
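A recurring subtlety in this deleted state machine is the "careful -- need .saturating_sub(1)" note that appears twice above: by the convention the comments describe, `reward_cycle_to_block_height` lands a cycle's nominal start at a height of 1 mod the cycle length, while the downloader wants 0 mod the cycle length, hence the subtraction. A toy rendering of that adjustment, with the height formula assumed from the comments rather than taken from this file:

```rust
// Assumed convention (illustrative numbers): cycles are `len` blocks long and
// the nominal start returned here is first_height + rc * len + 1, i.e. the
// start lands at height == 1 (mod len), so callers subtract 1 to get == 0.
fn reward_cycle_to_block_height(first_height: u64, rc: u64, len: u64) -> u64 {
    first_height + rc * len + 1
}

fn main() {
    let (first_height, len) = (100u64, 10u64);
    for rc in 0..3 {
        let start = reward_cycle_to_block_height(first_height, rc, len);
        // The downloader wants the block *before* the nominal start:
        let adjusted = start.saturating_sub(1);
        println!("rc {rc}: nominal start {start}, adjusted start {adjusted}");
    }
}
```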
- pub(crate) fn load_tenure_start_blocks( - wanted_tenures: &[WantedTenure], - chainstate: &StacksChainState, - tenure_start_blocks: &mut HashMap, - ) -> Result<(), NetError> { - for wt in wanted_tenures { - let Some(tenure_start_block) = chainstate - .nakamoto_blocks_db() - .get_nakamoto_tenure_start_block(&wt.tenure_id_consensus_hash)? - else { - test_debug!("No tenure-start block for {}", &wt.tenure_id_consensus_hash); - continue; - }; - tenure_start_blocks.insert(tenure_start_block.block_id(), tenure_start_block); - } - Ok(()) - } - - /// Update our local tenure start block data - fn update_tenure_start_blocks( - &mut self, - chainstate: &StacksChainState, - ) -> Result<(), NetError> { - Self::load_tenure_start_blocks( - &self.wanted_tenures, - chainstate, - &mut self.tenure_start_blocks, - ) - } - - /// Update `self.wanted_tenures` and `self.prev_wanted_tenures` with newly-discovered sortition - /// data. These lists are extended in three possible ways, depending on the sortition tip: - /// - /// * If the sortition tip is in the same reward cycle that the block downloader is tracking, - /// then any newly-available sortitions are loaded via `load_wanted_tenures_at_tip()` and appended - /// to `self.wanted_tenures`. This is what happens most of the time in steady-state. - /// - /// * Otherwise, if the sortition tip is different (i.e. ahead) of the block downloader's - /// tracked reward cycle, _and_ if it's safe to do so (discussed below), then the next reward - /// cycle's sortitions are loaded. `self.prev_wanted_tenures` is populated with all of the - /// wanted tenures from the prior reward cycle, and `self.wanted_tenures` is populated with all - /// of the wanted tenures from the current reward cycle. - /// - /// Due to the way the chains coordinator works, the sortition DB will never be more than one - /// reward cycle ahead of the block downloader. This is because sortitions cannot be processed - /// (and will not be processed) until their corresponding PoX anchor block has been processed. - /// As such, the second case above only occurs at a reward cycle boundary -- specifically, the - /// sortition DB is in the process of being updated by the chains coordinator with the next - /// reward cycle's sortitions. - /// - /// Naturally, processing a new reward cycle is disruptive to the download state machine, which - /// can be in the process of finishing up downloading the prepare phase for a reward cycle at - /// the same time as the sortition DB processing the next reward cycle. 
To ensure that the - /// downloader doesn't miss anything, this code checks (via `have_unprocessed_tenures()`) that - /// all wanted tenures for which we have inventory data have been downloaded before advancing - /// `self.wanted_tenures` and `self.prev_wanted_tenures.` - fn extend_wanted_tenures( - &mut self, - network: &PeerNetwork, - sortdb: &SortitionDB, - ) -> Result<(), NetError> { - let sort_tip = &network.burnchain_tip; - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - test_debug!("No network inventories"); - return Err(NetError::PeerNotConnected); - }; - - let last_sort_height_opt = self.last_sort_tip.as_ref().map(|sn| sn.block_height); - let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height) - .expect("FATAL: burnchain tip is before system start"); - - let mut new_wanted_tenures = Self::load_wanted_tenures_at_tip( - self.last_sort_tip.as_ref(), - sort_tip, - sortdb, - &self.wanted_tenures, - )?; - - let can_advance_wanted_tenures = - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() { - !Self::have_unprocessed_tenures( - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - self.nakamoto_start_height, - ) - .expect("FATAL: first nakamoto block from before system start"), - &self.tenure_downloads.completed_tenures, - prev_wanted_tenures, - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - } else { - test_debug!("No prev_wanted_tenures yet"); - true - }; - - if can_advance_wanted_tenures && self.reward_cycle != sort_rc { - let mut prev_wanted_tenures = vec![]; - let mut cur_wanted_tenures = vec![]; - let prev_wts = self.prev_wanted_tenures.take().unwrap_or(vec![]); - let cur_wts = std::mem::replace(&mut self.wanted_tenures, vec![]); - - for wt in new_wanted_tenures - .into_iter() - .chain(prev_wts.into_iter()) - .chain(cur_wts.into_iter()) - { - test_debug!("Consider wanted tenure: {:?}", &wt); - let wt_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, wt.burn_height) - .expect("FATAL: height before system start"); - if wt_rc + 1 == sort_rc { - prev_wanted_tenures.push(wt); - } else if wt_rc == sort_rc { - cur_wanted_tenures.push(wt); - } else { - test_debug!("Drop wanted tenure: {:?}", &wt); - } - } - - prev_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - cur_wanted_tenures.sort_unstable_by_key(|wt| wt.burn_height); - - test_debug!("prev_wanted_tenures is now {:?}", &prev_wanted_tenures); - test_debug!("wanted_tenures is now {:?}", &cur_wanted_tenures); - - self.prev_wanted_tenures = if prev_wanted_tenures.is_empty() { - None - } else { - Some(prev_wanted_tenures) - }; - self.wanted_tenures = cur_wanted_tenures; - self.reward_cycle = sort_rc; - } else { - test_debug!( - "Append {} wanted tenures: {:?}", - new_wanted_tenures.len(), - &new_wanted_tenures - ); - self.wanted_tenures.append(&mut new_wanted_tenures); - test_debug!("wanted_tenures is now {:?}", &self.wanted_tenures); - } - - Ok(()) - } - - /// Initialize `self.wanted_tenures` and `self.prev_wanted_tenures` for the first time, if they - /// are not set up yet. At all times, `self.prev_wanted_tenures` ought to be initialized to the last - /// full reward cycle's tenures, and `self.wanted_tenures` ought to be initialized to the - /// ongoing reward cycle's tenures. 
- pub(crate) fn initialize_wanted_tenures( - &mut self, - sort_tip: &BlockSnapshot, - sortdb: &SortitionDB, - ) -> Result<(), NetError> { - // check for reorgs - let reorg = PeerNetwork::is_reorg(self.last_sort_tip.as_ref(), sort_tip, sortdb); - if reorg { - // force a reload - test_debug!("Detected reorg! Refreshing wanted tenures"); - self.prev_wanted_tenures = None; - self.wanted_tenures.clear(); - } - - if self - .prev_wanted_tenures - .as_ref() - .map(|pwts| pwts.len()) - .unwrap_or(0) - < usize::try_from(sortdb.pox_constants.reward_cycle_length) - .expect("FATAL: usize cannot support reward cycle length") - { - // this is the first-ever pass, so load up the last full reward cycle - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start") - .saturating_sub(1); - - let mut prev_wanted_tenures = vec![]; - Self::update_wanted_tenures_for_reward_cycle( - sort_rc, - sort_tip, - sortdb, - &mut prev_wanted_tenures, - )?; - - test_debug!( - "initial prev_wanted_tenures (rc {}): {:?}", - sort_rc, - &prev_wanted_tenures - ); - self.prev_wanted_tenures = Some(prev_wanted_tenures); - } - if self.wanted_tenures.is_empty() { - // this is the first-ever pass, so load up the current reward cycle - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start"); - - let mut wanted_tenures = vec![]; - Self::update_wanted_tenures_for_reward_cycle( - sort_rc, - sort_tip, - sortdb, - &mut wanted_tenures, - )?; - - test_debug!( - "initial wanted_tenures (rc {}): {:?}", - sort_rc, - &wanted_tenures - ); - self.wanted_tenures = wanted_tenures; - self.reward_cycle = sort_rc; - } - Ok(()) - } - - /// Determine if the set of `TenureStartEnd`s represents available but unfetched data. Used to - /// determine whether or not to update the set of wanted tenures -- we don't want to skip - /// fetching wanted tenures if they're still available! - pub(crate) fn have_unprocessed_tenures<'a>( - first_nakamoto_rc: u64, - completed_tenures: &HashSet, - prev_wanted_tenures: &[WantedTenure], - tenure_block_ids: &HashMap, - pox_constants: &PoxConstants, - first_burn_height: u64, - inventory_iter: impl Iterator, - ) -> bool { - if prev_wanted_tenures.is_empty() { - return true; - } - - // the anchor block for prev_wanted_tenures must not only be processed, but also we have to - // have seen an inventory message from the subsequent reward cycle. 
If we can see - // inventory messages for the reward cycle after `prev_wanted_rc`, then the former will be - // true - let prev_wanted_rc = prev_wanted_tenures - .first() - .map(|wt| { - pox_constants - .block_height_to_reward_cycle(first_burn_height, wt.burn_height) - .expect("FATAL: wanted tenure before system start") - }) - .unwrap_or(u64::MAX); - - let cur_wanted_rc = prev_wanted_rc.saturating_add(1); - - let mut has_prev_inv = false; - let mut has_cur_inv = false; - for inv in inventory_iter { - if prev_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_prev_inv = true; - } else if inv.tenures_inv.get(&prev_wanted_rc).is_some() { - has_prev_inv = true; - } - - if cur_wanted_rc < first_nakamoto_rc { - // assume the epoch 2.x inventory has this - has_cur_inv = true; - } else if inv.tenures_inv.get(&cur_wanted_rc).is_some() { - has_cur_inv = true; - } - } - - if !has_prev_inv || !has_cur_inv { - debug!("No peer has an inventory for either the previous ({}: available = {}) or current ({}: available = {}) wanted tenures", prev_wanted_rc, has_prev_inv, cur_wanted_rc, has_cur_inv); - return true; - } - - // the state machine updates `tenure_block_ids` _after_ `wanted_tenures`, so verify that - // this isn't a stale `tenure_block_ids` by checking that it contains at least one block in - // the prev_wanted_rc and at least one in the cur_wanted_rc - let mut has_prev_rc_block = false; - let mut has_cur_rc_block = false; - for (_naddr, available) in tenure_block_ids.iter() { - for (_ch, tenure_info) in available.iter() { - if tenure_info.start_reward_cycle == prev_wanted_rc - || tenure_info.end_reward_cycle == prev_wanted_rc - { - has_prev_rc_block = true; - } - if tenure_info.start_reward_cycle == cur_wanted_rc - || tenure_info.end_reward_cycle == cur_wanted_rc - { - has_cur_rc_block = true; - } - } - } - - if (prev_wanted_rc >= first_nakamoto_rc && !has_prev_rc_block) - || (cur_wanted_rc >= first_nakamoto_rc && !has_cur_rc_block) - { - debug!( - "tenure_block_ids stale: missing representation in reward cycles {} ({}) and {} ({})", - prev_wanted_rc, - has_prev_rc_block, - cur_wanted_rc, - has_cur_rc_block, - ); - return true; - } - - let mut ret = false; - for (_naddr, available) in tenure_block_ids.iter() { - for wt in prev_wanted_tenures.iter() { - let Some(tenure_info) = available.get(&wt.tenure_id_consensus_hash) else { - continue; - }; - if completed_tenures.contains(&tenure_info.tenure_id_consensus_hash) { - // this check is necessary because the check for .processed requires that a - // child tenure block has been processed, which isn't guaranteed at a reward - // cycle boundary - test_debug!("Tenure {:?} has been fully downloaded", &tenure_info); - continue; - } - if !tenure_info.processed { - test_debug!( - "Tenure {:?} is available from {} but not processed", - &tenure_info, - &_naddr - ); - ret = true; - } - } - } - ret - } - - /// Update the state machine's wanted tenures and processed tenures, if it's time to do so. - /// This will only happen when the sortition DB has finished processing a reward cycle of - /// tenures when in IBD mode, _OR_ when the sortition tip advances when in steady-state mode. - /// This is the top-level method for managing `self.wanted_tenures` and - /// `self.prev_wanted_tenures`. - /// - /// In the first case, this function will load up the whole list of wanted - /// tenures for this reward cycle, and proceed to download them. 
This happens only on reward - /// cycle boundaries, where the sortition DB is about to begin processing a new reward cycle. - /// The list of wanted tenures for the current reward cycle will be saved as - /// `self.prev_wanted_tenures`, and the set of wanted tenures for the next reward cycle - /// will be stored to `self.wanted_tenures`. It will only update these two lists if it is safe - /// to do so, as determined by `have_unprocessed_tenures()`. - /// - /// In the second case (i.e. not a reward cycle boundary), this function will load up _new_ - /// wanted tenure data and append it to `self.wanted_tenures` via - /// `self.extend_wanted_tenures()` above. If it turns out that the downloader's tracked reward - /// cycle is behind the sortition DB tip's reward cycle, then this will update - /// `self.wnated_tenures` and `self.prev_wanted_tenures` if it is safe to do so. - pub(crate) fn update_wanted_tenures( - &mut self, - network: &PeerNetwork, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - ) -> Result<(), NetError> { - let sort_tip = &network.burnchain_tip; - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - test_debug!("No network inventories"); - return Err(NetError::PeerNotConnected); - }; - - self.initialize_wanted_tenures(sort_tip, sortdb)?; - let last_sort_height_opt = self.last_sort_tip.as_ref().map(|sn| sn.block_height); - let last_sort_height = last_sort_height_opt.unwrap_or(sort_tip.block_height); - let sort_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, last_sort_height) - .expect("FATAL: burnchain tip is before system start"); - - let next_sort_rc = if last_sort_height == sort_tip.block_height { - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - sort_tip.block_height.saturating_add(1), - ) - .expect("FATAL: burnchain tip is before system start") - } else { - sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, sort_tip.block_height) - .expect("FATAL: burnchain tip is before system start") - }; - - test_debug!( - "last_sort_height = {}, sort_rc = {}, next_sort_rc = {}, self.reward_cycle = {}, sort_tip.block_height = {}", - last_sort_height, - sort_rc, - next_sort_rc, - self.reward_cycle, - sort_tip.block_height, - ); - - if sort_rc == next_sort_rc { - // not at a reward cycle boundary, os just extend self.wanted_tenures - test_debug!("Extend wanted tenures since no sort_rc change and we have tenure data"); - self.extend_wanted_tenures(network, sortdb)?; - self.update_tenure_start_blocks(chainstate)?; - return Ok(()); - } - - let can_advance_wanted_tenures = - if let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() { - !Self::have_unprocessed_tenures( - sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - self.nakamoto_start_height, - ) - .expect("FATAL: nakamoto starts before system start"), - &self.tenure_downloads.completed_tenures, - prev_wanted_tenures, - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - ) - } else { - test_debug!("No prev_wanted_tenures yet"); - true - }; - if !can_advance_wanted_tenures { - return Ok(()); - } - - // crossed reward cycle boundary - let mut new_wanted_tenures = vec![]; - Self::update_wanted_tenures_for_reward_cycle( - sort_rc + 1, - sort_tip, - sortdb, - &mut new_wanted_tenures, - )?; - - let mut new_prev_wanted_tenures = vec![]; - Self::update_wanted_tenures_for_reward_cycle( - 
sort_rc, - sort_tip, - sortdb, - &mut new_prev_wanted_tenures, - )?; - - test_debug!("new_wanted_tenures is now {:?}", &new_wanted_tenures); - test_debug!( - "new_prev_wanted_tenures is now {:?}", - &new_prev_wanted_tenures - ); - - self.prev_wanted_tenures = if new_prev_wanted_tenures.is_empty() { - None - } else { - Some(new_prev_wanted_tenures) - }; - self.wanted_tenures = new_wanted_tenures; - self.reward_cycle = sort_rc; - - self.update_tenure_start_blocks(chainstate)?; - Ok(()) - } - - /// Given a set of inventory bit vectors for the current reward cycle, find out which neighbors - /// can serve each tenure (identified by the tenure ID consensus hash). - /// Every tenure ID consensus hash in `wanted_tenures` will be mapped to the returned hash - /// table, but the list of addresses may be empty if no neighbor reports having that tenure. - pub(crate) fn find_available_tenures<'a>( - reward_cycle: u64, - wanted_tenures: &[WantedTenure], - mut inventory_iter: impl Iterator, - ) -> HashMap> { - let mut available: HashMap> = HashMap::new(); - for wt in wanted_tenures.iter() { - available.insert(wt.tenure_id_consensus_hash.clone(), vec![]); - } - - while let Some((naddr, inv)) = inventory_iter.next() { - let Some(rc_inv) = inv.tenures_inv.get(&reward_cycle) else { - // this peer has no inventory data for this reward cycle - debug!( - "Peer {} has no inventory for reward cycle {}", - naddr, reward_cycle - ); - continue; - }; - for (i, wt) in wanted_tenures.iter().enumerate() { - if wt.processed { - continue; - } - - let (ch, ibh) = (&wt.tenure_id_consensus_hash, &wt.winning_block_id); - if ibh == &StacksBlockId([0x00; 32]) { - continue; - } - - let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); - if !rc_inv.get(bit).unwrap_or(false) { - // this neighbor does not have this tenure - test_debug!( - "Peer {} does not have sortition #{} in reward cycle {} (wt {:?})", - naddr, - bit, - reward_cycle, - &wt - ); - continue; - } - - if let Some(neighbor_list) = available.get_mut(ch) { - neighbor_list.push(naddr.clone()); - } else { - available.insert(ch.clone(), vec![naddr.clone()]); - } - } - } - available - } - - /// Find each peer's mapping between tenure ID consensus hashes for the tenures it claims to - /// have in its inventory vector, and its tenure start block ID. - /// - /// This is a static method to facilitate testing. - pub(crate) fn find_tenure_block_ids<'a>( - rc: u64, - wanted_tenures: &[WantedTenure], - next_wanted_tenures: Option<&[WantedTenure]>, - pox_constants: &PoxConstants, - first_burn_height: u64, - mut inventory_iter: impl Iterator, - ) -> HashMap { - let mut tenure_block_ids = HashMap::new(); - while let Some((naddr, tenure_inv)) = inventory_iter.next() { - let Some(peer_tenure_block_ids) = TenureStartEnd::from_inventory( - rc, - wanted_tenures, - next_wanted_tenures, - pox_constants, - first_burn_height, - tenure_inv, - ) else { - // this peer doesn't know about this reward cycle - continue; - }; - tenure_block_ids.insert(naddr.clone(), peer_tenure_block_ids); - } - tenure_block_ids - } - - /// Produce a download schedule for IBD mode. Tenures will be downloaded in sortition order. - /// The first item will be fetched first. 
- pub(crate) fn make_ibd_download_schedule( - nakamoto_start: u64, - wanted_tenures: &[WantedTenure], - available: &HashMap>, - ) -> VecDeque { - let mut schedule = VecDeque::new(); - for wt in wanted_tenures.iter() { - if wt.processed { - continue; - } - if wt.burn_height < nakamoto_start { - continue; - } - if !available.contains_key(&wt.tenure_id_consensus_hash) { - continue; - } - schedule.push_back(wt.tenure_id_consensus_hash.clone()); - } - schedule - } - - /// Produce a download schedule for steady-state mode. Tenures will be downloaded in - /// rarest-first order. - /// The first item will be fetched first. - pub(crate) fn make_rarest_first_download_schedule( - nakamoto_start: u64, - wanted_tenures: &[WantedTenure], - available: &HashMap>, - ) -> VecDeque { - let mut schedule = Vec::with_capacity(available.len()); - for wt in wanted_tenures.iter() { - if wt.processed { - continue; - } - if wt.burn_height < nakamoto_start { - continue; - } - let Some(neighbors) = available.get(&wt.tenure_id_consensus_hash) else { - continue; - }; - schedule.push((neighbors.len(), wt.tenure_id_consensus_hash.clone())); - } - - // order by fewest neighbors first - schedule.sort_by(|a, b| a.0.cmp(&b.0)); - schedule.into_iter().map(|(_count, ch)| ch).collect() - } - - /// How many neighbors can we contact still, given the map of tenures to neighbors which can - /// serve it? - fn count_available_tenure_neighbors( - available: &HashMap>, - ) -> usize { - available - .iter() - .fold(0, |count, (_ch, naddrs)| count.saturating_add(naddrs.len())) - } - - /// This function examines the contents of `self.wanted_tenures` and - /// `self.prev_wanted_tenures`, and calculates the following: - /// - /// * The set of `TenureStartEnd`s for both `self.wanted_tenures` and - /// `self.prev_wanted_tenures`, given the peers' inventory vectors. - /// - /// * The set of which tenures are available from which neighbors - /// - /// * The order in which to fetch tenure data, based on whether or not we're in IBD or - /// steady-state. - /// - /// This function should be called immediately after `update_wanted_tenures()`. 
- pub(crate) fn update_available_tenures( - &mut self, - inventories: &HashMap, - pox_constants: &PoxConstants, - first_burn_height: u64, - ibd: bool, - ) { - if self.tenure_download_schedule.is_empty() { - // try again - self.available_tenures.clear(); - self.tenure_block_ids.clear(); - } - if Self::count_available_tenure_neighbors(&self.available_tenures) > 0 { - // still have requests to try, so don't bother computing a new set of available tenures - test_debug!("Still have requests to try"); - return; - } - if self.wanted_tenures.is_empty() { - // nothing to do - return; - } - if inventories.is_empty() { - // nothing to do - test_debug!("No inventories available"); - return; - } - - // calculate self.available - // get available tenures for both the current and previous reward cycles - let prev_available = self - .prev_wanted_tenures - .as_ref() - .map(|prev_wanted_tenures| { - test_debug!( - "Load availability for prev_wanted_tenures ({}) at rc {}", - prev_wanted_tenures.len(), - self.reward_cycle.saturating_sub(1) - ); - Self::find_available_tenures( - self.reward_cycle.saturating_sub(1), - prev_wanted_tenures, - inventories.iter(), - ) - }) - .unwrap_or(HashMap::new()); - - let mut available = Self::find_available_tenures( - self.reward_cycle, - &self.wanted_tenures, - inventories.iter(), - ); - available.extend(prev_available.into_iter()); - - // calculate self.tenure_block_ids - let prev_tenure_block_ids = self.prev_wanted_tenures - .as_ref() - .map(|prev_wanted_tenures| { - // have both self.prev_wanted_tenures and self.wanted_tenures - test_debug!("Load tenure block IDs for prev_wanted_tenures ({}) and wanted_tenures ({}) at rc {}", prev_wanted_tenures.len(), self.wanted_tenures.len(), self.reward_cycle.saturating_sub(1)); - Self::find_tenure_block_ids( - self.reward_cycle.saturating_sub(1), - prev_wanted_tenures, - Some(&self.wanted_tenures), - pox_constants, - first_burn_height, - inventories.iter(), - ) - }) - .unwrap_or(HashMap::new()); - - let mut tenure_block_ids = { - test_debug!( - "Load tenure block IDs for wanted_tenures ({}) at rc {}", - self.wanted_tenures.len(), - self.reward_cycle - ); - Self::find_tenure_block_ids( - self.reward_cycle, - &self.wanted_tenures, - None, - pox_constants, - first_burn_height, - inventories.iter(), - ) - }; - - // merge tenure block IDs - for (naddr, prev_available) in prev_tenure_block_ids.into_iter() { - if let Some(available) = tenure_block_ids.get_mut(&naddr) { - available.extend(prev_available.into_iter()); - } else { - tenure_block_ids.insert(naddr, prev_available); - } - } - - // create download schedules for unprocessed blocks - let schedule = if ibd { - let mut prev_schedule = self - .prev_wanted_tenures - .as_ref() - .map(|prev_wanted_tenures| { - Self::make_ibd_download_schedule( - self.nakamoto_start_height, - prev_wanted_tenures, - &available, - ) - }) - .unwrap_or(VecDeque::new()); - - let schedule = Self::make_ibd_download_schedule( - self.nakamoto_start_height, - &self.wanted_tenures, - &available, - ); - - prev_schedule.extend(schedule.into_iter()); - prev_schedule - } else { - let mut prev_schedule = self - .prev_wanted_tenures - .as_ref() - .map(|prev_wanted_tenures| { - Self::make_rarest_first_download_schedule( - self.nakamoto_start_height, - prev_wanted_tenures, - &available, - ) - }) - .unwrap_or(VecDeque::new()); - - let schedule = Self::make_rarest_first_download_schedule( - self.nakamoto_start_height, - &self.wanted_tenures, - &available, - ); - - prev_schedule.extend(schedule.into_iter()); - prev_schedule - }; 
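For illustration, here is a minimal, self-contained model of the rarest-first ordering assembled above. Plain `String`s stand in for the real `ConsensusHash` and `NeighborAddress` types, and `rarest_first_schedule` is a hypothetical helper sketching the idea, not the shipped code:

```rust
use std::collections::{HashMap, VecDeque};

/// Rarest-first: tenures served by the fewest peers are scheduled first,
/// so scarce data gets fetched while it is still available somewhere.
fn rarest_first_schedule(available: &HashMap<String, Vec<String>>) -> VecDeque<String> {
    let mut schedule: Vec<(usize, String)> = available
        .iter()
        .map(|(tenure, peers)| (peers.len(), tenure.clone()))
        .collect();
    // order by fewest neighbors first, matching the sort on neighbor count above
    schedule.sort_by(|a, b| a.0.cmp(&b.0));
    schedule.into_iter().map(|(_count, tenure)| tenure).collect()
}

fn main() {
    let mut available = HashMap::new();
    available.insert("tenure-a".to_string(), vec!["p1".into(), "p2".into(), "p3".into()]);
    available.insert("tenure-b".to_string(), vec!["p2".into()]);
    let schedule = rarest_first_schedule(&available);
    // the tenure held by only one peer comes first
    assert_eq!(schedule.front().map(|s| s.as_str()), Some("tenure-b"));
    println!("{:?}", schedule);
}
```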
- - test_debug!("new schedule: {:?}", schedule); - test_debug!("new available: {:?}", &available); - test_debug!("new tenure_block_ids: {:?}", &tenure_block_ids); - - self.tenure_download_schedule = schedule; - self.tenure_block_ids = tenure_block_ids; - self.available_tenures = available; - } - - /// Update our tenure download state machines, given our download schedule, our peers' tenure - /// availabilities, and our computed `TenureStartEnd`s - fn update_tenure_downloaders( - &mut self, - count: usize, - agg_public_keys: &BTreeMap>, - ) { - self.tenure_downloads.make_tenure_downloaders( - &mut self.tenure_download_schedule, - &mut self.available_tenures, - &mut self.tenure_block_ids, - count, - agg_public_keys, - ) - } - - /// Determine whether or not we can start downloading the highest complete tenure and the - /// unconfirmed tenure. Only do this if (1) the sortition DB is at the burnchain tip and (2) - /// all of our wanted tenures are marked as either downloaded or complete. - /// - /// To fully determine if it's appropriate to download unconfirmed tenures, the caller should - /// additionally ensure that there are no in-flight confirmed tenure downloads. - /// - /// This method is static to facilitate testing. - pub(crate) fn need_unconfirmed_tenures<'a>( - nakamoto_start_block: u64, - burnchain_height: u64, - sort_tip: &BlockSnapshot, - completed_tenures: &HashSet, - wanted_tenures: &[WantedTenure], - prev_wanted_tenures: &[WantedTenure], - tenure_block_ids: &HashMap, - pox_constants: &PoxConstants, - first_burn_height: u64, - inventory_iter: impl Iterator, - blocks_db: NakamotoStagingBlocksConnRef, - ) -> bool { - if sort_tip.block_height < burnchain_height { - test_debug!( - "sort_tip {} < burn tip {}", - sort_tip.block_height, - burnchain_height - ); - return false; - } - - if wanted_tenures.is_empty() { - test_debug!("No wanted tenures"); - return false; - } - - if prev_wanted_tenures.is_empty() { - test_debug!("No prev wanted tenures"); - return false; - } - - // there are still confirmed tenures we have to go and get - if Self::have_unprocessed_tenures( - pox_constants - .block_height_to_reward_cycle(first_burn_height, nakamoto_start_block) - .expect("FATAL: nakamoto starts before system start"), - completed_tenures, - prev_wanted_tenures, - tenure_block_ids, - pox_constants, - first_burn_height, - inventory_iter, - ) { - test_debug!("Still have unprocessed tenures, so we don't need unconfirmed tenures"); - return false; - } - - // see if we need any tenures still - for wt in wanted_tenures.iter() { - if completed_tenures.contains(&wt.tenure_id_consensus_hash) { - continue; - } - let is_available = tenure_block_ids - .iter() - .any(|(_, available)| available.contains_key(&wt.tenure_id_consensus_hash)); - - if is_available && !wt.processed { - return false; - } - } - - // there are still tenures that have to be processed - if blocks_db - .has_any_unprocessed_nakamoto_block() - .map_err(|e| { - warn!( - "Failed to determine if there are unprocessed Nakamoto blocks: {:?}", - &e - ); - e - }) - .unwrap_or(true) - { - test_debug!("Still have stored but unprocessed Nakamoto blocks"); - return false; - } - - true - } - - /// Select neighbors to query for unconfirmed tenures, given this node's view of the burnchain - /// and an iterator over the set of ongoing p2p conversations. - /// Only select neighbors that has the same burnchain view as us, and have authenticated to us - /// and are outbound from us (meaning, they're not NAT'ed relative to us). 
- pub(crate) fn make_unconfirmed_tenure_download_schedule<'a>( - chain_view: &BurnchainView, - peers_iter: impl Iterator, - ) -> VecDeque { - let mut schedule = VecDeque::new(); - for (_, convo) in peers_iter { - if chain_view.burn_block_hash != convo.burnchain_tip_burn_header_hash { - continue; - } - if chain_view.burn_block_height != convo.burnchain_tip_height { - continue; - } - if !convo.is_authenticated() { - continue; - } - if !convo.is_outbound() { - continue; - } - schedule.push_back(convo.to_neighbor_address()); - } - schedule - } - - /// Create up to `count` unconfirmed tenure downloaders. Add them to `downloaders`, and remove - /// the remote peer's address from `schedule`. - /// - /// The caller will need to ensure that no request to the ongoing unconfirmed tenure - /// downloaders gets created, lest it replace the unconfirmed tenure request. - /// - /// This method removes items from `schedule` and adds unconfirmed downloaders to - /// `downloaders`. - /// - /// This method is static to facilitate testing. - pub(crate) fn make_unconfirmed_tenure_downloaders( - schedule: &mut VecDeque, - count: usize, - downloaders: &mut HashMap, - highest_processed_block_id: Option, - ) { - while downloaders.len() < count { - let Some(naddr) = schedule.front() else { - break; - }; - if downloaders.contains_key(naddr) { - continue; - } - let unconfirmed_tenure_download = NakamotoUnconfirmedTenureDownloader::new( - naddr.clone(), - highest_processed_block_id.clone(), - ); - - test_debug!("Request unconfirmed tenure state from neighbor {}", &naddr); - downloaders.insert(naddr.clone(), unconfirmed_tenure_download); - schedule.pop_front(); - } - } - - /// Update our unconfirmed tenure download state machines - fn update_unconfirmed_tenure_downloaders( - &mut self, - count: usize, - highest_processed_block_id: Option, - ) { - Self::make_unconfirmed_tenure_downloaders( - &mut self.unconfirmed_tenure_download_schedule, - count, - &mut self.unconfirmed_tenure_downloads, - highest_processed_block_id, - ); - } - - /// Run unconfirmed tenure download state machines. - /// * Update the highest-processed block in each downloader to our highest-processed block - /// * Send any HTTP requests that the downloaders indicate are needed (if they are not blocked - /// waiting for a response) - /// * Obtain any HTTP responses and pass them into the downloaders, thereby advancing their - /// states - /// * Obtain downloaded blocks, and create new confirmed tenure downloaders for the - /// highest-complete tenure downloader. - /// * Clear out downloader state for peers who have disconnected or have finished processing - /// their machines. - /// - /// As the local node processes blocks, update each downloader's view of the highest-processed - /// block so it can cancel itself early if it finds that we've already got the blocks, or if - /// another peer indicates that it has a higher block. - /// - /// This method guarantees that the highest confirmed tenure downloaders instantiated here can - /// be safely run without clobbering ongoing conversations with other neighbors, _provided - /// that_ the download state machine is currently concerned with running unconfirmed tenure - /// downloaders (i.e. it's not in IBD). - /// - /// This method is static to facilitate testing. 
- /// - /// Returns the map from neighbors to the unconfirmed blocks they serve, as well as a map from - /// neighbors to the instantiated confirmed tenure downloaders for their highest completed - /// tenures (this information cannot be determined from sortition history and block inventories - /// alone, since we need to know the tenure-start block from the ongoing tenure). - pub(crate) fn run_unconfirmed_downloaders( - downloaders: &mut HashMap, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - sortdb: &SortitionDB, - sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - highest_complete_tenure: &WantedTenure, - unconfirmed_tenure: &WantedTenure, - ) -> ( - HashMap>, - HashMap, - ) { - let addrs: Vec<_> = downloaders.keys().map(|addr| addr.clone()).collect(); - let mut finished = vec![]; - let mut unconfirmed_blocks = HashMap::new(); - let mut highest_completed_tenure_downloaders = HashMap::new(); - - // find the highest-processed block, and update all ongoing state-machines. - // Then, as faster state-machines linked to more up-to-date peers download newer blocks, - // other state-machines will automatically terminate once they reach the highest block this - // peer has now processed. - let highest_processed_block_id = - StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1); - let highest_processed_block_height = network.stacks_tip.2; - - for (_, downloader) in downloaders.iter_mut() { - downloader.set_highest_processed_block( - highest_processed_block_id.clone(), - highest_processed_block_height, - ); - } - - // send requests - for (naddr, downloader) in downloaders.iter_mut() { - if downloader.is_done() { - finished.push(naddr.clone()); - continue; - } - if neighbor_rpc.has_inflight(&naddr) { - continue; - } - - test_debug!( - "Send request to {} for tenure {:?} (state {})", - &naddr, - &downloader.unconfirmed_tenure_id(), - &downloader.state - ); - if let Err(e) = downloader.send_next_download_request(network, neighbor_rpc) { - debug!( - "Downloader for {} failed; this peer is dead: {:?}", - &naddr, &e - ); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - downloaders.remove(naddr); - } - } - for done_naddr in finished.drain(..) { - downloaders.remove(&done_naddr); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(downloader) = downloaders.get_mut(&naddr) else { - test_debug!("Got rogue response from {}", &naddr); - continue; - }; - - test_debug!("Got response from {}", &naddr); - let Ok(blocks_opt) = downloader.handle_next_download_response( - response, - sortdb, - sort_tip, - chainstate, - &network.aggregate_public_keys, - ) else { - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - if let Some(highest_complete_tenure_downloader) = downloader - .make_highest_complete_tenure_downloader( - highest_complete_tenure, - unconfirmed_tenure, - ) - .map_err(|e| { - warn!( - "Failed to make highest complete tenure downloader for {:?}: {:?}", - &downloader.unconfirmed_tenure_id(), - &e - ); - e - }) - .ok() - { - // don't start this unless the downloader is actually done (this should always be - // the case, but don't tempt fate with an assert!) 
- if downloader.is_done() { - highest_completed_tenure_downloaders - .insert(naddr.clone(), highest_complete_tenure_downloader); - } - } - - unconfirmed_blocks.insert(naddr.clone(), blocks); - if downloader.is_done() { - finished.push(naddr); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - downloaders.remove(naddr); - } - } - for done_naddr in finished.iter() { - downloaders.remove(done_naddr); - } - - (unconfirmed_blocks, highest_completed_tenure_downloaders) - } - - /// Run and process all confirmed tenure downloaders, and do the necessary bookkeeping to deal - /// with failed peer connections. - /// - /// At most `max_count` downloaders will be instantiated at once. - /// - /// Returns the set of downloaded confirmed tenures obtained. - fn download_confirmed_tenures( - &mut self, - network: &mut PeerNetwork, - max_count: usize, - ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> { - // queue up more downloaders - self.update_tenure_downloaders(max_count, &network.aggregate_public_keys); - - // run all downloaders - let new_blocks = self.tenure_downloads.run(network, &mut self.neighbor_rpc); - - // give blocked downloaders their tenure-end blocks from other downloaders that have - // obtained their tenure-start blocks - let new_tenure_starts = self.tenure_downloads.find_new_tenure_start_blocks(); - self.tenure_start_blocks - .extend(new_tenure_starts.into_iter()); - - let dead = self - .tenure_downloads - .handle_tenure_end_blocks(&self.tenure_start_blocks); - - // bookkeeping - for naddr in dead.into_iter() { - self.neighbor_rpc.add_dead(network, &naddr); - } - - new_blocks - } - - /// Run and process all unconfirmed tenure downloads, and highest complete tenure downloads. - /// Do the necessary bookkeeping to remove dead peers. - fn download_unconfirmed_tenures( - &mut self, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - highest_processed_block_id: Option<StacksBlockId>, - ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> { - // queue up more downloaders - self.update_unconfirmed_tenure_downloaders( - usize::try_from(network.get_connection_opts().max_inflight_blocks) - .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), - highest_processed_block_id, - ); - - // run all unconfirmed downloaders, and start confirmed downloaders for the - // highest complete tenure - let burnchain_tip = network.burnchain_tip.clone(); - let Some(unconfirmed_tenure) = self - .wanted_tenures - .last() - .map(|wt| Some(wt.clone())) - .unwrap_or_else(|| { - // unconfirmed tenure is the last tenure in prev_wanted_tenures if - // wanted_tenures.len() is 0 - let prev_wanted_tenures = self.prev_wanted_tenures.as_ref()?; - let wt = prev_wanted_tenures.last()?; - Some(wt.clone()) - }) - else { - // not initialized yet (technically unreachable) - return HashMap::new(); - }; - - // Get the highest WantedTenure. This will be the WantedTenure whose winning block hash is - // the start block hash of the highest complete tenure, and whose consensus hash is the - // tenure ID of the ongoing tenure. It corresponds to the highest sortition for which - // there exists a tenure. - // - // There are three possibilities for obtaining this, based on what we know about tenures - // from the sortition DB and the peers' inventories: - // - // Case 1: There are no sortitions yet in the current reward cycle, so this is the - // second-to-last WantedTenure in the last reward cycle's WantedTenure list.
- // - // Case 2: There is one sortition in the current reward cycle, so this is the last - // WantedTenure in the last reward cycle's WantedTenure list - // - // Case 3: There are two or more sortitions in the current reward cycle, so this is the - // second-to-last WantedTenure in the current reward cycle's WantedTenure list. - let highest_wanted_tenure = if self.wanted_tenures.is_empty() { - // highest complete wanted tenure is the second-to-last tenure in prev_wanted_tenures - let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() else { - // not initialized yet (technically unreachable) - return HashMap::new(); - }; - if prev_wanted_tenures.len() < 2 { - return HashMap::new(); - }; - let Some(wt) = prev_wanted_tenures.get(prev_wanted_tenures.len().saturating_sub(2)) - else { - return HashMap::new(); - }; - wt.clone() - } else if self.wanted_tenures.len() == 1 { - // highest complete tenure is the last tenure in prev_wanted_tenures - let Some(prev_wanted_tenures) = self.prev_wanted_tenures.as_ref() else { - return HashMap::new(); - }; - let Some(wt) = prev_wanted_tenures.last() else { - return HashMap::new(); - }; - wt.clone() - } else { - // highest complete tenure is the second-to-last tenure in wanted_tenures - let Some(wt) = self - .wanted_tenures - .get(self.wanted_tenures.len().saturating_sub(2)) - else { - return HashMap::new(); - }; - wt.clone() - }; - // (a simplified sketch of these three cases appears below) - - // Run the confirmed downloader state machine set, since we could already be processing the - // highest complete tenure download. NOTE: due to the way that we call this method, we're - // guaranteed that if the `tenure_downloads` downloader set has any downloads at all, they - // will only be for the highest complete tenure (i.e. we only call this method if we've - // already downloaded all confirmed tenures), so there's no risk of clobbering any other - // in-flight requests. - let new_confirmed_blocks = if self.tenure_downloads.inflight() > 0 { - self.download_confirmed_tenures(network, 0) - } else { - HashMap::new() - }; - - // Only run unconfirmed downloaders if we're _not_ busy obtaining the highest confirmed - // tenure. The behavior here ensures that we first obtain the highest complete tenure, and - // then poll for new unconfirmed tenure blocks.
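Here is a compact, self-contained model of the three cases above, with plain `u64`s standing in for `WantedTenure`s and a hypothetical `highest_complete` helper; it only illustrates the index arithmetic, not the real types:

```rust
/// Pick the "highest complete" item given the current and previous reward
/// cycle's wanted tenures. `None` means the state machine is uninitialized.
fn highest_complete(wanted: &[u64], prev_wanted: Option<&[u64]>) -> Option<u64> {
    match wanted.len() {
        // Case 1: no sortitions this cycle -> second-to-last of previous cycle
        0 => {
            let prev = prev_wanted?;
            prev.get(prev.len().checked_sub(2)?).copied()
        }
        // Case 2: one sortition this cycle -> last of previous cycle
        1 => prev_wanted?.last().copied(),
        // Case 3: two or more -> second-to-last of this cycle
        n => wanted.get(n - 2).copied(),
    }
}

fn main() {
    assert_eq!(highest_complete(&[], Some(&[10, 11, 12])), Some(11));
    assert_eq!(highest_complete(&[20], Some(&[10, 11, 12])), Some(12));
    assert_eq!(highest_complete(&[20, 21, 22], None), Some(21));
    assert_eq!(highest_complete(&[], None), None);
}
```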
- let (new_unconfirmed_blocks, new_highest_confirmed_downloaders) = - if self.tenure_downloads.inflight() > 0 { - (HashMap::new(), HashMap::new()) - } else { - Self::run_unconfirmed_downloaders( - &mut self.unconfirmed_tenure_downloads, - network, - &mut self.neighbor_rpc, - sortdb, - &burnchain_tip, - chainstate, - &highest_wanted_tenure, - &unconfirmed_tenure, - ) - }; - - // schedule downloaders for the highest-confirmed tenure, if we generated any - self.tenure_downloads - .add_downloaders(new_highest_confirmed_downloaders.into_iter()); - - // coalesce blocks -- maps consensus hash to map of block id to block - let mut coalesced_blocks: HashMap<ConsensusHash, HashMap<StacksBlockId, NakamotoBlock>> = - HashMap::new(); - for blocks in new_unconfirmed_blocks - .into_values() - .chain(new_confirmed_blocks.into_values()) - { - for block in blocks.into_iter() { - let block_id = block.header.block_id(); - if let Some(block_map) = coalesced_blocks.get_mut(&block.header.consensus_hash) { - block_map.insert(block_id, block); - } else { - let mut block_map = HashMap::new(); - let ch = block.header.consensus_hash.clone(); - block_map.insert(block_id, block); - coalesced_blocks.insert(ch, block_map); - } - } - } - - coalesced_blocks - .into_iter() - .map(|(consensus_hash, block_map)| { - let mut block_list: Vec<_> = - block_map.into_iter().map(|(_, block)| block).collect(); - block_list.sort_unstable_by_key(|blk| blk.header.chain_length); - (consensus_hash, block_list) - }) - .collect() - } - - /// Top-level download state machine execution. - /// - /// The downloader transitions between two states in perpetuity: obtaining confirmed tenures, - /// and obtaining the unconfirmed tenure and the highest complete tenure. - /// - /// The system starts out in the "confirmed" mode, since the node must first download all - /// confirmed tenures before it can process the chain tip. But once all confirmed tenures have - /// been downloaded, the system transitions to "unconfirmed" mode whereby it attempts to - /// download the highest complete tenure and any new unconfirmed tenure blocks. It stays in - /// "unconfirmed" mode until there are new confirmed tenures to fetch (which shouldn't happen - /// unless this node misses a few sortitions, such as due to a restart). - fn run_downloads( - &mut self, - burnchain_height: u64, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - ibd: bool, - ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> { - debug!("NakamotoDownloadStateMachine in state {}", &self.state); - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - test_debug!("No network inventories"); - return HashMap::new(); - }; - test_debug!( - "run_downloads: burnchain_height={}, network.burnchain_tip.block_height={}", - burnchain_height, - network.burnchain_tip.block_height - ); - self.update_available_tenures( - &invs.inventories, - &sortdb.pox_constants, - sortdb.first_block_height, - ibd, - ); - - match self.state { - NakamotoDownloadState::Confirmed => { - let new_blocks = self.download_confirmed_tenures( - network, - usize::try_from(network.get_connection_opts().max_inflight_blocks) - .expect("FATAL: max_inflight_blocks exceeds usize::MAX"), - ); - - // keep borrow-checker happy by instantiating this ref again, now that `network` is - // no longer mutably borrowed.
- let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - test_debug!("No network inventories"); - return HashMap::new(); - }; - - debug!( - "tenure_downloads.is_empty: {}", - self.tenure_downloads.is_empty() - ); - if self.tenure_downloads.is_empty() - && Self::need_unconfirmed_tenures( - self.nakamoto_start_height, - burnchain_height, - &network.burnchain_tip, - &self.tenure_downloads.completed_tenures, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - chainstate.nakamoto_blocks_db(), - ) - { - debug!( - "Transition from {} to {}", - &self.state, - NakamotoDownloadState::Unconfirmed - ); - - self.unconfirmed_tenure_download_schedule = - Self::make_unconfirmed_tenure_download_schedule( - &network.chain_view, - network.iter_peer_convos(), - ); - self.state = NakamotoDownloadState::Unconfirmed; - } - - return new_blocks; - } - NakamotoDownloadState::Unconfirmed => { - let highest_processed_block_id = - StacksBlockId::new(&network.stacks_tip.0, &network.stacks_tip.1); - - let new_blocks = self.download_unconfirmed_tenures( - network, - sortdb, - chainstate, - Some(highest_processed_block_id), - ); - - // keep borrow-checker happy by instantiating this ref again, now that `network` is - // no longer mutably borrowed. - let Some(invs) = network.inv_state_nakamoto.as_ref() else { - // nothing to do - test_debug!("No network inventories"); - return HashMap::new(); - }; - - if self.tenure_downloads.is_empty() - && self.unconfirmed_tenure_downloads.is_empty() - && self.unconfirmed_tenure_download_schedule.is_empty() - { - if Self::need_unconfirmed_tenures( - self.nakamoto_start_height, - burnchain_height, - &network.burnchain_tip, - &self.tenure_downloads.completed_tenures, - &self.wanted_tenures, - self.prev_wanted_tenures.as_ref().unwrap_or(&vec![]), - &self.tenure_block_ids, - &sortdb.pox_constants, - sortdb.first_block_height, - invs.inventories.values(), - chainstate.nakamoto_blocks_db(), - ) { - // do this again - self.unconfirmed_tenure_download_schedule = - Self::make_unconfirmed_tenure_download_schedule( - &network.chain_view, - network.iter_peer_convos(), - ); - debug!( - "Transition from {} to {}", - &self.state, - NakamotoDownloadState::Unconfirmed - ); - self.state = NakamotoDownloadState::Unconfirmed; - } else { - debug!( - "Transition from {} to {}", - &self.state, - NakamotoDownloadState::Confirmed - ); - self.state = NakamotoDownloadState::Confirmed; - } - } - - return new_blocks; - } - } - } - - /// Go and get tenures. Returns list of blocks per tenure, identified by consensus hash. - /// The blocks will be sorted by height, but may not be contiguous. A simplified sketch of - /// the confirmed/unconfirmed mode loop driven by `run_downloads` appears below.
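The mode flip implemented by `run_downloads` above reduces to a small two-state loop. Here is a minimal, self-contained sketch; the hypothetical `confirmed_done` and `new_confirmed_work` predicates stand in for the real `tenure_downloads.is_empty()` and `need_unconfirmed_tenures()` checks, and the real transitions carry extra conditions omitted here:

```rust
/// A minimal model of the downloader's two top-level modes.
#[derive(Debug, PartialEq, Clone, Copy)]
enum Mode {
    Confirmed,
    Unconfirmed,
}

/// Leave Confirmed once all confirmed tenures are fetched; fall back to
/// Confirmed from Unconfirmed when new confirmed tenures appear.
fn step(mode: Mode, confirmed_done: bool, new_confirmed_work: bool) -> Mode {
    match mode {
        Mode::Confirmed if confirmed_done => Mode::Unconfirmed,
        Mode::Unconfirmed if new_confirmed_work => Mode::Confirmed,
        same => same,
    }
}

fn main() {
    let mut mode = Mode::Confirmed;
    mode = step(mode, true, false);
    assert_eq!(mode, Mode::Unconfirmed);
    mode = step(mode, false, true);
    assert_eq!(mode, Mode::Confirmed);
}
```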
- pub fn run( - &mut self, - burnchain_height: u64, - network: &mut PeerNetwork, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - ibd: bool, - ) -> Result>, NetError> { - self.update_wanted_tenures(&network, sortdb, chainstate)?; - self.update_processed_tenures(chainstate)?; - let new_blocks = self.run_downloads(burnchain_height, network, sortdb, chainstate, ibd); - self.last_sort_tip = Some(network.burnchain_tip.clone()); - Ok(new_blocks) - } -} diff --git a/stackslib/src/net/download/nakamoto/mod.rs b/stackslib/src/net/download/nakamoto/mod.rs deleted file mode 100644 index ddef9796810..00000000000 --- a/stackslib/src/net/download/nakamoto/mod.rs +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . - -//! This file contains the Nakamoto block downloader implementation. -//! -//! # Overview -//! -//! The downloader is implemented as a network state machine, which is called from the main event -//! loop of the p2p network. On each pass, the downloader state machine inspects the Stacks chain -//! state and peer block inventories to see if there are any tenures to download, and if so, it -//! queues up HTTP requests for the blocks and reacts to their responses. It yields the downloaded -//! blocks, which the p2p main loop yields in its `NetworkResult` for the relayer to consume. -//! -//! # Design -//! -//! The state machine has three layers: a top-level state machine for managing all of -//! the requisite state for identifying tenures to download, a pair of low-level state machines for -//! fetching individual tenures, and a middle layer for using the tenure data to drive the low-level -//! state machines to fetch the requisite tenures. -//! -//! The three-layer design is meant to provide a degree of encapsulation of each downloader -//! concern. Because downloading tenures is a multi-step process, we encapsulate the steps to -//! download a single tenure into a low-level state machine which can be driven by separate -//! flow-control. Because we can drive multiple tenure downloads in parallel (i.e. one per peer), -//! we have a middle layer for scheduling tenures to peers for download. This middle layer manages -//! the lifecycles of the lower layer state machines. The top layer is needed to interface the -//! middle layer to the chainstate and the rest of the p2p network, and as such, handles the -//! bookkeeping so that the lower layers can operate without needing access to this -//! otherwise-unrelated concern. -//! -//! ## NakamotoDownloadStateMachine -//! -//! The top-level download state machine (`NakamotoDownloadStateMachine`) has two states: -//! Obtaining confirmed tenures, and obtaining unconfirmed tenures. A _confirmed_ tenure is a -//! tenure for which we can obtain the start and end block hashes using peer inventories and the -//! sortition DB. 
The hashes are embedded within sortition winners, and the inventories tell us -//! which sortitions correspond to tenure-starts and tenure-ends (each tenure-end is the -//! tenure-start of the next tenure). An _unconfirmed_ tenure is a tenure that is not confirmed -- -//! we do not have one or both of its start/end block hashes available from the sortition history -//! since they have not been recorded yet. -//! -//! The `NakamotoDownloadStateMachine` operates by attempting to download each reward cycle's -//! tenures, including the current reward cycle. Once it has obtained them all for the current -//! reward cycle, it proceeds to fetch the next reward cycle's tenures. It does this because the -//! sortition DB itself cannot inform us of the tenure start/end block hashes in a given reward -//! cycle until the PoX anchor block mined in the previous reward cycle has been downloaded and -//! processed. -//! -//! To achieve this, the `NakamotoDownloadStateMachine` performs a lot of bookkeeping. Namely, it -//! keeps track of: -//! -//! * The ongoing and prior reward cycle's sortitions' tenure IDs and winning block hashes -//! (implemented as lists of `WantedTenure`s) -//! * Which sortitions correspond to tenure start and end blocks (implemented as a table of -//! `TenureStartEnd`s) -//! * Which neighbors can serve which full tenures -//! * What order to request tenures in -//! -//! This information is consumed by the lower levels of the state machine. -//! -//! ## `NakamotoTenureDownloaderSet` -//! -//! Naturally, the `NakamotoDownloadStateMachine` contains two code paths -- one for each mode. -//! To facilitate confirmed tenure downloads, it has a second-layer state machine called -//! the `NakamotoTenureDownloaderSet`. This is responsible for identifying and issuing requests to -//! peers which can serve complete tenures, and keeping track of whether or not the current reward -//! cycle has any remaining tenures to download. To facilitate unconfirmed tenure downloads (which -//! is a much simpler task), it simply provides an internal method for issuing requests and -//! processing responses for its neighbors' unconfirmed tenure data. -//! -//! This middle layer consumes the data maintained by the `NakamotoDownloadStateMachine` in order -//! to instantiate, drive, and clean up one or more per-tenure download state machines. -//! -//! ## `NakamotoTenureDownloader` and `NakamotoUnconfirmedTenureDownloader` -//! -//! Per SIP-021, obtaining a confirmed tenure is a multi-step process. To carry this out, this -//! module contains two third-level state machines: `NakamotoTenureDownloader`, which downloads a -//! single tenure's blocks if the start and end block hash are known, and -//! `NakamotoUnconfirmedTenureDownloader`, which downloads the ongoing tenure. The -//! `NakamotoTenureDownloaderSet` uses a set of `NakamotoTenureDownloader` instances (one per -//! neighbor) to fetch confirmed tenures, and the `NakamotoDownloadStateMachine`'s unconfirmed -//! tenure download state provides a method for driving a set of -//! `NakamotoUnconfirmedTenureDownloader` machines to poll neighbors for their latest tenure -//! blocks. -//! -//! # Implementation -//! -//! The implementation here plugs directly into the p2p state machine, and is called once per pass. -//! Unlike in Stacks 2.x, the downloader is consistently running, and can act on newly-discovered -//! tenures once a peer's inventory reports their availability. This is because Nakamoto is more -//!
latency-sensitive than Stacks 2.x, and nodes need to obtain blocks as quickly as possible. -//! -//! Concerning latency, a lot of attention is paid to reducing the amount of gratuitous I/O -//! required for the state machine to run. The bookkeeping steps in the -//! `NakamotoDownloadStateMachine` may seem tedious, but they are specifically designed to only -//! load new sortition and chainstate data when it is necessary to do so. Most of the time, the -//! downloader never touches disk; it only needs to do so when it is considering new sortitions and -//! new chain tips. - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -mod download_state_machine; -mod tenure; -mod tenure_downloader; -mod tenure_downloader_set; -mod tenure_downloader_unconfirmed; - -pub use crate::net::download::nakamoto::download_state_machine::{ - NakamotoDownloadState, NakamotoDownloadStateMachine, -}; -pub use crate::net::download::nakamoto::tenure::{AvailableTenures, TenureStartEnd, WantedTenure}; -pub use crate::net::download::nakamoto::tenure_downloader::{ - NakamotoTenureDownloadState, NakamotoTenureDownloader, -}; -pub use crate::net::download::nakamoto::tenure_downloader_set::NakamotoTenureDownloaderSet; -pub use crate::net::download::nakamoto::tenure_downloader_unconfirmed::{ - NakamotoUnconfirmedDownloadState, NakamotoUnconfirmedTenureDownloader, -}; - -impl PeerNetwork { - /// Set up the Nakamoto block downloader - pub fn init_nakamoto_block_downloader(&mut self) { - if self.block_downloader_nakamoto.is_some() { - return; - } - let epoch = 
self.get_epoch_by_epoch_id(StacksEpochId::Epoch30); - let downloader = NakamotoDownloadStateMachine::new(epoch.start_height); - self.block_downloader_nakamoto = Some(downloader); - } - - /// Drive the block download state machine - pub fn sync_blocks_nakamoto( - &mut self, - burnchain_height: u64, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - ibd: bool, - ) -> Result>, NetError> { - if self.block_downloader_nakamoto.is_none() { - self.init_nakamoto_block_downloader(); - } - let Some(mut block_downloader) = self.block_downloader_nakamoto.take() else { - return Ok(HashMap::new()); - }; - - let new_blocks_res = block_downloader.run(burnchain_height, self, sortdb, chainstate, ibd); - self.block_downloader_nakamoto = Some(block_downloader); - - new_blocks_res - } - - /// Perform block sync. - /// Drive the state machine, and clear out any dead and banned neighbors - pub fn do_network_block_sync_nakamoto( - &mut self, - burnchain_height: u64, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - ibd: bool, - ) -> Result>, NetError> { - let res = self.sync_blocks_nakamoto(burnchain_height, sortdb, chainstate, ibd)?; - - let Some(mut block_downloader) = self.block_downloader_nakamoto.take() else { - return Ok(res); - }; - - for broken in block_downloader.neighbor_rpc.take_broken() { - self.deregister_and_ban_neighbor(&broken); - } - - for dead in block_downloader.neighbor_rpc.take_dead() { - self.deregister_neighbor(&dead); - } - - self.block_downloader_nakamoto = Some(block_downloader); - Ok(res) - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs deleted file mode 100644 index 53563ab3345..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ /dev/null @@ -1,348 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
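The `take()`/put-back dance in `sync_blocks_nakamoto` above is a common Rust pattern for running a state machine that needs `&mut` access to its own owner. A minimal sketch of the pattern follows, with hypothetical `Network` and `Downloader` types standing in for `PeerNetwork` and `NakamotoDownloadStateMachine`:

```rust
/// Hypothetical stand-ins for `PeerNetwork` and `NakamotoDownloadStateMachine`.
struct Downloader {
    passes: u64,
}

struct Network {
    downloader: Option<Downloader>,
    tip_height: u64,
}

impl Downloader {
    /// The state machine needs mutable access to the network that owns it.
    fn run(&mut self, network: &mut Network) -> u64 {
        self.passes += 1;
        network.tip_height // pretend we synced to the tip
    }
}

impl Network {
    fn sync(&mut self) -> u64 {
        // Temporarily move the machine out of `self` so we can pass
        // `&mut self` into it without a double mutable borrow.
        let Some(mut downloader) = self.downloader.take() else {
            return 0;
        };
        let result = downloader.run(self);
        // Put it back so its state persists across passes.
        self.downloader = Some(downloader);
        result
    }
}

fn main() {
    let mut network = Network { downloader: Some(Downloader { passes: 0 }), tip_height: 42 };
    assert_eq!(network.sync(), 42);
    assert_eq!(network.downloader.as_ref().map(|d| d.passes), Some(1));
}
```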
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A tenure that this node needs data for. -#[derive(Debug, PartialEq, Clone)] -pub struct WantedTenure { - /// Consensus hash that identifies the start of the tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Winning block-commit block ID for this tenure's snapshot (NOTE THAT THIS IS NOT THE - /// TENURE-START BLOCK FOR THIS TENURE). - pub winning_block_id: StacksBlockId, - /// burnchain block height of this tenure ID consensus hash - pub burn_height: u64, - /// Whether or not this tenure has been acted upon (i.e. set to true if there's no need to - /// download it) - pub processed: bool, -} - -impl WantedTenure { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - winning_block_id: StacksBlockId, - burn_height: u64, - ) -> Self { - Self { - tenure_id_consensus_hash, - winning_block_id, - burn_height, - processed: false, - } - } -} - -/// A tenure's start and end blocks. This is constructed from a sequence of `WantedTenure`s and a -/// node's inventory vector over them. -#[derive(Debug, PartialEq, Clone)] -pub struct TenureStartEnd { - /// Consensus hash that identifies the start of the tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Tenure-start block ID - pub start_block_id: StacksBlockId, - /// Last block ID - pub end_block_id: StacksBlockId, - /// Whether or not to fetch the end-block of this tenure directly. This is decided based on - /// where the tenure falls in the reward cycle (e.g. if it's the last complete tenure in the - /// reward cycle). 
- pub fetch_end_block: bool, - /// Reward cycle of the start block - pub start_reward_cycle: u64, - /// Reward cycle of the end block - pub end_reward_cycle: u64, - /// Whether or not this tenure has been processed - pub processed: bool, -} - -pub type AvailableTenures = HashMap; - -impl TenureStartEnd { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - start_block_id: StacksBlockId, - end_block_id: StacksBlockId, - start_reward_cycle: u64, - end_reward_cycle: u64, - processed: bool, - ) -> Self { - Self { - tenure_id_consensus_hash, - start_block_id, - end_block_id, - start_reward_cycle, - end_reward_cycle, - fetch_end_block: false, - processed, - } - } - - /// Given a list of wanted tenures and a peer's inventory bitvectors over the same range of - /// tenures, calculate the list of start/end blocks for each wanted tenure. - /// - /// Recall that in Nakamoto, a block-commit commits to the parent tenure's first block. So if - /// bit i is set (i.e. `wanted_tenures[i]` has tenure data), then it really means that the tenure - /// start block is the winning block hash in the _subsequent_ `wanted_tenures` list item for which - /// its corresponding bit is 1. Similarly, the end block is the winning block hash in the - /// `wanted_tenures` list item _after that_ whose bit is 1. - /// - /// As such, this algorithm needs to search not only the wanted tenures and inventories for - /// this reward cycle, but also the next. - /// - /// The `wanted_tenures` and `next_wanted_tenures` values must be aligned to reward cycle - /// boundaries (mod 0). The code uses this assumption to assign reward cycles to blocks in the - /// `TenureStartEnd`s in the returned `AvailableTenures` map. - /// - /// Returns the set of available tenures for all tenures in `wanted_tenures` that can be found - /// with the available information. - /// Returns None if there is no inventory data for the given reward cycle. - pub fn from_inventory( - rc: u64, - wanted_tenures: &[WantedTenure], - next_wanted_tenures: Option<&[WantedTenure]>, - pox_constants: &PoxConstants, - first_burn_height: u64, - invs: &NakamotoTenureInv, - ) -> Option { - // if bit i is set, that means that the tenure data for the ith tenure in the sortition - // history was present. But given that block-commits commit to the start block of the - // parent tenure, the start-block ID for tenure i would be the StacksBlockId for the - // next-available tenure. Its end-block ID would be the StacksBlockId for the - // next-available tenure after that. 
- let invbits = invs.tenures_inv.get(&rc)?; - let mut tenure_block_ids = AvailableTenures::new(); - let mut last_tenure = 0; - let mut last_tenure_ch = None; - for (i, wt) in wanted_tenures.iter().enumerate() { - // advance to next tenure-start sortition - let bit = u16::try_from(i).expect("FATAL: more sortitions than u16::MAX"); - if !invbits.get(bit).unwrap_or(false) { - test_debug!("i={} bit not set", i); - /* - i += 1; - */ - continue; - } - - // the last tenure we'll consider - last_tenure = i; - - let Some(wt_start_idx) = ((i + 1)..wanted_tenures.len()).find(|j| { - let bit = u16::try_from(*j).expect("FATAL: more sortitions than u16::MAX"); - invbits.get(bit).unwrap_or(false) - }) else { - test_debug!("i={} out of wanted_tenures", i); - break; - }; - - let Some(wt_start) = wanted_tenures.get(wt_start_idx) else { - test_debug!("i={} no start wanted tenure", i); - break; - }; - - let Some(wt_end_index) = ((wt_start_idx + 1)..wanted_tenures.len()).find(|j| { - let bit = u16::try_from(*j).expect("FATAL: more sortitions than u16::MAX"); - invbits.get(bit).unwrap_or(false) - }) else { - test_debug!("i={} out of wanted_tenures", i); - break; - }; - - let Some(wt_end) = wanted_tenures.get(wt_end_index) else { - test_debug!("i={} no end wanted tenure", i); - break; - }; - - let tenure_start_end = TenureStartEnd::new( - wt.tenure_id_consensus_hash.clone(), - wt_start.winning_block_id.clone(), - wt_end.winning_block_id.clone(), - rc, - rc, - wt.processed, - ); - test_debug!( - "i={}, len={}; {:?}", - i, - wanted_tenures.len(), - &tenure_start_end - ); - last_tenure_ch = Some(wt.tenure_id_consensus_hash.clone()); - tenure_block_ids.insert(wt.tenure_id_consensus_hash.clone(), tenure_start_end); - } - - let Some(next_wanted_tenures) = next_wanted_tenures else { - // nothing more to do - test_debug!("No next_wanted_tenures"); - return Some(tenure_block_ids); - }; - - // `wanted_tenures` was a full reward cycle, so be sure to fetch the tenure-end block of - // the last tenure derived from it - if let Some(last_tenure_ch) = last_tenure_ch.take() { - if let Some(last_tenure) = tenure_block_ids.get_mut(&last_tenure_ch) { - test_debug!( - "Will directly fetch end-block {} for tenure {}", - &last_tenure.end_block_id, - &last_tenure.tenure_id_consensus_hash - ); - last_tenure.fetch_end_block = true; - } - } - - let Some(next_invbits) = invs.tenures_inv.get(&rc.saturating_add(1)) else { - // nothing more to do - test_debug!("no inventory for cycle {}", rc.saturating_add(1)); - return Some(tenure_block_ids); - }; - - // start iterating from `last_tenures` - let iter_start = last_tenure; - let iterator = wanted_tenures.get(iter_start..).unwrap_or(&[]); - for (i, wt) in iterator.iter().enumerate() { - test_debug!( - "consider next wanted tenure which starts with i={} {:?}", - iter_start + i, - &wt - ); - - // advance to next tenure-start sortition - let bit = u16::try_from(i + iter_start).expect("FATAL: more sortitions than u16::MAX"); - if !invbits.get(bit).unwrap_or(false) { - test_debug!("i={} bit not set", i); - continue; - } - - // search the remainder of `wanted_tenures`, and if we don't find the end-tenure, - // search `next_wanted_tenures` until we find the tenure-start wanted tenure for the - // ith wanted_tenure - let Some((in_next, wt_start_idx, wt_start)) = ((i + iter_start + 1) - ..wanted_tenures.len()) - .find_map(|j| { - // search `wanted_tenures` - let bit = u16::try_from(j).expect("FATAL: more sortitions than u16::MAX"); - if invbits.get(bit).unwrap_or(false) { - 
wanted_tenures.get(j).map(|tenure| (false, j, tenure)) - } else { - None - } - }) - .or_else(|| { - // search `next_wanted_tenures` - (0..next_wanted_tenures.len()).find_map(|n| { - let bit = u16::try_from(n).expect("FATAL: more sortitions than u16::MAX"); - if next_invbits.get(bit).unwrap_or(false) { - next_wanted_tenures.get(n).map(|tenure| (true, n, tenure)) - } else { - None - } - }) - }) - else { - test_debug!( - "i={} out of wanted_tenures and next_wanted_tenures", - iter_start + i - ); - break; - }; - - // search after the wanted tenure we just found to get the tenure-end wanted tenure. It - // is guaranteed to be in `next_wanted_tenures`, since otherwise we would have already - // found it - let next_start = if in_next { wt_start_idx + 1 } else { 0 }; - let Some(wt_end) = (next_start..next_wanted_tenures.len()).find_map(|k| { - let bit = u16::try_from(k).expect("FATAL: more sortitions than u16::MAX"); - if next_invbits.get(bit).unwrap_or(false) { - next_wanted_tenures.get(k) - } else { - None - } - }) else { - test_debug!("i={} out of next_wanted_tenures", iter_start + i); - break; - }; - - let mut tenure_start_end = TenureStartEnd::new( - wt.tenure_id_consensus_hash.clone(), - wt_start.winning_block_id.clone(), - wt_end.winning_block_id.clone(), - rc, - pox_constants - .block_height_to_reward_cycle(first_burn_height, wt_start.burn_height) - .expect("FATAL: tenure from before system start"), - wt.processed, - ); - tenure_start_end.fetch_end_block = true; - - test_debug!( - "i={},len={},next_len={}; {:?}", - iter_start + i, - wanted_tenures.len(), - next_wanted_tenures.len(), - &tenure_start_end - ); - tenure_block_ids.insert(wt.tenure_id_consensus_hash.clone(), tenure_start_end); - } - - Some(tenure_block_ids) - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs deleted file mode 100644 index c5ea7ba3450..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ /dev/null @@ -1,685 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
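To make the bit-scanning in `TenureStartEnd::from_inventory` above concrete: for each set bit i, the tenure's start block comes from the winner of the next set bit j > i, and its end block from the winner of the next set bit after j. Here is a self-contained sketch over a plain `bool` slice (the real code reads u16-indexed inventory bitvectors and winning block IDs, and continues into the next reward cycle where this sketch stops):

```rust
/// For each tenure whose inventory bit is set, return (tenure index,
/// start-block source index, end-block source index).
fn start_end_indices(invbits: &[bool]) -> Vec<(usize, usize, usize)> {
    let mut out = Vec::new();
    for i in 0..invbits.len() {
        if !invbits[i] {
            continue;
        }
        // start block = winning block of the next set bit after i
        let Some(start) = (i + 1..invbits.len()).find(|&j| invbits[j]) else {
            break; // would have to look into the next reward cycle
        };
        // end block = winning block of the next set bit after `start`
        let Some(end) = (start + 1..invbits.len()).find(|&k| invbits[k]) else {
            break;
        };
        out.push((i, start, end));
    }
    out
}

fn main() {
    // tenures 0, 2, 3, 5 have data
    let invbits = [true, false, true, true, false, true];
    // tenure 0 starts at sortition 2's winner and ends at sortition 3's;
    // tenure 2 starts at 3's and ends at 5's; tenure 3's end block lies in
    // the next cycle, so the scan stops there.
    assert_eq!(start_end_indices(&invbits), vec![(0, 2, 3), (2, 3, 5)]);
}
```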
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for an historic tenure. This is a tenure for which we know the hashes of the -/// start and end block. This includes all tenures except for the two most recent ones. -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoTenureDownloadState { - /// Getting the tenure-start block (the given StacksBlockId is its block ID). - GetTenureStartBlock(StacksBlockId), - /// Waiting for the child tenure's tenure-start block to arrive, which is usually (but not - /// always) handled by the execution of another NakamotoTenureDownloader. The only - /// exceptions are as follows: - /// - /// * if this tenure contains the anchor block, and it's the last tenure in the - /// reward cycle. In this case, the end-block must be directly fetched, since there will be no - /// follow-on NakamotoTenureDownloader in the same reward cycle who can provide this. - /// - /// * if this tenure is the highest complete tenure, and we just learned the start-block of the - /// ongoing tenure, then a NakamotoTenureDownloader will be instantiated with this tenure-end-block - /// already known. This step will be skipped because the end-block is already present in the - /// state machine. - /// - /// * if the deadline (second parameter) is exceeded, the state machine transitions to - /// GetTenureEndBlock.
- /// - /// The two fields here are: - /// * the block ID of the last block in the tenure (which happens to be the block ID of the - /// start block of the next tenure) - /// * the deadline by which this state machine needs to have obtained the tenure end-block - /// before transitioning to `GetTenureEndBlock`. - WaitForTenureEndBlock(StacksBlockId, Instant), - /// Getting the tenure-end block directly. This only happens for tenures whose end-blocks - /// cannot be provided by tenure downloaders within the same reward cycle, and for tenures in - /// which we cannot quickly get the tenure-end block. - /// - /// The field here is the block ID of the tenure end block. - GetTenureEndBlock(StacksBlockId), - /// Receiving tenure blocks. - /// The field here is the hash of the _last_ block in the tenure that must be downloaded. This - /// is because a tenure is fetched in order from highest block to lowest block. - GetTenureBlocks(StacksBlockId), - /// We have gotten all the blocks for this tenure - Done, -} - -pub const WAIT_FOR_TENURE_END_BLOCK_TIMEOUT: u64 = 1; - -impl fmt::Display for NakamotoTenureDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for an historic tenure -- a tenure for which the start and end block IDs -/// can be inferred from the chainstate and a peer's inventory (this excludes the two most recent -/// tenures). -/// -/// This state machine works as follows: -/// -/// 1. Fetch the first block in the given tenure -/// 2. Obtain the last block in the given tenure, via one of the following means: -/// a. Another NakamotoTenureDownloader's tenure-start block happens to be the end-block of this -/// machine's tenure, and can be copied into this machine. -/// b. This machine is configured to directly fetch the end-block. This only happens if this -/// tenure both contains the anchor block for the next reward cycle and happens to be the last -/// tenure in the current reward cycle. -/// c. This machine is given the end-block on instantiation. This only happens when the machine -/// is configured to fetch the highest complete tenure (i.e. the parent of the ongoing tenure); -/// in this case, the end-block is the start-block of the ongoing tenure. -/// 3. Obtain the blocks that lie between the first and last blocks of the tenure, in reverse -/// order. As blocks are found, their signer signatures will be validated against the aggregate -/// public key for this tenure; their hash-chain continuity will be validated against the start -/// and end block hashes; their quantity will be validated against the tenure-change transaction -/// in the end-block. -/// -/// Once the machine has reached the `Done` state, it will have obtained the entire run of Nakamoto -/// blocks for the given tenure (regardless of how many sortitions it straddles, and regardless of -/// whether or not it straddles a reward cycle boundary). -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoTenureDownloader { - /// Consensus hash that identifies this tenure - pub tenure_id_consensus_hash: ConsensusHash, - /// Stacks block ID of the tenure-start block. Learned from the inventory state machine and - /// sortition DB. - pub tenure_start_block_id: StacksBlockId, - /// Stacks block ID of the last block in this tenure (this will be the tenure-start block ID - /// for some other tenure). Learned from the inventory state machine and sortition DB. 
- pub tenure_end_block_id: StacksBlockId, - /// Address of who we're asking for blocks - pub naddr: NeighborAddress, - /// Aggregate public key that signed the start-block of this tenure - pub start_aggregate_public_key: Point, - /// Aggregate public key that signed the end-block of this tenure - pub end_aggregate_public_key: Point, - /// Whether or not we're idle -- i.e. there are no ongoing network requests associated with - /// this state machine. - pub idle: bool, - - /// What state we're in for downloading this tenure - pub state: NakamotoTenureDownloadState, - /// Tenure-start block - pub tenure_start_block: Option<NakamotoBlock>, - /// Pre-stored tenure end block (used by the unconfirmed block downloader). - /// An instance of this state machine will be used to fetch the highest-confirmed tenure, once - /// the start-block for the current tenure is downloaded. This is that start-block, which is - /// used to transition from the `WaitForTenureEndBlock` step to the `GetTenureBlocks` step. - pub tenure_end_block: Option<NakamotoBlock>, - /// Tenure-end block header and TenureChange - pub tenure_end_header: Option<(NakamotoBlockHeader, TenureChangePayload)>, - /// Tenure blocks - pub tenure_blocks: Option<Vec<NakamotoBlock>>, -} - -impl NakamotoTenureDownloader { - pub fn new( - tenure_id_consensus_hash: ConsensusHash, - tenure_start_block_id: StacksBlockId, - tenure_end_block_id: StacksBlockId, - naddr: NeighborAddress, - start_aggregate_public_key: Point, - end_aggregate_public_key: Point, - ) -> Self { - test_debug!( - "Instantiate downloader to {} for tenure {}", - &naddr, - &tenure_id_consensus_hash - ); - Self { - tenure_id_consensus_hash, - tenure_start_block_id, - tenure_end_block_id, - naddr, - start_aggregate_public_key, - end_aggregate_public_key, - idle: false, - state: NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block_id.clone()), - tenure_start_block: None, - tenure_end_header: None, - tenure_end_block: None, - tenure_blocks: None, - } - } - - /// Follow-on constructor used to instantiate a machine for downloading the highest-confirmed - /// tenure. This supplies the tenure end-block if known in advance. - pub fn with_tenure_end_block(mut self, tenure_end_block: NakamotoBlock) -> Self { - self.tenure_end_block = Some(tenure_end_block); - self - } - - /// Is this downloader waiting for the tenure-end block data from some other downloader? Per - /// the struct documentation, this is case 2(a). - pub fn is_waiting(&self) -> bool { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(..) = self.state { - return true; - } else { - return false; - } - } - - /// Validate and accept a given tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the start-block is valid. - /// Returns Err(..) if it is not valid.
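- /// - /// A minimal usage sketch (hedged: `example_naddr`, `start_block`, and the aggregate key - /// bindings here are illustrative placeholders, not values from this codebase): - /// ```ignore - /// let mut dl = NakamotoTenureDownloader::new( - /// tenure_id_consensus_hash, - /// tenure_start_block_id, - /// tenure_end_block_id, - /// example_naddr, // assumed NeighborAddress - /// start_agg_key, // assumed aggregate key for the start block - /// end_agg_key, // assumed aggregate key for the end block - /// ); - /// // `start_block` must match `tenure_start_block_id` and carry a valid signer - /// // signature, or this returns Err(NetError::InvalidMessage). - /// dl.try_accept_tenure_start_block(start_block)?; - /// ```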
- pub fn try_accept_tenure_start_block( - &mut self, - tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoTenureDownloadState::GetTenureStartBlock(_) = &self.state else { - // not the right state for this - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if self.tenure_start_block_id != tenure_start_block.header.block_id() { - // not the block we were expecting - warn!("Invalid tenure-start block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_start_block" => %self.tenure_start_block_id, - "tenure_start_block ID" => %tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if !tenure_start_block - .header - .verify_signer(&self.start_aggregate_public_key) - { - // signature verification failed - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_start_block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-start block for tenure {} block={}", - &self.tenure_id_consensus_hash, - &tenure_start_block.block_id() - ); - self.tenure_start_block = Some(tenure_start_block); - - if let Some((hdr, _tc_payload)) = self.tenure_end_header.as_ref() { - // tenure_end_header supplied externally - self.state = NakamotoTenureDownloadState::GetTenureBlocks(hdr.parent_block_id.clone()); - } else if let Some(tenure_end_block) = self.tenure_end_block.take() { - // we already have the tenure-end block, so immediately proceed to accept it. - test_debug!( - "Preemptively process tenure-end block {} for tenure {}", - tenure_end_block.block_id(), - &self.tenure_id_consensus_hash - ); - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - tenure_end_block.block_id(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - self.try_accept_tenure_end_block(&tenure_end_block)?; - } else { - // need to get tenure_end_header. By default, assume that another - // NakamotoTenureDownloader will provide this block, and allow the - // NakamotoTenureDownloaderSet instance that manages a collection of these - // state-machines to make the call to require this one to fetch the block directly. - self.state = NakamotoTenureDownloadState::WaitForTenureEndBlock( - self.tenure_end_block_id.clone(), - Instant::now() - .checked_add(Duration::new(WAIT_FOR_TENURE_END_BLOCK_TIMEOUT, 0)) - .ok_or(NetError::OverflowError("Deadline is too big".into()))?, - ); - } - Ok(()) - } - - /// Transition this state-machine from waiting for its tenure-end block from another - /// state-machine to directly fetching it. This only needs to happen if the tenure this state - /// machine is downloading contains the PoX anchor block, and it's also the last confirmed - /// tenure in this reward cycle. - /// - /// This function is called by `NakamotoTenureDownloaderSet`, which instantiates, schedules, and - /// runs a set of these machines based on the peers' inventory vectors. But because we don't - /// know if this is the PoX anchor block tenure (or even the last tenure) until we have - /// inventory vectors for this tenure's reward cycle, this state-transition must be driven - /// after this machine's instantiation.
- pub fn transition_to_fetch_end_block(&mut self) -> Result<(), NetError> { - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..) = self.state - else { - return Err(NetError::InvalidState); - }; - test_debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (direct transition)", - &self.naddr, - &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - Ok(()) - } - - /// Transition to fetching the tenure-end block directly if waiting has taken too long. - pub fn transition_to_fetch_end_block_on_timeout(&mut self) { - if let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, wait_deadline) = - self.state - { - if wait_deadline < Instant::now() { - test_debug!( - "Transition downloader to {} to directly fetch tenure-end block {} (timed out)", - &self.naddr, - &end_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id); - } - } - } - - /// Validate and accept a tenure-end block. If accepted, then advance the state. - /// Once accepted, this function extracts the tenure-change transaction and block header from - /// this block (it does not need the entire block). - /// - /// Returns Ok(()) if the block was valid - /// Returns Err(..) if the block was invalid - pub fn try_accept_tenure_end_block( - &mut self, - tenure_end_block: &NakamotoBlock, - ) -> Result<(), NetError> { - if !matches!( - &self.state, - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) - | NakamotoTenureDownloadState::GetTenureEndBlock(_) - ) { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - warn!("Invalid state -- tenure_start_block is not set"); - return Err(NetError::InvalidState); - }; - - if self.tenure_end_block_id != tenure_end_block.header.block_id() { - // not the block we asked for - warn!("Invalid tenure-end block: unexpected"; - "tenure_id" => %self.tenure_id_consensus_hash, - "tenure_id_end_block" => %self.tenure_end_block_id, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if !tenure_end_block - .header - .verify_signer(&self.end_aggregate_public_key) - { - // bad signature - warn!("Invalid tenure-end block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %tenure_end_block.header.block_id(), - "end_aggregate_public_key" => %self.end_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // extract the needful -- need the tenure-change payload (which proves that the tenure-end - // block is the tenure-start block for the next tenure) and the parent block ID (which is - // the next block to download). 
- let Ok(valid) = tenure_end_block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-end block: failed to validate tenure-start"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - if !valid { - warn!("Invalid tenure-end block: not a well-formed tenure-start block"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - } - - let Some(tc_payload) = tenure_end_block.try_get_tenure_change_payload() else { - warn!("Invalid tenure-end block: no tenure-change transaction"; - "block_id" => %tenure_end_block.block_id()); - return Err(NetError::InvalidMessage); - }; - - // tc_payload must point to the tenure-start block's header - if tc_payload.prev_tenure_consensus_hash != tenure_start_block.header.consensus_hash { - warn!("Invalid tenure-end block: tenure-change does not point to tenure-start block"; - "start_block_id" => %tenure_start_block.block_id(), - "end_block_id" => %tenure_end_block.block_id(), - "tc_payload.prev_tenure_consensus_hash" => %tc_payload.prev_tenure_consensus_hash, - "tenure_start.consensus_hash" => %tenure_start_block.header.consensus_hash); - return Err(NetError::InvalidMessage); - } - - debug!( - "Accepted tenure-end header for tenure {} block={}; expect {} blocks", - &self.tenure_id_consensus_hash, - &tenure_end_block.block_id(), - tc_payload.previous_tenure_blocks - ); - self.tenure_end_header = Some((tenure_end_block.header.clone(), tc_payload.clone())); - self.state = NakamotoTenureDownloadState::GetTenureBlocks( - tenure_end_block.header.parent_block_id.clone(), - ); - Ok(()) - } - - /// Determine how many blocks must be in this tenure. - /// Returns None if we don't have the start and end blocks yet. - pub fn tenure_length(&self) -> Option<u64> { - self.tenure_end_header - .as_ref() - .map(|(_hdr, tc_payload)| u64::from(tc_payload.previous_tenure_blocks)) - } - - /// Add downloaded tenure blocks to this machine. - /// If we have collected all tenure blocks, then return them and transition to the Done state. - /// - /// Returns Ok(Some([blocks])) if we got all the blocks in this tenure. The blocks will be in - /// ascending order by height, and will include the tenure-start block but exclude the - /// tenure-end block. - /// Returns Ok(None) if the given blocks were valid, but we still need more. The pointer to - /// the next block to fetch (stored in self.state) will be updated. - /// Returns Err(..) if the blocks were invalid.
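- /// - /// A sketch of the intended accept loop (hedged: `fetch_next_batch` stands in for an - /// HTTP round-trip and is not part of this module): - /// ```ignore - /// loop { - /// let batch: Vec<NakamotoBlock> = fetch_next_batch(&dl)?; // assumed helper - /// if let Some(blocks) = dl.try_accept_tenure_blocks(batch)? { - /// // full tenure obtained, in ascending order by height - /// break; - /// } - /// // otherwise the cursor advanced; fetch the next (lower) batch - /// } - /// ```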
- pub fn try_accept_tenure_blocks( - &mut self, - mut tenure_blocks: Vec<NakamotoBlock>, - ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { - let NakamotoTenureDownloadState::GetTenureBlocks(block_cursor) = &self.state else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest - let mut expected_block_id = block_cursor; - let mut count = 0; - for block in tenure_blocks.iter() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - if !block.header.verify_signer(&self.start_aggregate_public_key) { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %self.tenure_id_consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "start_aggregate_public_key" => %self.start_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - expected_block_id = &block.header.parent_block_id; - count += 1; - if self - .tenure_blocks - .as_ref() - .map(|blocks| blocks.len()) - .unwrap_or(0) - .saturating_add(count) - > self.tenure_length().unwrap_or(0) as usize - { - // there are more blocks downloaded than indicated by the end-block's tenure-change - // transaction. - warn!("Invalid blocks: exceeded {} tenure blocks", self.tenure_length().unwrap_or(0); - "tenure_id" => %self.tenure_id_consensus_hash, - "count" => %count, - "tenure_length" => self.tenure_length().unwrap_or(0), - "num_blocks" => tenure_blocks.len()); - return Err(NetError::InvalidMessage); - } - } - - if let Some(blocks) = self.tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.tenure_blocks = Some(tenure_blocks); - } - - // did we reach the tenure start block? - let Some(blocks) = self.tenure_blocks.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got None)"); - return Err(NetError::InvalidState); - }; - - let Some(earliest_block) = blocks.last() else { - // unreachable but be defensive - warn!("Invalid state: no blocks (infallible -- got empty vec)"); - return Err(NetError::InvalidState); - }; - - let Some(tenure_start_block) = self.tenure_start_block.as_ref() else { - // unreachable but be defensive - warn!("Invalid state: no tenure-start block (infallible)"); - return Err(NetError::InvalidState); - }; - - test_debug!( - "Accepted tenure blocks for tenure {} cursor={} ({})", - &self.tenure_id_consensus_hash, - &block_cursor, - count - ); - if earliest_block.block_id() != tenure_start_block.block_id() { - // still have more blocks to download - let next_block_id = earliest_block.header.parent_block_id.clone(); - debug!( - "Need more blocks for tenure {} (went from {} to {}, next is {})", - &self.tenure_id_consensus_hash, - &block_cursor, - &earliest_block.block_id(), - &next_block_id - ); - self.state = NakamotoTenureDownloadState::GetTenureBlocks(next_block_id); - return Ok(None); - } - - // finished! - self.state = NakamotoTenureDownloadState::Done; - Ok(self - .tenure_blocks - .take() - .map(|blocks| blocks.into_iter().rev().collect())) - } - - /// Produce the next HTTP request that, when successfully executed, will fetch the data needed - /// to advance this state machine.
- /// Not all states require an HTTP request for advancement. - /// - /// Returns Ok(Some(request)) if a request is needed - /// Returns Ok(None) if a request is not needed (i.e. we're waiting for some other machine's - /// state) - /// Returns Err(()) if we're done. - pub fn make_next_download_request( - &self, - peerhost: PeerHost, - ) -> Result<Option<StacksHttpRequest>, ()> { - let request = match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(start_block_id) => { - test_debug!("Request tenure-start block {}", &start_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, start_block_id.clone()) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(_block_id, _deadline) => { - // we're waiting for some other downloader's block-fetch to complete - test_debug!( - "Waiting for tenure-end block {} until {:?}", - &_block_id, - _deadline - ); - return Ok(None); - } - NakamotoTenureDownloadState::GetTenureEndBlock(end_block_id) => { - test_debug!("Request tenure-end block {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_block(peerhost, end_block_id.clone()) - } - NakamotoTenureDownloadState::GetTenureBlocks(end_block_id) => { - test_debug!("Downloading tenure ending at {}", &end_block_id); - StacksHttpRequest::new_get_nakamoto_tenure(peerhost, end_block_id.clone(), None) - } - NakamotoTenureDownloadState::Done => { - // nothing more to do - return Err(()); - } - }; - Ok(Some(request)) - } - - /// Begin the next download request for this state machine. The request will be sent to the - /// data URL corresponding to self.naddr. - /// Returns Ok(true) if we sent the request, or there's already an in-flight request. The - /// caller should try this again until it gets one of the other possible return values. - /// Returns Ok(false) if not (e.g. neighbor is known to be dead or broken) - /// Returns Err(..) if self.naddr is known to be a dead or broken peer, or if we were unable to - /// resolve its data URL to a socket address. - pub fn send_next_download_request( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> Result<bool, NetError> { - if neighbor_rpc.has_inflight(&self.naddr) { - test_debug!("Peer {} has an inflight request", &self.naddr); - return Ok(true); - } - if neighbor_rpc.is_dead_or_broken(network, &self.naddr) { - return Err(NetError::PeerNotConnected); - } - - let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else { - // no conversation open to this neighbor - neighbor_rpc.add_dead(network, &self.naddr); - return Err(NetError::PeerNotConnected); - }; - - let request = match self.make_next_download_request(peerhost) { - Ok(Some(request)) => request, - Ok(None) => { - return Ok(true); - } - Err(_) => { - return Ok(false); - } - }; - - neighbor_rpc.send_request(network, self.naddr.clone(), request)?; - self.idle = false; - Ok(true) - } - - /// Handle a received StacksHttpResponse and advance the state machine. - /// If we get the full tenure's blocks, then return them. - /// Returns Ok(Some([blocks])) if we successfully complete the state machine. - /// Returns Ok(None) if we accepted the response and did a state-transition, but we're not done - /// yet. The caller should now call `send_next_download_request()` - /// Returns Err(..) on failure to process the response.
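- /// - /// A sketch of the request/response cycle a caller drives (hedged: the `network`, - /// `neighbor_rpc`, and `response` bindings are assumed to come from the p2p event loop, - /// as elsewhere in this module): - /// ```ignore - /// if dl.send_next_download_request(network, neighbor_rpc)? { - /// // ... poll sockets; once the reply arrives: - /// if let Some(blocks) = dl.handle_next_download_response(response)? { - /// // tenure complete -- hand `blocks` to the relayer - /// } - /// } - /// ```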
- pub fn handle_next_download_response( - &mut self, - response: StacksHttpResponse, - ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { - self.idle = true; - match self.state { - NakamotoTenureDownloadState::GetTenureStartBlock(_block_id) => { - test_debug!( - "Got download response for tenure-start block {}", - &_block_id - ); - let block = response.decode_nakamoto_block()?; - self.try_accept_tenure_start_block(block)?; - Ok(None) - } - NakamotoTenureDownloadState::WaitForTenureEndBlock(..) => { - test_debug!("Invalid state -- Got download response for WaitForTenureEndBlock"); - Err(NetError::InvalidState) - } - NakamotoTenureDownloadState::GetTenureEndBlock(_block_id) => { - test_debug!("Got download response to tenure-end block {}", &_block_id); - let block = response.decode_nakamoto_block()?; - self.try_accept_tenure_end_block(&block)?; - Ok(None) - } - NakamotoTenureDownloadState::GetTenureBlocks(_end_block_id) => { - test_debug!( - "Got download response for tenure blocks ending at {}", - &_end_block_id - ); - let blocks = response.decode_nakamoto_tenure()?; - self.try_accept_tenure_blocks(blocks) - } - NakamotoTenureDownloadState::Done => Err(NetError::InvalidState), - } - } - - pub fn is_done(&self) -> bool { - self.state == NakamotoTenureDownloadState::Done - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs deleted file mode 100644 index 357b588e8a4..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ /dev/null @@ -1,647 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>.
- -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloadState, NakamotoTenureDownloader, - NakamotoUnconfirmedTenureDownloader, TenureStartEnd, WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// A set of confirmed downloader state machines assigned to one or more neighbors. The block -/// downloader runs tenure-downloaders in parallel, since the downloader for the N+1'st tenure -/// needs to feed data into the Nth tenure. This struct is responsible for scheduling peer -/// connections to downloader state machines, such that each peer is assigned to at most one -/// downloader. A peer is assigned a downloader for the duration of at most one RPC request, at -/// which point, it will be re-assigned a (possibly different) downloader. As such, each machine -/// can make progress even if there is only one available peer (in which case, that peer will get -/// scheduled across multiple machines to drive their progress in the right sequence such that -/// tenures will be incrementally fetched and yielded by the p2p state machine to the relayer). -pub struct NakamotoTenureDownloaderSet { - /// A list of instantiated downloaders that are in progress - pub(crate) downloaders: Vec<Option<NakamotoTenureDownloader>>, - /// An assignment of peers to downloader machines in the `downloaders` list.
- pub(crate) peers: HashMap<NeighborAddress, usize>, - /// The set of tenures that have been successfully downloaded (but possibly not yet stored or - /// processed) - pub(crate) completed_tenures: HashSet<ConsensusHash>, -} - -impl NakamotoTenureDownloaderSet { - pub fn new() -> Self { - Self { - downloaders: vec![], - peers: HashMap::new(), - completed_tenures: HashSet::new(), - } - } - - /// Assign the given peer to the given downloader state machine. Allocate a slot for it if - /// needed. - fn add_downloader(&mut self, naddr: NeighborAddress, downloader: NakamotoTenureDownloader) { - test_debug!( - "Add downloader for tenure {} driven by {}", - &downloader.tenure_id_consensus_hash, - &naddr - ); - if let Some(idx) = self.peers.get(&naddr) { - self.downloaders[*idx] = Some(downloader); - } else { - self.downloaders.push(Some(downloader)); - self.peers.insert(naddr, self.downloaders.len() - 1); - } - } - - /// Does the given neighbor have an assigned downloader state machine? - pub(crate) fn has_downloader(&self, naddr: &NeighborAddress) -> bool { - let Some(idx) = self.peers.get(naddr) else { - return false; - }; - let Some(downloader_opt) = self.downloaders.get(*idx) else { - return false; - }; - downloader_opt.is_some() - } - - /// Drop the downloader associated with the given neighbor, if any. - pub fn clear_downloader(&mut self, naddr: &NeighborAddress) { - let Some(index) = self.peers.remove(naddr) else { - return; - }; - self.downloaders[index] = None; - } - - /// How many downloaders are there? - pub fn num_downloaders(&self) -> usize { - self.downloaders - .iter() - .fold(0, |acc, dl| if dl.is_some() { acc + 1 } else { acc }) - } - - /// How many downloaders are there, which are scheduled? - pub fn num_scheduled_downloaders(&self) -> usize { - let mut cnt = 0; - for (_, idx) in self.peers.iter() { - if let Some(Some(_)) = self.downloaders.get(*idx) { - cnt += 1; - } - } - cnt - } - - /// Add a sequence of (address, downloader) pairs to this downloader set. - pub(crate) fn add_downloaders( - &mut self, - iter: impl IntoIterator<Item = (NeighborAddress, NakamotoTenureDownloader)>, - ) { - for (naddr, downloader) in iter { - if self.has_downloader(&naddr) { - test_debug!("Already have downloader for {}", &naddr); - continue; - } - self.add_downloader(naddr, downloader); - } - } - - /// Count up the number of in-flight messages, based on the states of each instantiated - /// downloader. - pub fn inflight(&self) -> usize { - let mut cnt = 0; - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.is_done() { - continue; - } - cnt += 1; - } - cnt - } - - /// Determine whether or not there exists a downloader for the given tenure, identified by its - /// consensus hash. - pub fn is_tenure_inflight(&self, ch: &ConsensusHash) -> bool { - self.downloaders - .iter() - .find(|d| d.as_ref().map(|x| &x.tenure_id_consensus_hash) == Some(ch)) - .is_some() - } - - /// Determine if this downloader set is empty -- i.e. there's no in-flight requests. - pub fn is_empty(&self) -> bool { - self.inflight() == 0 - } - - /// Try to resume processing a download state machine with a given peer. Since a peer is - /// detached from the machine after a single RPC call, this call is needed to re-attach it to a - /// (potentially different, unblocked) machine for the next RPC call to this peer. - /// - /// Returns true if the peer gets scheduled. - /// Returns false if not.
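- /// - /// For example (hedged: the `set` and `naddr` bindings are illustrative): - /// ```ignore - /// if !set.try_resume_peer(naddr.clone()) { - /// // no idle, unblocked downloader is bound to this peer; the scheduler - /// // may assign it a fresh machine via make_tenure_downloaders() - /// } - /// ```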
- pub fn try_resume_peer(&mut self, naddr: NeighborAddress) -> bool { - if let Some(idx) = self.peers.get(&naddr) { - let Some(Some(_downloader)) = self.downloaders.get(*idx) else { - return false; - }; - - test_debug!( - "Peer {} already bound to downloader for {}", - &naddr, - &_downloader.tenure_id_consensus_hash - ); - return true; - } - for (i, downloader_opt) in self.downloaders.iter_mut().enumerate() { - let Some(downloader) = downloader_opt else { - continue; - }; - if !downloader.idle { - continue; - } - if downloader.is_waiting() { - continue; - } - if downloader.naddr != naddr { - continue; - } - test_debug!( - "Assign peer {} to work on downloader for {} in state {}", - &naddr, - &downloader.tenure_id_consensus_hash, - &downloader.state - ); - self.peers.insert(naddr, i); - return true; - } - return false; - } - - /// Deschedule peers that are bound to downloader slots that are either vacant or correspond to - /// blocked downloaders. - pub fn clear_available_peers(&mut self) { - let mut idled: Vec<NeighborAddress> = vec![]; - for (naddr, i) in self.peers.iter() { - let Some(downloader_opt) = self.downloaders.get(*i) else { - // should be unreachable - idled.push(naddr.clone()); - continue; - }; - let Some(downloader) = downloader_opt else { - test_debug!("Remove peer {} for null download {}", &naddr, i); - idled.push(naddr.clone()); - continue; - }; - if downloader.idle || downloader.is_waiting() { - test_debug!( - "Remove idled peer {} for tenure download {}", - &naddr, - &downloader.tenure_id_consensus_hash - ); - idled.push(naddr.clone()); - } - } - for naddr in idled.into_iter() { - self.peers.remove(&naddr); - } - } - - /// Clear out downloaders (but not their peers) that have finished. The caller should follow - /// this up with a call to `clear_available_peers()`. - pub fn clear_finished_downloaders(&mut self) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - if downloader.is_done() { - *downloader_opt = None; - } - } - } - - /// Find the downloaders that have obtained their tenure-start blocks, and extract them. These - /// will be fed into other downloaders which are blocked on needing their tenure-end blocks. - pub(crate) fn find_new_tenure_start_blocks(&self) -> HashMap<StacksBlockId, NakamotoBlock> { - let mut ret = HashMap::new(); - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - let Some(block) = downloader.tenure_start_block.as_ref() else { - continue; - }; - ret.insert(block.block_id(), block.clone()); - } - ret - } - - /// Given a set of tenure-start blocks, pass them into downloaders that are waiting for their - /// tenure-end blocks. - /// Return a list of peers driving downloaders with failing `tenure_start_blocks` - pub(crate) fn handle_tenure_end_blocks( - &mut self, - tenure_start_blocks: &HashMap<StacksBlockId, NakamotoBlock>, - ) -> Vec<NeighborAddress> { - test_debug!( - "handle tenure-end blocks: {:?}", - &tenure_start_blocks.keys().collect::<Vec<_>>() - ); - let mut dead = vec![]; - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt else { - continue; - }; - let NakamotoTenureDownloadState::WaitForTenureEndBlock(end_block_id, ..)
= - &downloader.state - else { - continue; - }; - let Some(end_block) = tenure_start_blocks.get(end_block_id) else { - continue; - }; - if let Err(e) = downloader.try_accept_tenure_end_block(end_block) { - warn!( - "Failed to accept tenure end-block {} for tenure {}: {:?}", - &end_block.block_id(), - &downloader.tenure_id_consensus_hash, - &e - ); - dead.push(downloader.naddr.clone()); - } - } - dead - } - - /// Does there exist a downloader (possibly unscheduled) for the given tenure? - pub(crate) fn has_downloader_for_tenure(&self, tenure_id: &ConsensusHash) -> bool { - for downloader_opt in self.downloaders.iter() { - let Some(downloader) = downloader_opt else { - continue; - }; - if &downloader.tenure_id_consensus_hash == tenure_id { - test_debug!( - "Have downloader for tenure {} already (idle={}, waiting={}, state={})", - tenure_id, - downloader.idle, - downloader.is_waiting(), - &downloader.state - ); - return true; - } - } - false - } - - /// In the event that the last confirmed tenure in a reward cycle contains the PoX anchor - /// block, we need to go and directly fetch its end block instead of waiting for another - /// NakamotoTenureDownloader to provide it as its tenure-start block. Naively, this method - /// just unconditionally sets the highest available tenure downloader to fetch its tenure end block. - pub(crate) fn try_transition_fetch_tenure_end_blocks( - &mut self, - tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>, - ) { - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - downloader.transition_to_fetch_end_block_on_timeout(); - } - - // find tenures in which we need to fetch the tenure-end block directly. - let mut last_available_tenures: HashSet<StacksBlockId> = HashSet::new(); - for (_, all_available) in tenure_block_ids.iter() { - for (_, available) in all_available.iter() { - if available.fetch_end_block { - last_available_tenures.insert(available.end_block_id.clone()); - } - } - } - - // is anyone downloading this tenure, and if so, are they waiting? If so, then flip to - // fetching - for downloader_opt in self.downloaders.iter_mut() { - let Some(downloader) = downloader_opt.as_mut() else { - continue; - }; - if !downloader.idle { - continue; - } - if !downloader.is_waiting() { - continue; - } - if !last_available_tenures.contains(&downloader.tenure_end_block_id) { - continue; - } - test_debug!( - "Transition downloader for {} from waiting to fetching", - &downloader.tenure_id_consensus_hash - ); - if let Err(e) = downloader.transition_to_fetch_end_block() { - warn!( - "Downloader for {} failed to transition to fetch end block: {:?}", - &downloader.tenure_id_consensus_hash, &e - ); - } - } - } - - /// Create a given number of downloads from a schedule and availability set. - /// Removes items from the schedule, and neighbors from the availability set. - /// A neighbor will be issued at most one request.
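- /// - /// A sketch of how the scheduler is fed (hedged: all bindings here are assumed to be built - /// by the caller from inventory state, and the helper names are illustrative only): - /// ```ignore - /// let mut schedule: VecDeque<ConsensusHash> = wanted_tenures_in_order(); - /// let mut available: HashMap<ConsensusHash, Vec<NeighborAddress>> = who_has_what(); - /// set.make_tenure_downloaders( - /// &mut schedule, - /// &mut available, - /// &tenure_block_ids, - /// max_inflight, // assumed concurrency limit - /// &agg_public_keys, - /// ); - /// ```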
- pub(crate) fn make_tenure_downloaders( - &mut self, - schedule: &mut VecDeque<ConsensusHash>, - available: &mut HashMap<ConsensusHash, Vec<NeighborAddress>>, - tenure_block_ids: &HashMap<NeighborAddress, AvailableTenures>, - count: usize, - agg_public_keys: &BTreeMap<u64, Option<Point>>, - ) { - test_debug!("schedule: {:?}", schedule); - test_debug!("available: {:?}", &available); - test_debug!("tenure_block_ids: {:?}", &tenure_block_ids); - test_debug!("inflight: {}", self.inflight()); - test_debug!( - "count: {}, running: {}, scheduled: {}", - count, - self.num_downloaders(), - self.num_scheduled_downloaders() - ); - - self.clear_available_peers(); - self.clear_finished_downloaders(); - self.try_transition_fetch_tenure_end_blocks(tenure_block_ids); - while self.inflight() < count { - let Some(ch) = schedule.front() else { - break; - }; - if self.completed_tenures.contains(ch) { - test_debug!("Already successfully downloaded tenure {}", &ch); - schedule.pop_front(); - continue; - } - let Some(neighbors) = available.get_mut(ch) else { - // not found on any neighbors, so stop trying this tenure - test_debug!("No neighbors have tenure {}", ch); - schedule.pop_front(); - continue; - }; - if neighbors.is_empty() { - // no more neighbors to try - test_debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - } - let Some(naddr) = neighbors.pop() else { - test_debug!("No more neighbors can serve tenure {}", ch); - schedule.pop_front(); - continue; - }; - if self.try_resume_peer(naddr.clone()) { - continue; - }; - if self.has_downloader_for_tenure(&ch) { - schedule.pop_front(); - continue; - } - - let Some(available_tenures) = tenure_block_ids.get(&naddr) else { - // this peer doesn't have any known tenures, so try the others - test_debug!("No tenures available from {}", &naddr); - continue; - }; - let Some(tenure_info) = available_tenures.get(ch) else { - // this peer does not have a tenure start/end block for this tenure, so try the - // others. - test_debug!("Neighbor {} does not serve tenure {}", &naddr, ch); - continue; - }; - let Some(Some(start_agg_pubkey)) = agg_public_keys.get(&tenure_info.start_reward_cycle) - else { - test_debug!( - "Cannot fetch tenure-start block due to no known aggregate public key: {:?}", - &tenure_info - ); - schedule.pop_front(); - continue; - }; - let Some(Some(end_agg_pubkey)) = agg_public_keys.get(&tenure_info.end_reward_cycle) - else { - test_debug!( - "Cannot fetch tenure-end block due to no known aggregate public key: {:?}", - &tenure_info - ); - schedule.pop_front(); - continue; - }; - - test_debug!( - "Download tenure {} (start={}, end={}) with aggregate keys {}, {} (rc {},{})", - &ch, - &tenure_info.start_block_id, - &tenure_info.end_block_id, - &start_agg_pubkey, - &end_agg_pubkey, - tenure_info.start_reward_cycle, - tenure_info.end_reward_cycle - ); - let tenure_download = NakamotoTenureDownloader::new( - ch.clone(), - tenure_info.start_block_id.clone(), - tenure_info.end_block_id.clone(), - naddr.clone(), - start_agg_pubkey.clone(), - end_agg_pubkey.clone(), - ); - - test_debug!("Request tenure {} from neighbor {}", ch, &naddr); - self.add_downloader(naddr, tenure_download); - schedule.pop_front(); - } - } - - /// Run all confirmed downloaders. - /// * Identify neighbors for which we do not have an inflight request - /// * Get each such neighbor's downloader, and generate its next HTTP request. Send that - /// request to the neighbor and begin driving the underlying socket I/O. - /// * Get each HTTP reply, and pass it into the corresponding downloader's handler to advance - /// its state.
- /// * Identify and remove misbehaving neighbors and neighbors whose connections have broken. - /// - /// Returns the set of downloaded blocks obtained for completed downloaders. These will be - /// full confirmed tenures. - pub fn run( - &mut self, - network: &mut PeerNetwork, - neighbor_rpc: &mut NeighborRPC, - ) -> HashMap<ConsensusHash, Vec<NakamotoBlock>> { - let addrs: Vec<_> = self.peers.keys().cloned().collect(); - let mut finished = vec![]; - let mut finished_tenures = vec![]; - let mut new_blocks = HashMap::new(); - - // send requests - for (naddr, index) in self.peers.iter() { - if neighbor_rpc.has_inflight(&naddr) { - test_debug!("Peer {} has an inflight request", &naddr); - continue; - } - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - test_debug!("No downloader for {}", &naddr); - continue; - }; - if downloader.is_done() { - test_debug!("Downloader for {} is done", &naddr); - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - test_debug!( - "Send request to {} for tenure {} (state {})", - &naddr, - &downloader.tenure_id_consensus_hash, - &downloader.state - ); - let Ok(sent) = downloader.send_next_download_request(network, neighbor_rpc) else { - test_debug!("Downloader for {} failed; this peer is dead", &naddr); - neighbor_rpc.add_dead(network, naddr); - continue; - }; - if !sent { - // this downloader is dead or broken - finished.push(naddr.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - test_debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(&naddr); - } - } - for done_naddr in finished.drain(..) { - test_debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..) { - self.completed_tenures.insert(done_tenure); - } - - // handle responses - for (naddr, response) in neighbor_rpc.collect_replies(network) { - let Some(index) = self.peers.get(&naddr) else { - test_debug!("No downloader for {}", &naddr); - continue; - }; - let Some(Some(downloader)) = self.downloaders.get_mut(*index) else { - test_debug!("No downloader for {}", &naddr); - continue; - }; - test_debug!("Got response from {}", &naddr); - - let Ok(blocks_opt) = downloader.handle_next_download_response(response) else { - test_debug!("Failed to handle download response from {}", &naddr); - neighbor_rpc.add_dead(network, &naddr); - continue; - }; - - let Some(blocks) = blocks_opt else { - continue; - }; - - test_debug!( - "Got {} blocks for tenure {}", - blocks.len(), - &downloader.tenure_id_consensus_hash - ); - new_blocks.insert(downloader.tenure_id_consensus_hash.clone(), blocks); - if downloader.is_done() { - finished.push(naddr.clone()); - finished_tenures.push(downloader.tenure_id_consensus_hash.clone()); - continue; - } - } - - // clear dead, broken, and done - for naddr in addrs.iter() { - if neighbor_rpc.is_dead_or_broken(network, naddr) { - test_debug!("Remove dead/broken downloader for {}", &naddr); - self.clear_downloader(naddr); - } - } - for done_naddr in finished.drain(..) { - test_debug!("Remove finished downloader for {}", &done_naddr); - self.clear_downloader(&done_naddr); - } - for done_tenure in finished_tenures.drain(..)
{ - self.completed_tenures.insert(done_tenure); - } - - new_blocks - } -} diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs deleted file mode 100644 index 4c48a5762fb..00000000000 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_unconfirmed.rs +++ /dev/null @@ -1,754 +0,0 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. - -use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::convert::TryFrom; -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::io::{Read, Write}; -use std::net::{IpAddr, SocketAddr}; -use std::time::{Duration, Instant}; - -use rand::seq::SliceRandom; -use rand::{thread_rng, RngCore}; -use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, PoxId, SortitionId, StacksBlockId, -}; -use stacks_common::types::net::{PeerAddress, PeerHost}; -use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::to_hex; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log}; -use wsts::curve::point::Point; - -use crate::burnchains::{Burnchain, BurnchainView, PoxConstants}; -use crate::chainstate::burn::db::sortdb::{ - BlockHeaderCache, SortitionDB, SortitionDBConn, SortitionHandleConn, -}; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::{ - NakamotoBlock, NakamotoBlockHeader, NakamotoChainState, NakamotoStagingBlocksConnRef, -}; -use crate::chainstate::stacks::db::StacksChainState; -use crate::chainstate::stacks::{ - Error as chainstate_error, StacksBlockHeader, TenureChangePayload, -}; -use crate::core::{ - EMPTY_MICROBLOCK_PARENT_HASH, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, -}; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::chat::ConversationP2P; -use crate::net::db::{LocalPeer, PeerDB}; -use crate::net::download::nakamoto::{ - AvailableTenures, NakamotoTenureDownloader, NakamotoTenureDownloaderSet, TenureStartEnd, - WantedTenure, -}; -use crate::net::http::HttpRequestContents; -use crate::net::httpcore::{StacksHttpRequest, StacksHttpResponse}; -use crate::net::inv::epoch2x::InvState; -use crate::net::inv::nakamoto::{NakamotoInvStateMachine, NakamotoTenureInv}; -use crate::net::neighbors::rpc::NeighborRPC; -use crate::net::neighbors::NeighborComms; -use crate::net::p2p::PeerNetwork; -use crate::net::server::HttpPeer; -use crate::net::{Error as NetError, Neighbor, NeighborAddress, NeighborKey}; -use crate::util_lib::db::{DBConn, Error as DBError}; - -/// Download states for unconfirmed tenures.
These include the ongoing tenure, as well as the -/// last complete tenure whose tenure-end block hash has not yet been written to the burnchain (but -/// the tenure-start hash has -- it was done so in the block-commit for the ongoing tenure). -#[derive(Debug, Clone, PartialEq)] -pub enum NakamotoUnconfirmedDownloadState { - /// Getting the tenure tip information - GetTenureInfo, - /// Get the tenure start block for the ongoing tenure. - /// The inner value is tenure-start block ID of the ongoing tenure. - GetTenureStartBlock(StacksBlockId), - /// Receiving unconfirmed tenure blocks. - /// The inner value is the _last_ block on the ongoing tenure. The ongoing tenure is fetched - /// from highest block to lowest block. - GetUnconfirmedTenureBlocks(StacksBlockId), - /// We have gotten all the unconfirmed blocks for this tenure, and we now have the end block - /// for the highest complete tenure (which can now be obtained via `NakamotoTenureDownloadState`). - Done, -} - -impl fmt::Display for NakamotoUnconfirmedDownloadState { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -/// Download state machine for the unconfirmed tenures. It operates in the following steps: -/// -/// 1. Get /v3/tenures/info to learn the unconfirmed chain tip -/// 2. Get the tenure-start block for the unconfirmed chain tip -/// 3. Get the unconfirmed blocks, starting with the one identified by step (1) and ending with the -/// immediate child of the one obtained in (2) -/// -/// Once this state-machine finishes execution, the tenure-start block is used to construct a -/// `NakamotoTenureDownloader` state machine for the highest-confirmed tenure. -/// -#[derive(Debug, Clone, PartialEq)] -pub struct NakamotoUnconfirmedTenureDownloader { - /// state of this machine - pub state: NakamotoUnconfirmedDownloadState, - /// Address of who we're asking - pub naddr: NeighborAddress, - /// Aggregate public key of the highest confirmed tenure - pub confirmed_aggregate_public_key: Option<Point>, - /// Aggregate public key of the unconfirmed (ongoing) tenure - pub unconfirmed_aggregate_public_key: Option<Point>, - /// Block ID of this node's highest-processed block. - /// We will not download any blocks lower than this, if it's set. - pub highest_processed_block_id: Option<StacksBlockId>, - /// Highest processed block height (which may not need to be loaded) - pub highest_processed_block_height: Option<u64>, - - /// Tenure tip info we obtained for this peer - pub tenure_tip: Option<RPCGetTenureInfo>, - /// Tenure start block for the ongoing tip. - /// This is also the tenure-end block for the highest-complete tip. - pub unconfirmed_tenure_start_block: Option<NakamotoBlock>, - /// Unconfirmed tenure blocks obtained - pub unconfirmed_tenure_blocks: Option<Vec<NakamotoBlock>>, -} - -impl NakamotoUnconfirmedTenureDownloader { - /// Make a new downloader which will download blocks from the tip back down to the optional - /// `highest_processed_block_id` (so we don't re-download the same blocks over and over). - pub fn new(naddr: NeighborAddress, highest_processed_block_id: Option<StacksBlockId>) -> Self { - Self { - state: NakamotoUnconfirmedDownloadState::GetTenureInfo, - naddr, - confirmed_aggregate_public_key: None, - unconfirmed_aggregate_public_key: None, - highest_processed_block_id, - highest_processed_block_height: None, - tenure_tip: None, - unconfirmed_tenure_start_block: None, - unconfirmed_tenure_blocks: None, - } - } - - /// What's the tenure ID of the ongoing tenure?
This is learned from /v3/tenure/info, which is - /// checked upon receipt against the burnchain state (so we're not blindly trusting the remote - /// node). - pub fn unconfirmed_tenure_id(&self) -> Option<&ConsensusHash> { - self.tenure_tip.as_ref().map(|tt| &tt.consensus_hash) - } - - /// Set the highest-processed block. - /// This can be performed by the downloader itself in order to inform ongoing requests for - /// unconfirmed tenures of newly-processed blocks, so they don't re-download blocks this node - /// has already handled. - pub fn set_highest_processed_block( - &mut self, - highest_processed_block_id: StacksBlockId, - highest_processed_block_height: u64, - ) { - self.highest_processed_block_id = Some(highest_processed_block_id); - self.highest_processed_block_height = Some(highest_processed_block_height); - } - - /// Try and accept the tenure info. It will be validated against the sortition DB and its tip. - /// - /// * tenure_tip.consensus_hash - /// This is the consensus hash of the remote node's ongoing tenure. It may not be the - /// sortition tip, e.g. if the tenure spans multiple sortitions. - /// * tenure_tip.tenure_start_block_id - /// This is the first block ID of the ongoing unconfirmed tenure. - /// * tenure_tip.parent_consensus_hash - /// This is the consensus hash of the parent of the ongoing tenure. It's the node's highest - /// complete tenure, for which we know the start and end block IDs. - /// * tenure_tip.parent_tenure_start_block_id - /// This is the tenure start block for the highest complete tenure. It should be equal to - /// the winning Stacks block hash of the snapshot for the ongoing tenure. - /// - /// We may already have the tenure-start block for the unconfirmed tenure. If so, then don't go - /// fetch it again; just get the new unconfirmed blocks. - pub fn try_accept_tenure_info( - &mut self, - sortdb: &SortitionDB, - local_sort_tip: &BlockSnapshot, - chainstate: &StacksChainState, - remote_tenure_tip: RPCGetTenureInfo, - agg_pubkeys: &BTreeMap<u64, Option<Point>>, - ) -> Result<(), NetError> { - if self.state != NakamotoUnconfirmedDownloadState::GetTenureInfo { - return Err(NetError::InvalidState); - } - if self.tenure_tip.is_some() { - return Err(NetError::InvalidState); - } - - // authenticate consensus hashes against canonical chain history - let local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.consensus_hash, - )? - .ok_or(NetError::DBError(DBError::NotFoundError))?; - let parent_local_tenure_sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &remote_tenure_tip.parent_consensus_hash, - )? - .ok_or(NetError::DBError(DBError::NotFoundError))?; - - let ih = sortdb.index_handle(&local_sort_tip.sortition_id); - let ancestor_local_tenure_sn = ih - .get_block_snapshot_by_height(local_tenure_sn.block_height)? - .ok_or(NetError::DBError(DBError::NotFoundError))?; - - if ancestor_local_tenure_sn.sortition_id != local_tenure_sn.sortition_id { - // .consensus_hash is not on the canonical fork - warn!("Unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash); - return Err(DBError::NotFoundError.into()); - } - let ancestor_parent_local_tenure_sn = ih - .get_block_snapshot_by_height(parent_local_tenure_sn.block_height)?
- .ok_or(NetError::DBError(DBError::NotFoundError))?; - - if ancestor_parent_local_tenure_sn.sortition_id != parent_local_tenure_sn.sortition_id { - // .parent_consensus_hash is not on the canonical fork - warn!("Parent unconfirmed tenure consensus hash is not canonical"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(DBError::NotFoundError.into()); - } - - // parent tenure sortition must precede the ongoing tenure sortition - if local_tenure_sn.block_height <= parent_local_tenure_sn.block_height { - warn!("Parent tenure snapshot is not an ancestor of the current tenure snapshot"; - "peer" => %self.naddr, - "consensus_hash" => %remote_tenure_tip.consensus_hash, - "parent_consensus_hash" => %remote_tenure_tip.parent_consensus_hash); - return Err(NetError::InvalidMessage); - } - - // parent tenure start block ID must be the winning block hash for the ongoing tenure's - // snapshot - if local_tenure_sn.winning_stacks_block_hash.0 - != remote_tenure_tip.parent_tenure_start_block_id.0 - { - warn!("Ongoing tenure does not commit to highest complete tenure's start block"; - "remote_tenure_tip.tenure_start_block_id" => %remote_tenure_tip.tenure_start_block_id, - "local_tenure_sn.winning_stacks_block_hash" => %local_tenure_sn.winning_stacks_block_hash); - return Err(NetError::InvalidMessage); - } - - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - // we've synchronized this tenure before, so don't get any more blocks before it. - let highest_processed_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(highest_processed_block_id)? - .ok_or(NetError::DBError(DBError::NotFoundError))? - .0; - - let highest_processed_block_height = highest_processed_block.header.chain_length; - self.highest_processed_block_height = Some(highest_processed_block_height); - - if &remote_tenure_tip.tip_block_id == highest_processed_block_id - || highest_processed_block_height > remote_tenure_tip.tip_height - { - // nothing to do -- we're at or ahead of the remote peer, so finish up. - // If we don't have the tenure-start block for the confirmed tenure that the remote - // peer claims to have, then the remote peer has sent us invalid data and we should - // treat it as such. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::InvalidMessage)?
- .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::Done; - } - } - - if self.state == NakamotoUnconfirmedDownloadState::Done { - // only need to remember the tenure tip - self.tenure_tip = Some(remote_tenure_tip); - return Ok(()); - } - - // we're not finished - let tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, local_tenure_sn.block_height) - .expect("FATAL: sortition from before system start"); - let parent_tenure_rc = sortdb - .pox_constants - .block_height_to_reward_cycle( - sortdb.first_block_height, - parent_local_tenure_sn.block_height, - ) - .expect("FATAL: sortition from before system start"); - - // get aggregate public keys for the unconfirmed tenure and highest-complete tenure sortitions - let Some(Some(confirmed_aggregate_public_key)) = - agg_pubkeys.get(&parent_tenure_rc).cloned() - else { - warn!( - "No aggregate public key for confirmed tenure {} (rc {})", - &parent_local_tenure_sn.consensus_hash, parent_tenure_rc - ); - return Err(NetError::InvalidState); - }; - - let Some(Some(unconfirmed_aggregate_public_key)) = agg_pubkeys.get(&tenure_rc).cloned() - else { - warn!( - "No aggregate public key for unconfirmed tenure {} (rc {})", - &local_tenure_sn.consensus_hash, tenure_rc - ); - return Err(NetError::InvalidState); - }; - - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - { - // proceed to get unconfirmed blocks. We already have the tenure-start block. - let unconfirmed_tenure_start_block = chainstate - .nakamoto_blocks_db() - .get_nakamoto_block(&remote_tenure_tip.tenure_start_block_id)? - .ok_or(NetError::DBError(DBError::NotFoundError))? - .0; - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - remote_tenure_tip.tip_block_id.clone(), - ); - } else { - // get the tenure-start block first - self.state = NakamotoUnconfirmedDownloadState::GetTenureStartBlock( - remote_tenure_tip.tenure_start_block_id.clone(), - ); - } - - test_debug!( - "Will validate unconfirmed blocks with ({},{}) and ({},{})", - &confirmed_aggregate_public_key, - parent_tenure_rc, - &unconfirmed_aggregate_public_key, - tenure_rc - ); - self.confirmed_aggregate_public_key = Some(confirmed_aggregate_public_key); - self.unconfirmed_aggregate_public_key = Some(unconfirmed_aggregate_public_key); - self.tenure_tip = Some(remote_tenure_tip); - - Ok(()) - } - - /// Validate and accept the unconfirmed tenure-start block. If accepted, then advance the state. - /// Returns Ok(()) if the unconfirmed tenure start block was valid - /// Returns Err(..) if it was not valid, or if this function was called out of sequence.
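- /// - /// For example (hedged: the `response` binding is assumed to come from the peer's HTTP - /// reply, as in the confirmed-tenure downloader above): - /// ```ignore - /// let block = response.decode_nakamoto_block()?; - /// udl.try_accept_unconfirmed_tenure_start_block(block)?; - /// // on success the machine advances to GetUnconfirmedTenureBlocks(..) - /// ```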
- pub fn try_accept_unconfirmed_tenure_start_block( - &mut self, - unconfirmed_tenure_start_block: NakamotoBlock, - ) -> Result<(), NetError> { - let NakamotoUnconfirmedDownloadState::GetTenureStartBlock(tenure_start_block_id) = - &self.state - else { - warn!("Invalid state for this method"; - "state" => %self.state); - return Err(NetError::InvalidState); - }; - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { - return Err(NetError::InvalidState); - }; - - // stacker signature has to match the current aggregate public key - if !unconfirmed_tenure_start_block - .header - .verify_signer(unconfirmed_aggregate_public_key) - { - warn!("Invalid tenure-start block: bad signer signature"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_start_block.header.block_id" => %unconfirmed_tenure_start_block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // block has to match the expected hash - if tenure_start_block_id != &unconfirmed_tenure_start_block.header.block_id() { - warn!("Invalid tenure-start block"; - "tenure_id_start_block" => %tenure_start_block_id, - "unconfirmed_tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "unconfirmed_tenure_start_block ID" => %unconfirmed_tenure_start_block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // furthermore, the block has to match the expected tenure ID - if unconfirmed_tenure_start_block.header.consensus_hash != tenure_tip.consensus_hash { - warn!("Invalid tenure-start block or tenure-tip: consensus hash mismatch"; - "tenure_start_block.header.consensus_hash" => %unconfirmed_tenure_start_block.header.consensus_hash, - "tenure_tip.consensus_hash" => %tenure_tip.consensus_hash); - return Err(NetError::InvalidMessage); - } - - self.unconfirmed_tenure_start_block = Some(unconfirmed_tenure_start_block); - self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone(), - ); - Ok(()) - } - - /// Add downloaded unconfirmed tenure blocks. - /// If we have collected all tenure blocks, then return them. - /// Returns Ok(Some(list-of-blocks)) on success, in which case, `list-of-blocks` is the - /// height-ordered sequence of blocks in this tenure, and includes only the blocks that come - /// after the highest-processed block (if set). - /// Returns Ok(None) if there are still blocks to fetch, in which case, the caller should call - /// `send_next_download_request()` - /// Returns Err(..) on invalid state or invalid block. - pub fn try_accept_unconfirmed_tenure_blocks( - &mut self, - mut tenure_blocks: Vec<NakamotoBlock>, - ) -> Result<Option<Vec<NakamotoBlock>>, NetError> { - let NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(last_block_id) = - &self.state - else { - return Err(NetError::InvalidState); - }; - - let Some(tenure_tip) = self.tenure_tip.as_ref() else { - return Err(NetError::InvalidState); - }; - let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref() - else { - return Err(NetError::InvalidState); - }; - - if tenure_blocks.is_empty() { - // nothing to do - return Ok(None); - } - - // blocks must be contiguous and in order from highest to lowest.
- // If there's a tenure-start block, it must be last. - let mut expected_block_id = last_block_id; - let mut finished_download = false; - for (cnt, block) in tenure_blocks.iter().enumerate() { - if &block.header.block_id() != expected_block_id { - warn!("Unexpected Nakamoto block -- not part of tenure"; - "expected_block_id" => %expected_block_id, - "block_id" => %block.header.block_id()); - return Err(NetError::InvalidMessage); - } - if !block.header.verify_signer(unconfirmed_aggregate_public_key) { - warn!("Invalid block: bad signer signature"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "unconfirmed_aggregate_public_key" => %unconfirmed_aggregate_public_key, - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - // we may or may not need the tenure-start block for the unconfirmed tenure. But if we - // do, make sure it's valid, and it's the last block we receive. - let Ok(is_tenure_start) = block.is_wellformed_tenure_start_block() else { - warn!("Invalid tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - }; - if is_tenure_start { - // this is the tenure-start block, so make sure it matches our /v3/tenure/info - if block.header.block_id() != tenure_tip.tenure_start_block_id { - warn!("Unexpected tenure-start block"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "tenure_tip.tenure_start_block_id" => %tenure_tip.tenure_start_block_id); - return Err(NetError::InvalidMessage); - } - - if cnt.saturating_add(1) != tenure_blocks.len() { - warn!("Invalid tenure stream -- got tenure-start before end of tenure"; - "tenure_id" => %tenure_tip.consensus_hash, - "block.header.block_id" => %block.header.block_id(), - "cnt" => cnt, - "len" => tenure_blocks.len(), - "state" => %self.state); - return Err(NetError::InvalidMessage); - } - - finished_download = true; - break; - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_id) = self.highest_processed_block_id.as_ref() { - if expected_block_id == highest_processed_block_id { - // got all the blocks we asked for - finished_download = true; - break; - } - } - - // NOTE: this field can get updated by the downloader while this state-machine is in - // this state. - if let Some(highest_processed_block_height) = - self.highest_processed_block_height.as_ref() - { - if &block.header.chain_length < highest_processed_block_height { - // no need to continue this download - debug!("Cancelling unconfirmed tenure download to {}: have processed block at height {} already", &self.naddr, highest_processed_block_height); - finished_download = true; - break; - } - } - - expected_block_id = &block.header.parent_block_id; - } - - if let Some(blocks) = self.unconfirmed_tenure_blocks.as_mut() { - blocks.append(&mut tenure_blocks); - } else { - self.unconfirmed_tenure_blocks = Some(tenure_blocks); - } - - if finished_download { - // we have all of the unconfirmed tenure blocks that were requested. - // only return those newer than the highest block. 
-            self.state = NakamotoUnconfirmedDownloadState::Done;
-            let highest_processed_block_height =
-                *self.highest_processed_block_height.as_ref().unwrap_or(&0);
-            return Ok(self.unconfirmed_tenure_blocks.take().map(|blocks| {
-                blocks
-                    .into_iter()
-                    .filter(|block| block.header.chain_length > highest_processed_block_height)
-                    .rev()
-                    .collect()
-            }));
-        }
-
-        let Some(blocks) = self.unconfirmed_tenure_blocks.as_ref() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no blocks (infallible -- got empty vec)");
-            return Err(NetError::InvalidState);
-        };
-
-        // still have more to get
-        let Some(earliest_block) = blocks.last() else {
-            // unreachable but be defensive
-            warn!("Invalid state: no blocks (infallible -- got empty vec)");
-            return Err(NetError::InvalidState);
-        };
-        let next_block_id = earliest_block.header.parent_block_id.clone();
-
-        self.state = NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(next_block_id);
-        Ok(None)
-    }
-
-    /// Once this machine runs to completion, examine its state to see if we still need to fetch
-    /// the highest complete tenure. We may not need to, especially if we're just polling for new
-    /// unconfirmed blocks.
-    ///
-    /// Return Ok(true) if we need it still
-    /// Return Ok(false) if we already have it
-    /// Return Err(..) if we encounter a DB error or if this function was called out of sequence.
-    pub fn need_highest_complete_tenure(
-        &self,
-        chainstate: &StacksChainState,
-    ) -> Result<bool, NetError> {
-        if self.state != NakamotoUnconfirmedDownloadState::Done {
-            return Err(NetError::InvalidState);
-        }
-        let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
-        else {
-            return Err(NetError::InvalidState);
-        };
-
-        // if we've processed the unconfirmed tenure-start block already, then we've necessarily
-        // downloaded and processed the highest-complete tenure already.
-        Ok(!NakamotoChainState::has_block_header(
-            chainstate.db(),
-            &unconfirmed_tenure_start_block.header.block_id(),
-            false,
-        )?)
-    }
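// [Editor's note: illustrative sketch, not part of the patch above.] The finishing
// step above trims the blocks (collected tip-first) down to those strictly above the
// highest-processed height, then reverses them into ascending height order via
// `filter(..).rev()`. The same shape with a toy block type:

struct Block {
    height: u64,
}

fn finish(tip_first: Vec<Block>, highest_processed: u64) -> Vec<Block> {
    tip_first
        .into_iter()
        .filter(|b| b.height > highest_processed)
        .rev() // collected highest-to-lowest; return lowest-to-highest
        .collect()
}

fn main() {
    let blocks = vec![Block { height: 12 }, Block { height: 11 }, Block { height: 10 }];
    let out = finish(blocks, 10);
    assert_eq!(out.iter().map(|b| b.height).collect::<Vec<_>>(), vec![11, 12]);
}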
-
-    /// Create a NakamotoTenureDownloader for the highest complete tenure. We already have the
-    /// tenure-end block (which will be supplied to the downloader), but we'll still want to go get
-    /// its tenure-start block.
-    ///
-    /// Returns Ok(downloader) on success
-    /// Returns Err(..) if we call this function out of sequence.
-    pub fn make_highest_complete_tenure_downloader(
-        &self,
-        highest_tenure: &WantedTenure,
-        unconfirmed_tenure: &WantedTenure,
-    ) -> Result<NakamotoTenureDownloader, NetError> {
-        if self.state != NakamotoUnconfirmedDownloadState::Done {
-            return Err(NetError::InvalidState);
-        }
-        let Some(unconfirmed_tenure_start_block) = self.unconfirmed_tenure_start_block.as_ref()
-        else {
-            return Err(NetError::InvalidState);
-        };
-        let Some(confirmed_aggregate_public_key) = self.confirmed_aggregate_public_key.as_ref()
-        else {
-            return Err(NetError::InvalidState);
-        };
-        let Some(unconfirmed_aggregate_public_key) = self.unconfirmed_aggregate_public_key.as_ref()
-        else {
-            return Err(NetError::InvalidState);
-        };
-
-        test_debug!(
-            "Create highest complete tenure downloader for {}",
-            &highest_tenure.tenure_id_consensus_hash
-        );
-        let ntd = NakamotoTenureDownloader::new(
-            highest_tenure.tenure_id_consensus_hash.clone(),
-            unconfirmed_tenure.winning_block_id.clone(),
-            unconfirmed_tenure_start_block.header.block_id(),
-            self.naddr.clone(),
-            confirmed_aggregate_public_key.clone(),
-            unconfirmed_aggregate_public_key.clone(),
-        )
-        .with_tenure_end_block(unconfirmed_tenure_start_block.clone());
-
-        Ok(ntd)
-    }
-
-    /// Produce the next HTTP request that, when successfully executed, will advance this state
-    /// machine.
-    ///
-    /// Returns Some(request) if a request must be sent.
-    /// Returns None if we're done
-    pub fn make_next_download_request(&self, peerhost: PeerHost) -> Option<StacksHttpRequest> {
-        match &self.state {
-            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
-                // need to get the tenure tip
-                return Some(StacksHttpRequest::new_get_nakamoto_tenure_info(peerhost));
-            }
-            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(block_id) => {
-                return Some(StacksHttpRequest::new_get_nakamoto_block(
-                    peerhost,
-                    block_id.clone(),
-                ));
-            }
-            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(tip_block_id) => {
-                return Some(StacksHttpRequest::new_get_nakamoto_tenure(
-                    peerhost,
-                    tip_block_id.clone(),
-                    self.highest_processed_block_id.clone(),
-                ));
-            }
-            NakamotoUnconfirmedDownloadState::Done => {
-                // got all unconfirmed blocks! Next step is to turn this downloader into a confirmed
-                // tenure downloader using the earliest unconfirmed tenure block.
-                return None;
-            }
-        }
-    }
-
-    /// Begin the next download request for this state machine.
-    /// Returns Ok(()) if we sent the request, or there's already an in-flight request. The
-    /// caller should try this again until it gets one of the other possible return values. It's
-    /// up to the caller to determine when it's appropriate to convert this state machine into a
-    /// `NakamotoTenureDownloader`.
-    /// Returns Err(..) if the neighbor is dead or broken.
-    pub fn send_next_download_request(
-        &self,
-        network: &mut PeerNetwork,
-        neighbor_rpc: &mut NeighborRPC,
-    ) -> Result<(), NetError> {
-        if neighbor_rpc.has_inflight(&self.naddr) {
-            test_debug!("Peer {} has an inflight request", &self.naddr);
-            return Ok(());
-        }
-        if neighbor_rpc.is_dead_or_broken(network, &self.naddr) {
-            return Err(NetError::PeerNotConnected);
-        }
-
-        let Some(peerhost) = NeighborRPC::get_peer_host(network, &self.naddr) else {
-            // no conversation open to this neighbor
-            neighbor_rpc.add_dead(network, &self.naddr);
-            return Err(NetError::PeerNotConnected);
-        };
-
-        let Some(request) = self.make_next_download_request(peerhost) else {
-            // treat this downloader as still in-flight since the overall state machine will need
-            // to keep it around long enough to convert it into a tenure downloader for the highest
-            // complete tenure.
-            return Ok(());
-        };
-
-        neighbor_rpc.send_request(network, self.naddr.clone(), request)?;
-        Ok(())
-    }
-
-    /// Handle a received StacksHttpResponse and advance this machine's state
-    /// If we get the full tenure, return it.
-    ///
-    /// Returns Ok(Some(blocks)) if we finished downloading the unconfirmed tenure
-    /// Returns Ok(None) if we're still working, in which case the caller should call
-    /// `send_next_download_request()`
-    /// Returns Err(..) on unrecoverable failure to advance state
-    pub fn handle_next_download_response(
-        &mut self,
-        response: StacksHttpResponse,
-        sortdb: &SortitionDB,
-        local_sort_tip: &BlockSnapshot,
-        chainstate: &StacksChainState,
-        agg_pubkeys: &BTreeMap<u64, Option<Point>>,
-    ) -> Result<Option<Vec<NakamotoBlock>>, NetError> {
-        match &self.state {
-            NakamotoUnconfirmedDownloadState::GetTenureInfo => {
-                test_debug!("Got tenure-info response");
-                let remote_tenure_info = response.decode_nakamoto_tenure_info()?;
-                test_debug!("Got tenure-info response: {:?}", &remote_tenure_info);
-                self.try_accept_tenure_info(
-                    sortdb,
-                    local_sort_tip,
-                    chainstate,
-                    remote_tenure_info,
-                    agg_pubkeys,
-                )?;
-                Ok(None)
-            }
-            NakamotoUnconfirmedDownloadState::GetTenureStartBlock(..) => {
-                test_debug!("Got tenure start-block response");
-                let block = response.decode_nakamoto_block()?;
-                self.try_accept_unconfirmed_tenure_start_block(block)?;
-                Ok(None)
-            }
-            NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks(..) => {
-                test_debug!("Got unconfirmed tenure blocks response");
-                let blocks = response.decode_nakamoto_tenure()?;
-                self.try_accept_unconfirmed_tenure_blocks(blocks)
-            }
-            NakamotoUnconfirmedDownloadState::Done => {
-                return Err(NetError::InvalidState);
-            }
-        }
-    }
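// [Editor's note: illustrative sketch, not part of the patch above.] The intended
// calling pattern for the two removed methods is a poll loop: keep sending the next
// request and feeding responses back in until the machine is done. The types below
// are hypothetical stand-ins, not the stackslib API:

struct Machine {
    steps_left: u32,
}

impl Machine {
    fn make_next_request(&self) -> Option<u32> {
        // None once the state machine has reached its Done state
        (self.steps_left > 0).then_some(self.steps_left)
    }
    fn handle_response(&mut self, _resp: u32) {
        self.steps_left -= 1;
    }
}

fn main() {
    let mut m = Machine { steps_left: 3 };
    while let Some(req) = m.make_next_request() {
        let resp = req; // stand-in for a completed HTTP round trip
        m.handle_response(resp);
    }
    assert!(m.make_next_request().is_none());
}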
-
-    /// Is this machine finished?
-    pub fn is_done(&self) -> bool {
-        self.state == NakamotoUnconfirmedDownloadState::Done
-    }
-}
diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs
index d4a90b19c16..287acb11661 100644
--- a/stackslib/src/net/http/request.rs
+++ b/stackslib/src/net/http/request.rs
@@ -265,7 +265,7 @@ impl StacksMessageCodec for HttpRequestPreamble {
         }
 
         // "User-Agent: $agent\r\nHost: $host\r\n"
-        fd.write_all("User-Agent: stacks/3.0\r\nHost: ".as_bytes())
+        fd.write_all("User-Agent: stacks/2.0\r\nHost: ".as_bytes())
            .map_err(CodecError::WriteError)?;
         fd.write_all(format!("{}", self.host).as_bytes())
             .map_err(CodecError::WriteError)?;
diff --git a/stackslib/src/net/http/tests.rs b/stackslib/src/net/http/tests.rs
index 508ca55c6e0..4a6ff91d816 100644
--- a/stackslib/src/net/http/tests.rs
+++ b/stackslib/src/net/http/tests.rs
@@ -225,7 +225,7 @@ fn test_http_request_preamble_headers() {
     assert!(txt.find("HTTP/1.1").is_some(), "HTTP version is missing");
     assert!(
-        txt.find("User-Agent: stacks/3.0\r\n").is_some(),
+        txt.find("User-Agent: stacks/2.0\r\n").is_some(),
         "User-Agent header is missing"
     );
     assert!(
diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs
index 8df013a8c07..480743a3696 100644
--- a/stackslib/src/net/inv/epoch2x.rs
+++ b/stackslib/src/net/inv/epoch2x.rs
@@ -50,7 +50,7 @@ use crate::util_lib::db::{DBConn, Error as db_error};
 #[cfg(not(test))]
 pub const INV_SYNC_INTERVAL: u64 = 150;
 #[cfg(test)]
-pub const INV_SYNC_INTERVAL: u64 = 3;
+pub const INV_SYNC_INTERVAL: u64 = 0;
 
 #[cfg(not(test))]
 pub const INV_REWARD_CYCLES: u64 = 2;
diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs
index de46d15744a..612f9d1f619 100644
--- a/stackslib/src/net/inv/nakamoto.rs
+++ b/stackslib/src/net/inv/nakamoto.rs
@@ -17,7 +17,6 @@
 use std::collections::{BTreeMap, HashMap};
 
 use stacks_common::bitvec::BitVec;
-use stacks_common::types::StacksEpochId;
 use stacks_common::util::get_epoch_time_secs;
 
 use crate::burnchains::PoxConstants;
@@ -275,7 +274,6 @@ impl NakamotoTenureInv {
     pub fn new(
         first_block_height: u64,
         reward_cycle_len: u64,
-        cur_reward_cycle: u64,
         neighbor_address: NeighborAddress,
     ) -> Self {
         Self {
@@ -285,7 +283,7 @@ impl NakamotoTenureInv {
             first_block_height,
             reward_cycle_len,
             neighbor_address,
-            cur_reward_cycle,
+            cur_reward_cycle: 0,
             online: true,
             start_sync_time: 0,
         }
     }
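// [Editor's note: illustrative sketch, not part of the patch above.] The
// `next_reward_cycle` hunk just below is a saturating cursor: each call returns the
// reward cycle to query and then advances. A minimal model of that bookkeeping:

struct InvCursor {
    cur_reward_cycle: u64,
}

impl InvCursor {
    fn next_reward_cycle(&mut self) -> u64 {
        let query_rc = self.cur_reward_cycle;
        self.cur_reward_cycle = self.cur_reward_cycle.saturating_add(1);
        query_rc
    }
}

fn main() {
    let mut cursor = InvCursor { cur_reward_cycle: 7 };
    assert_eq!(cursor.next_reward_cycle(), 7);
    assert_eq!(cursor.next_reward_cycle(), 8); // cursor advanced past the queried cycle
}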
@@ -353,7 +351,6 @@ impl NakamotoTenureInv {
     /// Adjust the next reward cycle to query.
     /// Returns the reward cycle to query.
     pub fn next_reward_cycle(&mut self) -> u64 {
-        test_debug!("Next reward cycle: {}", self.cur_reward_cycle + 1);
         let query_rc = self.cur_reward_cycle;
         self.cur_reward_cycle = self.cur_reward_cycle.saturating_add(1);
         query_rc
@@ -363,10 +360,7 @@ impl NakamotoTenureInv {
     /// can talk to the peer again
     pub fn try_reset_comms(&mut self, inv_sync_interval: u64, start_rc: u64, cur_rc: u64) {
         let now = get_epoch_time_secs();
-        if self.start_sync_time + inv_sync_interval <= now
-            && (self.cur_reward_cycle >= cur_rc || !self.online)
-        {
-            test_debug!("Reset inv comms for {}", &self.neighbor_address);
+        if self.start_sync_time + inv_sync_interval <= now && self.cur_reward_cycle > cur_rc {
             self.state = NakamotoInvState::GetNakamotoInvBegin;
             self.online = true;
             self.start_sync_time = now;
@@ -398,10 +392,9 @@ impl NakamotoTenureInv {
         current_reward_cycle: u64,
     ) -> bool {
         debug!(
-            "{:?}: Begin Nakamoto inventory sync for {} in cycle {}",
+            "{:?}: Begin Nakamoto inventory sync for {}",
             network.get_local_peer(),
-            self.neighbor_address,
-            current_reward_cycle,
+            self.neighbor_address
         );
 
         // possibly reset communications with this peer, if it's time to do so.
@@ -489,8 +482,6 @@ pub struct NakamotoInvStateMachine<NC: NeighborComms> {
     pub(crate) inventories: HashMap<NeighborAddress, NakamotoTenureInv>,
     /// Reward cycle consensus hashes
     reward_cycle_consensus_hashes: BTreeMap<u64, ConsensusHash>,
-    /// last observed sortition tip
-    last_sort_tip: Option<BlockSnapshot>,
 }
 
 impl<NC: NeighborComms> NakamotoInvStateMachine<NC> {
@@ -499,7 +490,6 @@ impl<NC: NeighborComms> NakamotoInvStateMachine<NC> {
             comms,
             inventories: HashMap::new(),
             reward_cycle_consensus_hashes: BTreeMap::new(),
-            last_sort_tip: None,
         }
     }
 
@@ -541,56 +531,28 @@ impl<NC: NeighborComms> NakamotoInvStateMachine<NC> {
     /// Returns the current reward cycle.
     fn update_reward_cycle_consensus_hashes(
         &mut self,
-        tip: &BlockSnapshot,
         sortdb: &SortitionDB,
     ) -> Result<u64, NetError> {
-        // check for reorg
-        let reorg = PeerNetwork::is_reorg(self.last_sort_tip.as_ref(), tip, sortdb);
-        if reorg {
-            // drop the last two reward cycles
-            test_debug!("Detected reorg! Refreshing inventory consensus hashes");
-            let highest_rc = self
-                .reward_cycle_consensus_hashes
-                .last_key_value()
-                .map(|(highest_rc, _)| *highest_rc)
-                .unwrap_or(0);
-
-            self.reward_cycle_consensus_hashes.remove(&highest_rc);
-            self.reward_cycle_consensus_hashes
-                .remove(&highest_rc.saturating_sub(1));
-        }
-
         let highest_rc = self
             .reward_cycle_consensus_hashes
             .last_key_value()
             .map(|(highest_rc, _)| *highest_rc)
             .unwrap_or(0);
 
-        // NOTE: reward cycles start when (sortition_height % reward_cycle_len) == 1, not 0, but
-        // .block_height_to_reward_cycle does not account for this.
+        let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?;
         let tip_rc = sortdb
             .pox_constants
-            .block_height_to_reward_cycle(
-                sortdb.first_block_height,
-                tip.block_height.saturating_sub(1),
-            )
+            .block_height_to_reward_cycle(sortdb.first_block_height, sn.block_height)
             .expect("FATAL: snapshot occurred before system start");
 
-        test_debug!(
-            "Load all reward cycle consensus hashes from {} to {}",
-            highest_rc,
-            tip_rc
-        );
         for rc in highest_rc..=tip_rc {
             if self.reward_cycle_consensus_hashes.contains_key(&rc) {
                 continue;
             }
             let Some(ch) = Self::load_consensus_hash_for_reward_cycle(sortdb, rc)?
else { // NOTE: this should be unreachable, but don't panic - warn!("Failed to load consensus hash for reward cycle {}", rc); return Err(DBError::NotFoundError.into()); }; - test_debug!("Inv reward cycle consensus hash for {} is {}", rc, &ch); self.reward_cycle_consensus_hashes.insert(rc, ch); } Ok(tip_rc) @@ -617,15 +579,7 @@ impl NakamotoInvStateMachine { ibd: bool, ) -> Result<(), NetError> { // make sure we know all consensus hashes for all reward cycles. - let current_reward_cycle = - self.update_reward_cycle_consensus_hashes(&network.burnchain_tip, sortdb)?; - let nakamoto_start_height = network - .get_epoch_by_epoch_id(StacksEpochId::Epoch30) - .start_height; - let nakamoto_start_rc = network - .get_burnchain() - .block_height_to_reward_cycle(nakamoto_start_height) - .unwrap_or(0); + let current_reward_cycle = self.update_reward_cycle_consensus_hashes(sortdb)?; // we're updating inventories, so preserve the state we have let mut new_inventories = HashMap::new(); @@ -663,7 +617,6 @@ impl NakamotoInvStateMachine { .pox_constants .reward_cycle_length .into(), - nakamoto_start_rc, naddr.clone(), ) }); @@ -673,11 +626,6 @@ impl NakamotoInvStateMachine { new_inventories.insert(naddr.clone(), inv); if self.comms.has_inflight(&naddr) { - test_debug!( - "{:?}: still waiting for reply from {}", - network.get_local_peer(), - &naddr - ); continue; } @@ -691,9 +639,8 @@ impl NakamotoInvStateMachine { }; debug!( - "{:?}: send GetNakamotoInv ({:?})) for reward cycle {} to {}", + "{:?}: send GetNakamotoInv for reward cycle {} to {}", network.get_local_peer(), - &getnakamotoinv, inv_rc, &naddr ); @@ -774,10 +721,8 @@ impl NakamotoInvStateMachine { ); e }) else { - self.last_sort_tip = Some(network.burnchain_tip.clone()); return false; }; - self.last_sort_tip = Some(network.burnchain_tip.clone()); learned } } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 2b4f9a053e8..d212aa50fd3 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -66,7 +66,7 @@ use crate::burnchains::{Error as burnchain_error, Txid}; use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::chainstate::burn::{ConsensusHash, Opcodes}; use crate::chainstate::coordinator::Error as coordinator_error; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use crate::chainstate::nakamoto::NakamotoChainState; use crate::chainstate::stacks::boot::{ BOOT_TEST_POX_4_AGG_KEY_CONTRACT, BOOT_TEST_POX_4_AGG_KEY_FNAME, }; @@ -282,8 +282,6 @@ pub enum Error { Http(HttpErr), /// Invalid state machine state reached InvalidState, - /// Waiting for DNS resolution - WaitingForDNS, } impl From for Error { @@ -431,7 +429,6 @@ impl fmt::Display for Error { } Error::Http(e) => fmt::Display::fmt(&e, f), Error::InvalidState => write!(f, "Invalid state-machine state reached"), - Error::WaitingForDNS => write!(f, "Waiting for DNS resolution"), } } } @@ -504,7 +501,6 @@ impl error::Error for Error { Error::StackerDBChunkTooBig(..) 
             => None,
             Error::Http(ref e) => Some(e),
             Error::InvalidState => None,
-            Error::WaitingForDNS => None,
         }
     }
 }
@@ -971,10 +967,6 @@ impl NeighborAddress {
             public_key_hash: pkh,
         }
     }
-
-    pub fn to_socketaddr(&self) -> SocketAddr {
-        self.addrbytes.to_socketaddr(self.port)
-    }
 }
 
 /// A descriptor of a list of known peers
@@ -1025,7 +1017,6 @@ pub mod NackErrorCodes {
     pub const InvalidMessage: u32 = 5;
     pub const NoSuchDB: u32 = 6;
     pub const StaleVersion: u32 = 7;
-    pub const StaleView: u32 = 8;
 }
 
 #[derive(Debug, Clone, PartialEq)]
@@ -1048,9 +1039,7 @@ pub struct NatPunchData {
 /// Inform the remote peer of (a page of) the list of stacker DB contracts this node supports
 #[derive(Debug, Clone, PartialEq)]
 pub struct StackerDBHandshakeData {
-    /// current reward cycle consensus hash (i.e. the consensus hash of the Stacks tip in the
-    /// current reward cycle, which commits to both the Stacks block tip and the underlying PoX
-    /// history).
+    /// current reward cycle ID
     pub rc_consensus_hash: ConsensusHash,
     /// list of smart contracts that we index.
     /// there can be as many as 256 entries.
@@ -1062,7 +1051,7 @@ pub struct StackerDBGetChunkInvData {
     /// smart contract being used to determine chunk quantity and order
     pub contract_id: QualifiedContractIdentifier,
-    /// consensus hash of the Stacks chain tip in this reward cycle
+    /// consensus hash of the sortition that started this reward cycle
     pub rc_consensus_hash: ConsensusHash,
 }
 
@@ -1081,7 +1070,7 @@ pub struct StackerDBChunkInvData {
 pub struct StackerDBGetChunkData {
     /// smart contract being used to determine slot quantity and order
     pub contract_id: QualifiedContractIdentifier,
-    /// consensus hash of the Stacks chain tip in this reward cycle
+    /// consensus hash of the sortition that started this reward cycle
     pub rc_consensus_hash: ConsensusHash,
     /// slot ID
     pub slot_id: u32,
@@ -1094,7 +1083,7 @@ pub struct StackerDBGetChunkData {
 pub struct StackerDBPushChunkData {
     /// smart contract being used to determine chunk quantity and order
     pub contract_id: QualifiedContractIdentifier,
-    /// consensus hash of the Stacks chain tip in this reward cycle
+    /// consensus hash of the sortition that started this reward cycle
     pub rc_consensus_hash: ConsensusHash,
     /// the pushed chunk
     pub chunk_data: StackerDBChunkData,
@@ -1348,10 +1337,6 @@ impl NeighborKey {
             port: na.port,
         }
     }
-
-    pub fn to_socketaddr(&self) -> SocketAddr {
-        self.addrbytes.to_socketaddr(self.port)
-    }
 }
 
 /// Entry in the neighbor set
@@ -1424,47 +1409,25 @@ pub const DENY_MIN_BAN_DURATION: u64 = 2;
 
 /// Result of doing network work
 pub struct NetworkResult {
-    /// PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks)
-    pub download_pox_id: Option<PoxId>,
-    /// Network messages we received but did not handle
+    pub download_pox_id: Option<PoxId>, // PoX ID as it was when we begin downloading blocks (set if we have downloaded new blocks)
     pub unhandled_messages: HashMap<NeighborKey, Vec<StacksMessage>>,
-    /// Stacks 2.x blocks we downloaded, and time taken
-    pub blocks: Vec<(ConsensusHash, StacksBlock, u64)>,
-    /// Stacks 2.x confirmed microblocks we downloaded, and time taken
-    pub confirmed_microblocks: Vec<(ConsensusHash, Vec<StacksMicroblock>, u64)>,
-    /// Nakamoto blocks we downloaded
-    pub nakamoto_blocks: HashMap<StacksBlockId, NakamotoBlock>,
-    /// all transactions pushed to us and their message relay hints
-    pub pushed_transactions: HashMap<NeighborKey, Vec<(Vec<RelayData>, StacksTransaction)>>,
-    /// all Stacks 2.x blocks pushed to us
-    pub pushed_blocks: HashMap<NeighborKey, Vec<BlocksData>>,
-    /// all Stacks 2.x microblocks pushed to us, and the relay hints from the message
-    pub pushed_microblocks: HashMap<NeighborKey, Vec<(Vec<RelayData>, MicroblocksData)>>,
-    /// transactions sent to us by the http server
-    pub uploaded_transactions: Vec<StacksTransaction>,
-    /// blocks sent to us via the http server
-    pub uploaded_blocks: Vec<BlocksData>,
-    /// microblocks sent to us by the http server
-    pub uploaded_microblocks: Vec<MicroblocksData>,
-    /// chunks we received from the HTTP server
-    pub uploaded_stackerdb_chunks: Vec<StackerDBPushChunkData>,
-    /// Atlas attachments we obtained
+    pub blocks: Vec<(ConsensusHash, StacksBlock, u64)>, // blocks we downloaded, and time taken
+    pub confirmed_microblocks: Vec<(ConsensusHash, Vec<StacksMicroblock>, u64)>, // confirmed microblocks we downloaded, and time taken
+    pub pushed_transactions: HashMap<NeighborKey, Vec<(Vec<RelayData>, StacksTransaction)>>, // all transactions pushed to us and their message relay hints
+    pub pushed_blocks: HashMap<NeighborKey, Vec<BlocksData>>, // all blocks pushed to us
+    pub pushed_microblocks: HashMap<NeighborKey, Vec<(Vec<RelayData>, MicroblocksData)>>, // all microblocks pushed to us, and the relay hints from the message
+    pub uploaded_transactions: Vec<StacksTransaction>, // transactions sent to us by the http server
+    pub uploaded_blocks: Vec<BlocksData>,              // blocks sent to us via the http server
+    pub uploaded_microblocks: Vec<MicroblocksData>,    // microblocks sent to us by the http server
+    pub uploaded_stackerdb_chunks: Vec<StackerDBPushChunkData>, // chunks we received from the HTTP server
     pub attachments: Vec<(AttachmentInstance, Attachment)>,
-    /// transactions we downloaded via a mempool sync
-    pub synced_transactions: Vec<StacksTransaction>,
-    /// chunks for stacker DBs we downloaded
-    pub stacker_db_sync_results: Vec<StackerDBSyncResult>,
-    /// Number of times the network state machine has completed one pass
+    pub synced_transactions: Vec<StacksTransaction>, // transactions we downloaded via a mempool sync
+    pub stacker_db_sync_results: Vec<StackerDBSyncResult>, // chunks for stacker DBs we downloaded
     pub num_state_machine_passes: u64,
-    /// Number of times the Stacks 2.x inventory synchronization has completed one pass
     pub num_inv_sync_passes: u64,
-    /// Number of times the Stacks 2.x block downloader has completed one pass
     pub num_download_passes: u64,
-    /// The observed burnchain height
     pub burn_height: u64,
-    /// The consensus hash of the start of this reward cycle
     pub rc_consensus_hash: ConsensusHash,
-    /// The current StackerDB configs
     pub stacker_db_configs: HashMap<QualifiedContractIdentifier, StackerDBConfig>,
 }
 
@@ -1482,7 +1445,6 @@ impl NetworkResult {
             download_pox_id: None,
             blocks: vec![],
             confirmed_microblocks: vec![],
-            nakamoto_blocks: HashMap::new(),
             pushed_transactions: HashMap::new(),
             pushed_blocks: HashMap::new(),
             pushed_microblocks: HashMap::new(),
@@ -1512,10 +1474,6 @@ impl NetworkResult {
             || self.uploaded_microblocks.len() > 0
     }
 
-    pub fn has_nakamoto_blocks(&self) -> bool {
-        self.nakamoto_blocks.len() > 0
-    }
-
     pub fn has_transactions(&self) -> bool {
         self.pushed_transactions.len() > 0
             || self.uploaded_transactions.len() > 0
@@ -1546,7 +1504,6 @@ impl NetworkResult {
     pub fn has_data_to_store(&self) -> bool {
         self.has_blocks()
             || self.has_microblocks()
-            || self.has_nakamoto_blocks()
             || self.has_transactions()
             || self.has_attachments()
             || self.has_stackerdb_chunks()
@@ -1625,18 +1582,6 @@ impl NetworkResult {
     pub fn consume_stacker_db_sync_results(&mut self, mut msgs: Vec<StackerDBSyncResult>) {
         self.stacker_db_sync_results.append(&mut msgs);
     }
-
-    pub fn consume_nakamoto_blocks(&mut self, blocks: HashMap<ConsensusHash, Vec<NakamotoBlock>>) {
-        for (_ch, blocks) in blocks.into_iter() {
-            for block in blocks.into_iter() {
-                let block_id = block.block_id();
-                if self.nakamoto_blocks.contains_key(&block_id) {
-                    continue;
-                }
-                self.nakamoto_blocks.insert(block_id, block);
-            }
-        }
-    }
 }
 
 pub trait Requestable: std::fmt::Display {
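// [Editor's note: illustrative sketch, not part of the patch above.] The removed
// `consume_nakamoto_blocks` flattens per-tenure block lists into one map keyed by
// block ID, keeping the first copy seen. The same first-wins merge with toy types:

use std::collections::HashMap;

fn consume(dest: &mut HashMap<u64, &'static str>, batches: Vec<Vec<(u64, &'static str)>>) {
    for batch in batches {
        for (block_id, block) in batch {
            // duplicates are skipped, mirroring the contains_key() check above
            dest.entry(block_id).or_insert(block);
        }
    }
}

fn main() {
    let mut blocks = HashMap::new();
    consume(&mut blocks, vec![vec![(1, "a"), (2, "b")], vec![(2, "dup"), (3, "c")]]);
    assert_eq!(blocks[&2], "b"); // the duplicate for block 2 was ignored
    assert_eq!(blocks.len(), 3);
}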
@@ -1728,7 +1673,6 @@ pub mod test {
             BlockstackOperationType::TransferStx(_)
             | BlockstackOperationType::DelegateStx(_)
             | BlockstackOperationType::PreStx(_)
-            | BlockstackOperationType::VoteForAggregateKey(_)
             | BlockstackOperationType::StackStx(_) => Ok(()),
         }
     }
@@ -1922,7 +1866,6 @@ pub mod test {
         pub winner_txid: Txid,
         pub matured_rewards: Vec<MinerReward>,
         pub matured_rewards_info: Option<MinerRewardInfo>,
-        pub reward_set_data: Option<RewardSetData>,
     }
 
     pub struct TestEventObserver {
@@ -1958,7 +1901,6 @@ pub mod test {
             _confirmed_mblock_cost: &ExecutionCost,
             pox_constants: &PoxConstants,
             reward_set_data: &Option<RewardSetData>,
-            _signer_bitvec: &Option<BitVec<4000>>,
         ) {
             self.blocks.lock().unwrap().push(TestEventObserverBlock {
                 block: block.clone(),
@@ -1968,7 +1910,6 @@ pub mod test {
                 winner_txid,
                 matured_rewards: matured_rewards.to_owned(),
                 matured_rewards_info: matured_rewards_info.map(|info| info.clone()),
-                reward_set_data: reward_set_data.clone(),
             })
         }
@@ -2265,29 +2206,6 @@ pub mod test {
         stacker_db_syncs
    }
 
-    pub fn neighbor_with_observer(
-        &self,
-        privkey: StacksPrivateKey,
-        observer: Option<&'a TestEventObserver>,
-    ) -> TestPeer<'a> {
-        let mut config = self.config.clone();
-        config.private_key = privkey;
-        config.test_name = format!(
-            "{}.neighbor-{}",
-            &self.config.test_name,
-            Hash160::from_node_public_key(&StacksPublicKey::from_private(
-                &self.config.private_key
-            ))
-        );
-        config.server_port = 0;
-        config.http_port = 0;
-        config.test_stackers = self.config.test_stackers.clone();
-        config.initial_neighbors = vec![self.to_neighbor()];
-
-        let peer = TestPeer::new_with_observer(config, observer);
-        peer
-    }
-
     pub fn new_with_observer(
         mut config: TestPeerConfig,
         observer: Option<&'a TestEventObserver>,
@@ -2686,14 +2604,6 @@ pub mod test {
     }
 
     pub fn step_with_ibd(&mut self, ibd: bool) -> Result<NetworkResult, net_error> {
-        self.step_with_ibd_and_dns(ibd, None)
-    }
-
-    pub fn step_with_ibd_and_dns(
-        &mut self,
-        ibd: bool,
-        dns_client: Option<&mut DNSClient>,
-    ) -> Result<NetworkResult, net_error> {
         let mut sortdb = self.sortdb.take().unwrap();
         let mut stacks_node = self.stacks_node.take().unwrap();
         let mut mempool = self.mempool.take().unwrap();
@@ -2704,7 +2614,7 @@ pub mod test {
             &mut sortdb,
             &mut stacks_node.chainstate,
             &mut mempool,
-            dns_client,
+            None,
             false,
             ibd,
             100,
@@ -2719,40 +2629,6 @@ pub mod test {
         ret
     }
 
-    pub fn run_with_ibd(
-        &mut self,
-        ibd: bool,
-        dns_client: Option<&mut DNSClient>,
-    ) -> Result<ProcessedNetReceipts, net_error> {
-        let mut net_result = self.step_with_ibd_and_dns(ibd, dns_client)?;
-        let mut sortdb = self.sortdb.take().unwrap();
-        let mut stacks_node = self.stacks_node.take().unwrap();
-        let mut mempool = self.mempool.take().unwrap();
-        let indexer = self.indexer.take().unwrap();
-
-        let receipts_res = self.relayer.process_network_result(
-            self.network.get_local_peer(),
-            &mut net_result,
-            &mut sortdb,
-            &mut stacks_node.chainstate,
-            &mut mempool,
-            ibd,
-            None,
-            None,
-        );
-
-        self.sortdb = Some(sortdb);
-        self.stacks_node = Some(stacks_node);
-        self.mempool = Some(mempool);
-        self.indexer = Some(indexer);
-
-        self.coord.handle_new_burnchain_block().unwrap();
-        self.coord.handle_new_stacks_block().unwrap();
-        self.coord.handle_new_nakamoto_stacks_block().unwrap();
-
-        receipts_res
-    }
-
     pub fn step_dns(&mut self, dns_client: &mut DNSClient) -> Result<NetworkResult, net_error> {
         let mut sortdb = self.sortdb.take().unwrap();
         let mut stacks_node = self.stacks_node.take().unwrap();
@@ -2795,18 +2671,6 @@ pub mod test {
         ret
     }
 
-    pub fn refresh_burnchain_view(&mut self) {
-        let sortdb = self.sortdb.take().unwrap();
-        let mut stacks_node = self.stacks_node.take().unwrap();
-        let indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir);
-        self.network
-            .refresh_burnchain_view(&indexer, &sortdb, &mut stacks_node.chainstate, false)
-            .unwrap();
-
-        self.sortdb = Some(sortdb);
-        self.stacks_node = Some(stacks_node);
-    }
-
     pub fn for_each_convo_p2p<F, R>(&mut self, mut f: F) -> Vec<Result<R, net_error>>
     where
         F: FnMut(usize, &mut ConversationP2P) -> Result<R, net_error>,
@@ -2819,35 +2683,11 @@ pub mod test {
         ret
     }
 
-    pub fn get_burnchain_block_ops(
-        &self,
-        burn_block_hash: &BurnchainHeaderHash,
-    ) -> Vec<BlockstackOperationType> {
-        let burnchain_db =
-            BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), false).unwrap();
-        burnchain_db
-            .get_burnchain_block_ops(burn_block_hash)
-            .unwrap()
-    }
-
-    pub fn get_burnchain_block_ops_at_height(
-        &self,
-        height: u64,
-    ) -> Option<Vec<BlockstackOperationType>> {
-        let sortdb = self.sortdb.as_ref().unwrap();
-        let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-        let sort_handle = sortdb.index_handle(&tip.sortition_id);
-        let Some(sn) = sort_handle.get_block_snapshot_by_height(height).unwrap() else {
-            return None;
-        };
-        Some(self.get_burnchain_block_ops(&sn.burn_header_hash))
-    }
-
     pub fn next_burnchain_block(
         &mut self,
         blockstack_ops: Vec<BlockstackOperationType>,
     ) -> (u64, BurnchainHeaderHash, ConsensusHash) {
-        let x = self.inner_next_burnchain_block(blockstack_ops, true, true, true);
+        let x = self.inner_next_burnchain_block(blockstack_ops, true, true);
         (x.0, x.1, x.2)
     }
 
@@ -2860,22 +2700,14 @@
         ConsensusHash,
         Option<BlockHeaderHash>,
     ) {
-        self.inner_next_burnchain_block(blockstack_ops, true, true, true)
+        self.inner_next_burnchain_block(blockstack_ops, true, true)
     }
 
     pub fn next_burnchain_block_raw(
         &mut self,
         blockstack_ops: Vec<BlockstackOperationType>,
     ) -> (u64, BurnchainHeaderHash, ConsensusHash) {
-        let x = self.inner_next_burnchain_block(blockstack_ops, false, false, true);
-        (x.0, x.1, x.2)
-    }
-
-    pub fn next_burnchain_block_raw_sortition_only(
-        &mut self,
-        blockstack_ops: Vec<BlockstackOperationType>,
-    ) -> (u64, BurnchainHeaderHash, ConsensusHash) {
-        let x = self.inner_next_burnchain_block(blockstack_ops, false, false, false);
+        let x = self.inner_next_burnchain_block(blockstack_ops, false, false);
         (x.0, x.1, x.2)
     }
 
@@ -2888,7 +2720,7 @@
         ConsensusHash,
         Option<BlockHeaderHash>,
     ) {
-        self.inner_next_burnchain_block(blockstack_ops, false, false, true)
+        self.inner_next_burnchain_block(blockstack_ops, false, false)
     }
 
     pub fn set_ops_consensus_hash(
@@ -2914,84 +2746,6 @@
         }
     }
 
-    pub fn make_next_burnchain_block(
-        burnchain: &Burnchain,
-        tip_block_height: u64,
-        tip_block_hash: &BurnchainHeaderHash,
-        num_ops: u64,
-    ) -> BurnchainBlockHeader {
-        test_debug!(
-            "make_next_burnchain_block: tip_block_height={} tip_block_hash={} num_ops={}",
-            tip_block_height,
-            tip_block_hash,
-            num_ops
-        );
-        let indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir);
-        let parent_hdr = indexer
-            .read_burnchain_header(tip_block_height)
-            .unwrap()
-            .unwrap();
-
-        test_debug!("parent hdr ({}): {:?}", &tip_block_height, &parent_hdr);
-        assert_eq!(&parent_hdr.block_hash, tip_block_hash);
-
-        let now = BURNCHAIN_TEST_BLOCK_TIME;
-        let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash(
-            &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32)
-                .bitcoin_hash(),
-        );
-        test_debug!(
-            "Block header hash at {} is {}",
-            tip_block_height + 1,
-            &block_header_hash
-        );
-
-        let block_header = BurnchainBlockHeader {
-            block_height: tip_block_height + 1,
-            block_hash: block_header_hash.clone(),
-            parent_block_hash: parent_hdr.block_hash.clone(),
-            num_txs: num_ops,
-            timestamp: now,
-        };
-
-        block_header
-    }
-
-    pub fn add_burnchain_block(
-        burnchain: &Burnchain,
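// [Editor's note: illustrative sketch, not part of the patch above.] The two removed
// helpers split test block production in half: build the next header from the tip,
// then store the header together with its block operations. A toy model of that
// two-phase flow, with plain types standing in for the burnchain structures:

struct Header {
    height: u64,
    parent_height: u64,
}

fn make_next_header(tip_height: u64) -> Header {
    Header { height: tip_height + 1, parent_height: tip_height }
}

fn add_block(chain: &mut Vec<(Header, Vec<&'static str>)>, header: Header, ops: Vec<&'static str>) {
    // sanity-check parentage, as the real code asserts against the parent header
    assert_eq!(header.parent_height + 1, header.height);
    chain.push((header, ops));
}

fn main() {
    let mut chain = Vec::new();
    let hdr = make_next_header(100);
    add_block(&mut chain, hdr, vec!["pre-stx", "stack-stx"]);
    assert_eq!(chain[0].0.height, 101);
}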
-        block_header: &BurnchainBlockHeader,
-        blockstack_ops: Vec<BlockstackOperationType>,
-    ) {
-        let mut burnchain_db =
-            BurnchainDB::open(&burnchain.get_burnchaindb_path(), true).unwrap();
-
-        let mut indexer = BitcoinIndexer::new_unit_test(&burnchain.working_dir);
-
-        test_debug!(
-            "Store header and block ops for {}-{} ({})",
-            &block_header.block_hash,
-            &block_header.parent_block_hash,
-            block_header.block_height
-        );
-        indexer.raw_store_header(block_header.clone()).unwrap();
-        burnchain_db
-            .raw_store_burnchain_block(
-                &burnchain,
-                &indexer,
-                block_header.clone(),
-                blockstack_ops,
-            )
-            .unwrap();
-
-        Burnchain::process_affirmation_maps(
-            &burnchain,
-            &mut burnchain_db,
-            &indexer,
-            block_header.block_height,
-        )
-        .unwrap();
-    }
-
     /// Generate and commit the next burnchain block with the given block operations.
     /// * if `set_consensus_hash` is true, then each op's consensus_hash field will be set to
     ///   that of the resulting block snapshot.
@@ -3009,7 +2763,6 @@
         mut blockstack_ops: Vec<BlockstackOperationType>,
         set_consensus_hash: bool,
         set_burn_hash: bool,
-        update_burnchain: bool,
     ) -> (
         u64,
         BurnchainHeaderHash,
         ConsensusHash,
         Option<BlockHeaderHash>,
    ) {
@@ -3028,28 +2781,66 @@
                 TestPeer::set_ops_consensus_hash(&mut blockstack_ops, &tip.consensus_hash);
             }
 
-            let block_header = Self::make_next_burnchain_block(
-                &self.config.burnchain,
-                tip.block_height,
-                &tip.burn_header_hash,
-                blockstack_ops.len() as u64,
+            let mut indexer = BitcoinIndexer::new_unit_test(&self.config.burnchain.working_dir);
+            let parent_hdr = indexer
+                .read_burnchain_header(tip.block_height)
+                .unwrap()
+                .unwrap();
+
+            test_debug!("parent hdr ({}): {:?}", &tip.block_height, &parent_hdr);
+            assert_eq!(parent_hdr.block_hash, tip.burn_header_hash);
+
+            let now = BURNCHAIN_TEST_BLOCK_TIME;
+            let block_header_hash = BurnchainHeaderHash::from_bitcoin_hash(
+                &BitcoinIndexer::mock_bitcoin_header(&parent_hdr.block_hash, now as u32)
+                    .bitcoin_hash(),
+            );
+            test_debug!(
+                "Block header hash at {} is {}",
+                tip.block_height + 1,
+                &block_header_hash
             );
 
+            let block_header = BurnchainBlockHeader {
+                block_height: tip.block_height + 1,
+                block_hash: block_header_hash.clone(),
+                parent_block_hash: parent_hdr.block_hash.clone(),
+                num_txs: blockstack_ops.len() as u64,
+                timestamp: now,
+            };
+
             if set_burn_hash {
-                TestPeer::set_ops_burn_header_hash(
-                    &mut blockstack_ops,
-                    &block_header.block_hash,
-                );
+                TestPeer::set_ops_burn_header_hash(&mut blockstack_ops, &block_header_hash);
             }
 
-            if update_burnchain {
-                Self::add_burnchain_block(
+            let mut burnchain_db =
+                BurnchainDB::open(&self.config.burnchain.get_burnchaindb_path(), true).unwrap();
+
+            test_debug!(
+                "Store header and block ops for {}-{} ({})",
+                &block_header.block_hash,
+                &block_header.parent_block_hash,
+                block_header.block_height
+            );
+            indexer.raw_store_header(block_header.clone()).unwrap();
+            burnchain_db
+                .raw_store_burnchain_block(
                     &self.config.burnchain,
-                    &block_header,
-                    blockstack_ops.clone(),
-                );
-            }
-            (block_header.block_height, block_header.block_hash, epoch_id)
+                    &indexer,
+                    block_header.clone(),
+                    blockstack_ops,
+                )
+                .unwrap();
+
+            Burnchain::process_affirmation_maps(
+                &self.config.burnchain,
+                &mut burnchain_db,
+                &indexer,
+                block_header.block_height,
+            )
+            .unwrap();
+
+            (block_header.block_height, block_header_hash, epoch_id)
         };
 
         let missing_pox_anchor_block_hash_opt = if epoch_id < StacksEpochId::Epoch30 {
@@ -3464,7 +3255,6 @@
         let tip =
            SortitionDB::get_canonical_burn_chain_tip(&self.sortdb.as_ref().unwrap().conn())
                .unwrap();
-        let burnchain = self.config.burnchain.clone();
         let (burn_ops, stacks_block, microblocks) = self.make_tenure(
             |ref mut miner,
              ref mut sortdb,
@@ -3479,7 +3269,6 @@
                 block_txs.extend_from_slice(txs);
 
                 let block_builder = StacksBlockBuilder::make_regtest_block_builder(
-                    &burnchain,
                     &parent_tip,
                     vrf_proof,
                     tip.total_burn,
@@ -3514,7 +3303,6 @@
             );
         }
 
-        self.refresh_burnchain_view();
         tip_id
     }
 
@@ -3704,7 +3492,7 @@
         let sort_iconn = sortdb.index_conn();
         let mut miner_epoch_info = builder
-            .pre_epoch_begin(&mut miner_chainstate, &sort_iconn, true)
+            .pre_epoch_begin(&mut miner_chainstate, &sort_iconn)
             .unwrap();
         let mut epoch = builder
             .epoch_begin(&sort_iconn, &mut miner_epoch_info)
diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs
index c819ac049b4..157c79e9d4e 100644
--- a/stackslib/src/net/neighbors/comms.rs
+++ b/stackslib/src/net/neighbors/comms.rs
@@ -423,7 +423,7 @@ pub struct PeerNetworkComms {
     dead_connections: HashSet<NeighborKey>,
     /// Set of neighbors who misbehaved during our comms session
     broken_connections: HashSet<NeighborKey>,
-    /// Ongoing batch of p2p requests. Will be `None` if there are no inflight requests.
+    /// Ongoing batch of requests. Will be `None` if there are no inflight requests.
     ongoing_batch_request: Option<NeighborCommsRequest>,
 }
 
@@ -437,49 +437,6 @@ impl PeerNetworkComms {
             ongoing_batch_request: None,
         }
     }
-
-    /// Drive socket I/O on all outstanding messages and gather up any received messages.
-    /// Remove handled messages from `state`, and perform the polling (and bookkeeping of dead/broken neighbors) via `neighbor_set`
-    fn drive_socket_io<NS: NeighborComms>(
-        network: &mut PeerNetwork,
-        state: &mut HashMap<NeighborAddress, ReplyHandleP2P>,
-        neighbor_set: &mut NS,
-    ) -> Vec<(NeighborAddress, StacksMessage)> {
-        let mut inflight = HashMap::new();
-        let mut ret = vec![];
-        let stable_block_height = network.get_chain_view().burn_stable_block_height;
-        for (naddr, rh) in state.drain() {
-            let mut req_opt = Some(rh);
-            let message = match neighbor_set.poll_next_reply(network, &naddr, &mut req_opt) {
-                Ok(Some(msg)) => msg,
-                Ok(None) => {
-                    if let Some(rh) = req_opt {
-                        // keep trying
-                        inflight.insert(naddr, rh);
-                    }
-                    continue;
-                }
-                Err(_e) => {
-                    // peer was already marked as dead in the given network set
-                    continue;
-                }
-            };
-
-            if NeighborCommsRequest::is_message_stale(&message, stable_block_height) {
-                debug!(
-                    "{:?}: Remote neighbor {:?} is still bootstrapping (at block {})",
-                    &network.get_local_peer(),
-                    &naddr,
-                    message.preamble.burn_stable_block_height
-                );
-                continue;
-            }
-
-            ret.push((naddr, message));
-        }
-        state.extend(inflight);
-        ret
-    }
 }
 
 impl NeighborComms for PeerNetworkComms {
@@ -569,7 +526,7 @@ impl NeighborComms for PeerNetworkComms {
         let mut clear = false;
         let mut ongoing_batch_request = self.ongoing_batch_request.take();
         if let Some(batch) = ongoing_batch_request.as_mut() {
-            ret = Self::drive_socket_io(network, &mut batch.state, self);
+            ret.extend(batch.new_replies(self, network));
             if batch.count_inflight() == 0 {
                 clear = true;
             }
@@ -631,6 +588,67 @@ pub struct NeighborCommsRequest {
     state: HashMap<NeighborAddress, ReplyHandleP2P>,
 }
 
+/// This struct represents everything we need to iterate through a set of ongoing requests, in
+/// order to pull out completed replies.
+pub struct NeighborCommsMessageIterator<'a, NS: NeighborComms> {
+    network: &'a mut PeerNetwork,
+    state: &'a mut HashMap<NeighborAddress, ReplyHandleP2P>,
+    neighbor_set: &'a mut NS,
+}
+
+/// This is an iterator over completed requests
+impl<NS: NeighborComms> Iterator for NeighborCommsMessageIterator<'_, NS> {
+    type Item = (NeighborAddress, StacksMessage);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let mut inflight = HashMap::new();
+        let mut ret = None;
+        let stable_block_height = self.network.get_chain_view().burn_stable_block_height;
+        for (naddr, rh) in self.state.drain() {
+            if ret.is_some() {
+                // just save for retry
+                inflight.insert(naddr, rh);
+                continue;
+            }
+
+            let mut req_opt = Some(rh);
+            let message =
+                match self
+                    .neighbor_set
+                    .poll_next_reply(self.network, &naddr, &mut req_opt)
+                {
+                    Ok(Some(msg)) => msg,
+                    Ok(None) => {
+                        assert!(req_opt.is_some());
+                        if let Some(rh) = req_opt {
+                            // keep trying
+                            inflight.insert(naddr, rh);
+                        }
+                        continue;
+                    }
+                    Err(_e) => {
+                        // peer was already marked as dead in the given network set
+                        continue;
+                    }
+                };
+
+            if NeighborCommsRequest::is_message_stale(&message, stable_block_height) {
+                debug!(
+                    "{:?}: Remote neighbor {:?} is still bootstrapping (at block {})",
+                    &self.network.get_local_peer(),
+                    &naddr,
+                    message.preamble.burn_stable_block_height
+                );
+                continue;
+            }
+
+            ret = Some((naddr, message));
+        }
+        self.state.extend(inflight);
+        ret
+    }
+}
+
 impl NeighborCommsRequest {
     pub fn new() -> NeighborCommsRequest {
         NeighborCommsRequest {
@@ -648,6 +666,19 @@ impl NeighborCommsRequest {
         msg.preamble.burn_stable_block_height + MAX_NEIGHBOR_BLOCK_DELAY < burn_block_height
     }
 
+    /// Iterate over all in-flight requests
+    pub fn new_replies<'a, NS: NeighborComms>(
+        &'a mut self,
+        neighbor_set: &'a mut NS,
+        network: &'a mut PeerNetwork,
+    ) -> NeighborCommsMessageIterator<'a, NS> {
+        NeighborCommsMessageIterator {
+            network,
+            state: &mut self.state,
+            neighbor_set,
+        }
+    }
+
     /// How many inflight requests remaining?
     #[cfg_attr(test, mutants::skip)]
     pub fn count_inflight(&self) -> usize {
diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs
index 276d04124e9..9f2e78151cd 100644
--- a/stackslib/src/net/neighbors/mod.rs
+++ b/stackslib/src/net/neighbors/mod.rs
@@ -34,10 +34,12 @@ use crate::util_lib::db::{DBConn, DBTx, Error as db_error};
 pub mod comms;
 pub mod db;
 pub mod neighbor;
-pub mod rpc;
 pub mod walk;
 
-pub use comms::{NeighborComms, PeerNetworkComms, ToNeighborKey};
+pub use comms::{
+    NeighborComms, NeighborCommsMessageIterator, NeighborCommsRequest, PeerNetworkComms,
+    ToNeighborKey,
+};
 pub use db::{NeighborReplacements, NeighborWalkDB, PeerDBNeighborWalk};
 pub use walk::{NeighborPingback, NeighborWalk, NeighborWalkResult};
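// [Editor's note: illustrative sketch, not part of the patch above.] The new
// `NeighborCommsMessageIterator` drains the pending-request map, yields at most one
// completed reply per `next()`, and re-inserts everything still in flight. The same
// drain-and-restore shape with plain types (`Option<u32>` standing in for a reply
// handle that may or may not have produced a message):

use std::collections::HashMap;

fn next_ready(pending: &mut HashMap<&'static str, Option<u32>>) -> Option<(&'static str, u32)> {
    let mut still_pending = HashMap::new();
    let mut ready = None;
    for (peer, reply) in pending.drain() {
        match reply {
            Some(msg) if ready.is_none() => ready = Some((peer, msg)),
            other => {
                // not done yet, or we already picked a reply this round: keep for later
                still_pending.insert(peer, other);
            }
        }
    }
    pending.extend(still_pending);
    ready
}

fn main() {
    let mut pending = HashMap::from([("alice", None), ("bob", Some(42))]);
    assert_eq!(next_ready(&mut pending), Some(("bob", 42)));
    assert_eq!(pending.len(), 1); // alice's request is preserved for the next poll
}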
diff --git a/stackslib/src/net/neighbors/rpc.rs b/stackslib/src/net/neighbors/rpc.rs
deleted file mode 100644
index 3a5378803f5..00000000000
--- a/stackslib/src/net/neighbors/rpc.rs
+++ /dev/null
@@ -1,259 +0,0 @@
-// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation
-// Copyright (C) 2020-2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-use std::collections::{HashMap, HashSet};
-use std::{cmp, mem};
-
-use stacks_common::types::chainstate::StacksPublicKey;
-use stacks_common::types::net::PeerHost;
-use stacks_common::util::hash::Hash160;
-use stacks_common::util::log;
-use stacks_common::util::secp256k1::Secp256k1PublicKey;
-
-use crate::burnchains::{Address, PublicKey};
-use crate::core::PEER_VERSION_TESTNET;
-use crate::net::connection::{ConnectionOptions, ReplyHandleP2P};
-use crate::net::db::{LocalPeer, PeerDB};
-use crate::net::neighbors::comms::ToNeighborKey;
-use crate::net::neighbors::{
-    NeighborWalk, NeighborWalkDB, NeighborWalkResult, MAX_NEIGHBOR_BLOCK_DELAY,
-    NEIGHBOR_MINIMUM_CONTACT_INTERVAL,
-};
-use crate::net::p2p::PeerNetwork;
-use crate::net::server::HttpPeer;
-use crate::net::{
-    Error as NetError, HandshakeData, Neighbor, NeighborAddress, NeighborKey, PeerAddress,
-    PeerHostExtensions, StacksHttpRequest, StacksHttpResponse, StacksMessage, StacksMessageType,
-    NUM_NEIGHBORS,
-};
-
-/// This struct represents a batch of in-flight RPCs to a set of peers, identified by a
-/// neighbor key (or something that converts to it)
-#[derive(Debug)]
-pub struct NeighborRPC {
-    state: HashMap<NeighborAddress, (usize, Option<StacksHttpRequest>)>,
-    dead: HashSet<NeighborKey>,
-    broken: HashSet<NeighborKey>,
-}
-
-impl NeighborRPC {
-    pub fn new() -> Self {
-        Self {
-            state: HashMap::new(),
-            dead: HashSet::new(),
-            broken: HashSet::new(),
-        }
-    }
-
-    /// Add a dead neighbor -- a neighbor which failed to communicate with us.
-    pub fn add_dead(&mut self, network: &PeerNetwork, naddr: &NeighborAddress) {
-        self.dead.insert(naddr.to_neighbor_key(network));
-    }
-
-    /// Add a broken neighbor -- a neighbor which violated protocol.
-    pub fn add_broken(&mut self, network: &PeerNetwork, naddr: &NeighborAddress) {
-        self.broken.insert(naddr.to_neighbor_key(network));
-    }
-
-    /// Is a neighbor dead?
-    pub fn is_dead(&self, network: &PeerNetwork, naddr: &NeighborAddress) -> bool {
-        self.dead.contains(&naddr.to_neighbor_key(network))
-    }
-
-    /// Is a neighbor broken
-    pub fn is_broken(&self, network: &PeerNetwork, naddr: &NeighborAddress) -> bool {
-        self.broken.contains(&naddr.to_neighbor_key(network))
-    }
-
-    /// Is a neighbor dead or broken?
-    pub fn is_dead_or_broken(&self, network: &PeerNetwork, naddr: &NeighborAddress) -> bool {
-        let nk = naddr.to_neighbor_key(network);
-        self.dead.contains(&nk) || self.broken.contains(&nk)
-    }
-
-    /// Extract the list of dead neighbors
-    pub fn take_dead(&mut self) -> HashSet<NeighborKey> {
-        std::mem::replace(&mut self.dead, HashSet::new())
-    }
-
-    /// Extract the list of broken neighbors
-    pub fn take_broken(&mut self) -> HashSet<NeighborKey> {
-        std::mem::replace(&mut self.broken, HashSet::new())
-    }
-
-    /// Collect all in-flight replies into a vec.
-    /// This also pushes data into each connection's socket write buffer,
-    /// so the client of this module should eagerly call this over and over again.
-    pub fn collect_replies(
-        &mut self,
-        network: &mut PeerNetwork,
-    ) -> Vec<(NeighborAddress, StacksHttpResponse)> {
-        let mut inflight = HashMap::new();
-        let mut dead = vec![];
-        let mut ret = vec![];
-        for (naddr, (event_id, mut request_opt)) in self.state.drain() {
-            let response = match NeighborRPC::poll_next_reply(network, event_id, &mut request_opt) {
-                Ok(Some(response)) => response,
-                Ok(None) => {
-                    // keep trying
-                    inflight.insert(naddr, (event_id, request_opt));
-                    continue;
-                }
-                Err(NetError::WaitingForDNS) => {
-                    // keep trying
-                    inflight.insert(naddr, (event_id, request_opt));
-                    continue;
-                }
-                Err(_e) => {
-                    // declare this neighbor as dead by default
-                    dead.push(naddr);
-                    continue;
-                }
-            };
-
-            ret.push((naddr, response));
-        }
-        for naddr in dead.into_iter() {
-            self.add_dead(network, &naddr);
-        }
-        self.state.extend(inflight);
-        ret
-    }
-
-    /// How many inflight requests remaining?
-    pub fn count_inflight(&self) -> usize {
-        self.state.len()
-    }
-
-    /// Does a neighbor have an in-flight request?
-    pub fn has_inflight(&self, naddr: &NeighborAddress) -> bool {
-        self.state.contains_key(naddr)
-    }
-
-    /// Find the PeerHost to use when creating a Stacks HTTP request.
-    /// Returns Some(host) if we're connected and authenticated to this peer
-    /// Returns None otherwise.
-    pub fn get_peer_host(network: &PeerNetwork, addr: &NeighborAddress) -> Option<PeerHost> {
-        let nk = addr.to_neighbor_key(network);
-        let convo = network.get_neighbor_convo(&nk)?;
-        PeerHost::try_from_url(&convo.data_url)
-    }
-
-    /// Send an HTTP request to the given neighbor's HTTP endpoint.
-    /// Returns Ok(()) if we successfully queue the request.
-    /// Returns Err(..) if we fail to connect to the remote peer for some reason.
-    pub fn send_request(
-        &mut self,
-        network: &mut PeerNetwork,
-        naddr: NeighborAddress,
-        request: StacksHttpRequest,
-    ) -> Result<(), NetError> {
-        let nk = naddr.to_neighbor_key(network);
-        let convo = network
-            .get_neighbor_convo(&nk)
-            .ok_or(NetError::PeerNotConnected)?;
-        let data_url = convo.data_url.clone();
-        let data_addr = if let Some(ip) = convo.data_ip {
-            ip.clone()
-        } else {
-            if convo.waiting_for_dns() {
-                debug!(
-                    "{}: have not resolved {} data URL {} yet: waiting for DNS",
-                    network.get_local_peer(),
-                    &convo,
-                    &data_url
-                );
-                return Err(NetError::WaitingForDNS);
-            } else {
-                debug!(
-                    "{}: have not resolved {} data URL {} yet, and not waiting for DNS",
-                    network.get_local_peer(),
-                    &convo,
-                    &data_url
-                );
-                return Err(NetError::PeerNotConnected);
-            }
-        };
-
-        let event_id =
-            PeerNetwork::with_network_state(network, |ref mut network, ref mut network_state| {
-                PeerNetwork::with_http(network, |ref mut network, ref mut http| {
-                    match http.connect_http(network_state, network, data_url, data_addr, None) {
-                        Ok(event_id) => Ok(event_id),
-                        Err(NetError::AlreadyConnected(event_id, _)) => Ok(event_id),
-                        Err(e) => {
-                            return Err(e);
-                        }
-                    }
-                })
-            })?;
-
-        self.state.insert(naddr, (event_id, Some(request)));
-        Ok(())
-    }
-
-    /// Drive I/O on a given network conversation.
-    /// Send the HTTP request if we haven't already done so, saturate the underlying TCP socket
-    /// with bytes, and poll the event loop for any completed messages. If we get one, then return
-    /// it.
-    ///
-    /// Returns Ok(Some(response)) if the HTTP request completed
-    /// Returns Ok(None) if we are still connecting to the remote peer, or waiting for it to reply
-    /// Returns Err(NetError::WaitingForDNS) if we're still waiting to resolve the peer's data URL
-    /// Returns Err(..) if we fail to connect, or if we are unable to receive a reply.
-    fn poll_next_reply(
-        network: &mut PeerNetwork,
-        event_id: usize,
-        request_opt: &mut Option<StacksHttpRequest>,
-    ) -> Result<Option<StacksHttpResponse>, NetError> {
-        PeerNetwork::with_http(network, |network, http| {
-            // make sure we're connected
-            let (Some(ref mut convo), Some(ref mut socket)) =
-                http.get_conversation_and_socket(event_id)
-            else {
-                if http.is_connecting(event_id) {
-                    debug!(
-                        "{:?}: HTTP event {} is not connected yet",
-                        &network.local_peer, event_id
-                    );
-                    return Ok(None);
-                } else {
-                    // conversation died
-                    debug!("{:?}: HTTP event {} hung up", &network.local_peer, event_id);
-                    return Err(NetError::PeerNotConnected);
-                }
-            };
-
-            // drive socket I/O
-            if let Some(request) = request_opt.take() {
-                convo.send_request(request)?;
-            };
-            HttpPeer::saturate_http_socket(socket, convo)?;
-
-            // see if we got any data
-            let Some(http_response) = convo.try_get_response() else {
-                // still waiting
-                debug!(
-                    "{:?}: HTTP event {} is still waiting for a response",
-                    &network.local_peer, event_id
-                );
-                return Ok(None);
-            };
-
-            Ok(Some(http_response))
-        })
-    }
-}
diff --git a/stackslib/src/net/neighbors/walk.rs b/stackslib/src/net/neighbors/walk.rs
index 8a0e370ba8f..92481406299 100644
--- a/stackslib/src/net/neighbors/walk.rs
+++ b/stackslib/src/net/neighbors/walk.rs
@@ -28,8 +28,8 @@ use crate::burnchains::{Address, Burnchain, BurnchainView, PublicKey};
 use crate::net::connection::{ConnectionOptions, ReplyHandleP2P};
 use crate::net::db::{LocalPeer, PeerDB};
 use crate::net::neighbors::{
-    NeighborComms, NeighborReplacements, NeighborWalkDB, ToNeighborKey, MAX_NEIGHBOR_BLOCK_DELAY,
-    NEIGHBOR_MINIMUM_CONTACT_INTERVAL,
+    NeighborComms, NeighborCommsRequest, NeighborReplacements, NeighborWalkDB, ToNeighborKey,
+    MAX_NEIGHBOR_BLOCK_DELAY, NEIGHBOR_MINIMUM_CONTACT_INTERVAL,
 };
 use crate::net::p2p::PeerNetwork;
 use crate::net::{
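// [Editor's note: illustrative sketch, not part of the patch above.] The deleted
// `poll_next_reply` follows a send-once-then-poll discipline: take the queued request
// the first time through, keep driving the socket, and return a value only when the
// response is ready. A toy model of that discipline with plain types:

struct Convo {
    sent: bool,
    ticks_until_reply: u32,
}

fn poll(convo: &mut Convo, request: &mut Option<&'static str>) -> Option<&'static str> {
    if request.take().is_some() {
        convo.sent = true; // send exactly once; later polls find `request` empty
    }
    if convo.sent && convo.ticks_until_reply == 0 {
        return Some("response");
    }
    convo.ticks_until_reply = convo.ticks_until_reply.saturating_sub(1);
    None
}

fn main() {
    let mut convo = Convo { sent: false, ticks_until_reply: 2 };
    let mut req = Some("GET /v3/tenures/info");
    let mut polls = 0;
    let resp = loop {
        polls += 1;
        if let Some(r) = poll(&mut convo, &mut req) {
            break r;
        }
    };
    assert_eq!(resp, "response");
    assert_eq!(polls, 3); // two polls waiting, third returns the reply
}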
diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs
index b60146dff31..345426aa3ac 100644
--- a/stackslib/src/net/p2p.rs
+++ b/stackslib/src/net/p2p.rs
@@ -15,7 +15,7 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 use std::cmp::Ordering;
-use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
+use std::collections::{HashMap, HashSet, VecDeque};
 use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
 use std::sync::mpsc::{
     sync_channel, Receiver, RecvError, SendError, SyncSender, TryRecvError, TrySendError,
@@ -36,7 +36,6 @@ use stacks_common::types::StacksEpochId;
 use stacks_common::util::hash::to_hex;
 use stacks_common::util::secp256k1::Secp256k1PublicKey;
 use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, log};
-use wsts::curve::point::Point;
 use {mio, url};
 
 use crate::burnchains::db::{BurnchainDB, BurnchainHeaderReader};
@@ -57,7 +56,6 @@ use crate::net::atlas::{AtlasDB, AttachmentInstance, AttachmentsDownloader};
 use crate::net::chat::{ConversationP2P, NeighborStats};
 use crate::net::connection::{ConnectionOptions, NetworkReplyHandle, ReplyHandleP2P};
 use crate::net::db::{LocalPeer, PeerDB};
-use crate::net::download::nakamoto::NakamotoDownloadStateMachine;
 use crate::net::download::BlockDownloader;
 use crate::net::http::HttpRequestContents;
 use crate::net::httpcore::StacksHttpRequest;
@@ -221,29 +219,10 @@ pub struct PeerNetwork {
     // refreshed whenever the burnchain advances
     pub chain_view: BurnchainView,
     pub burnchain_tip: BlockSnapshot,
+    pub stacks_tip: (ConsensusHash, BlockHeaderHash, u64),
     pub chain_view_stable_consensus_hash: ConsensusHash,
     pub ast_rules: ASTRules,
 
-    /// Current Stacks tip -- the highest block's consensus hash, block hash, and height
-    pub stacks_tip: (ConsensusHash, BlockHeaderHash, u64),
-    /// Sortition that corresponds to the current Stacks tip, if known
-    pub stacks_tip_sn: Option<BlockSnapshot>,
-    /// Parent tenure Stacks tip -- the last block in the current tip's parent tenure.
-    /// In epoch 2.x, this is the parent block.
-    /// In nakamoto, this is the last block in the parent tenure
-    pub parent_stacks_tip: (ConsensusHash, BlockHeaderHash, u64),
-    /// The block id of the first block in this tenure.
-    /// In epoch 2.x, this is the same as the tip block ID
-    /// In nakamoto, this is the block ID of the first block in the current tenure
-    pub tenure_start_block_id: StacksBlockId,
-    /// The aggregate public keys of each witnessed reward cycle.
-    /// Only active during epoch 3.x and beyond.
-    /// Gets refreshed on each new Stacks block arrival, which deals with burnchain forks.
-    /// Stored in a BTreeMap because we often need to query the last or second-to-last reward cycle
-    /// aggregate public key, and we need to determine whether or not to load new reward cycles'
-    /// keys.
-    pub aggregate_public_keys: BTreeMap<u64, Option<Point>>,
-
     // information about the state of the network's anchor blocks
     pub heaviest_affirmation_map: AffirmationMap,
     pub stacks_tip_affirmation_map: AffirmationMap,
@@ -311,10 +290,8 @@ pub struct PeerNetwork {
     // (maintained by the downloader state machine)
     pub header_cache: BlockHeaderCache,
 
-    /// Epoch 2.x peer block download state
+    // peer block download state
     pub block_downloader: Option<BlockDownloader>,
-    /// Epoch 3.x (nakamoto) peer block download state
-    pub block_downloader_nakamoto: Option<NakamotoDownloadStateMachine>,
 
     // peer attachment downloader
     pub attachments_downloader: Option<AttachmentsDownloader>,
@@ -450,10 +427,6 @@ impl PeerNetwork {
                 first_burn_header_ts as u64,
             ),
             stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0),
-            stacks_tip_sn: None,
-            parent_stacks_tip: (ConsensusHash([0x00; 20]), BlockHeaderHash([0x00; 32]), 0),
-            tenure_start_block_id: StacksBlockId([0x00; 32]),
-            aggregate_public_keys: BTreeMap::new(),
 
             peerdb: peerdb,
             atlasdb: atlasdb,
@@ -496,7 +469,6 @@ impl PeerNetwork {
             header_cache: BlockHeaderCache::new(),
 
             block_downloader: None,
-            block_downloader_nakamoto: None,
             attachments_downloader: None,
 
             stacker_db_syncs: Some(stacker_db_sync_map),
@@ -570,25 +542,13 @@ impl PeerNetwork {
 
     /// Get the current epoch
     pub fn get_current_epoch(&self) -> StacksEpoch {
-        self.get_epoch_at_burn_height(self.chain_view.burn_block_height)
-    }
-
-    /// Get an epoch at a burn block height
-    pub fn get_epoch_at_burn_height(&self, burn_height: u64) -> StacksEpoch {
-        let epoch_index = StacksEpoch::find_epoch(&self.epochs, burn_height)
-            .unwrap_or_else(|| panic!("BUG: block {} is not in a known epoch", burn_height,));
-        let epoch = self
-            .epochs
-            .get(epoch_index)
-            .expect("BUG: no epoch at found index")
-            .clone();
-        epoch
-    }
-
-    /// Get an epoch by epoch ID
-    pub fn get_epoch_by_epoch_id(&self, epoch_id: StacksEpochId) -> StacksEpoch {
-        let epoch_index = StacksEpoch::find_epoch_by_id(&self.epochs, epoch_id)
-            .unwrap_or_else(|| panic!("BUG: epoch {} is not in a known epoch", epoch_id,));
+        let epoch_index = StacksEpoch::find_epoch(&self.epochs, self.chain_view.burn_block_height)
+            .unwrap_or_else(|| {
+                panic!(
+                    "BUG: block {} is not in a known epoch",
+                    &self.chain_view.burn_block_height
+                )
+            });
         let epoch = self
             .epochs
             .get(epoch_index)
@@ -735,11 +695,6 @@ impl PeerNetwork {
         self.peers.keys()
     }
 
-    /// Get an iterator over all of the conversations
-    pub fn iter_peer_convos(&self) -> impl Iterator<Item = (&usize, &ConversationP2P)> {
-        self.peers.iter()
-    }
-
     /// Get the PoX ID
     pub fn get_pox_id(&self) -> &PoxId {
         &self.pox_id
@@ -1814,7 +1769,7 @@ impl PeerNetwork {
         }
         if let Some(inv_state) = self.inv_state_nakamoto.as_mut() {
             debug!(
-                "{:?}: Remove inventory state for Nakamoto {:?}",
+                "{:?}: Remove inventory state for epoch 2.x {:?}",
                 &self.local_peer, &nk
             );
             inv_state.del_peer(&NeighborAddress::from_neighbor_key(nk, pubkh));
@@ -2025,7 +1980,6 @@ impl PeerNetwork {
         event_id: usize,
         sortdb: &SortitionDB,
         chainstate: &mut StacksChainState,
-        dns_client_opt: &mut Option<&mut DNSClient>,
         ibd: bool,
     ) -> Result<(Vec<StacksMessage>, bool), net_error> {
         self.with_p2p_convo(event_id, |network, convo, client_sock| {
@@ -2058,7 +2012,7 @@ impl PeerNetwork {
             // react to inbound messages -- do we need to send something out, or fulfill requests
             // to other threads?  Try to chat even if the recv() failed, since we'll want to at
             // least drain the conversation inbox.
- let unhandled = match convo.chat(network, sortdb, chainstate, dns_client_opt, ibd) { + let unhandled = match convo.chat(network, sortdb, chainstate, ibd) { Err(e) => { debug!( "Failed to converse on event {} (socket {:?}): {:?}", @@ -2114,7 +2068,6 @@ impl PeerNetwork { &mut self, sortdb: &SortitionDB, chainstate: &mut StacksChainState, - dns_client_opt: &mut Option<&mut DNSClient>, poll_state: &mut NetworkPollState, ibd: bool, ) -> (Vec, HashMap>) { @@ -2122,25 +2075,20 @@ impl PeerNetwork { let mut unhandled: HashMap> = HashMap::new(); for event_id in &poll_state.ready { - let (mut convo_unhandled, alive) = match self.process_p2p_conversation( - *event_id, - sortdb, - chainstate, - dns_client_opt, - ibd, - ) { - Ok((convo_unhandled, alive)) => (convo_unhandled, alive), - Err(_e) => { - test_debug!( - "{:?}: Connection to {:?} failed: {:?}", - &self.local_peer, - self.get_p2p_convo(*event_id), - &_e - ); - to_remove.push(*event_id); - continue; - } - }; + let (mut convo_unhandled, alive) = + match self.process_p2p_conversation(*event_id, sortdb, chainstate, ibd) { + Ok((convo_unhandled, alive)) => (convo_unhandled, alive), + Err(_e) => { + test_debug!( + "{:?}: Connection to {:?} failed: {:?}", + &self.local_peer, + self.get_p2p_convo(*event_id), + &_e + ); + to_remove.push(*event_id); + continue; + } + }; if !alive { test_debug!( @@ -3921,7 +3869,6 @@ impl PeerNetwork { /// This will call the epoch-appropriate network worker fn do_network_work( &mut self, - burnchain_height: u64, sortdb: &SortitionDB, chainstate: &mut StacksChainState, dns_client_opt: &mut Option<&mut DNSClient>, @@ -3934,20 +3881,12 @@ impl PeerNetwork { debug!("{:?}: run Nakamoto work loop", self.get_local_peer()); // in Nakamoto epoch, so do Nakamoto things - let prune = self.do_network_work_nakamoto( - burnchain_height, - sortdb, - chainstate, - ibd, - network_result, - ); + let prune = self.do_network_work_nakamoto(sortdb, ibd); // in Nakamoto epoch, but we might still be doing epoch 2.x things since Nakamoto does // not begin on a reward cycle boundary. if cur_epoch.epoch_id == StacksEpochId::Epoch30 - && (self.burnchain_tip.block_height - <= cur_epoch.start_height - + u64::from(self.burnchain.pox_constants.reward_cycle_length) + && (self.burnchain_tip.block_height <= cur_epoch.start_height || self.connection_opts.force_nakamoto_epoch_transition) { debug!( @@ -3989,14 +3928,7 @@ impl PeerNetwork { /// Return true if we need to prune connections. /// Used only for nakamoto. /// TODO: put this into a separate file for nakamoto p2p code paths - fn do_network_work_nakamoto( - &mut self, - burnchain_height: u64, - sortdb: &SortitionDB, - chainstate: &StacksChainState, - ibd: bool, - network_result: &mut NetworkResult, - ) -> bool { + fn do_network_work_nakamoto(&mut self, sortdb: &SortitionDB, ibd: bool) -> bool { // do some Actual Work(tm) let mut do_prune = false; let mut did_cycle = false; @@ -4010,22 +3942,6 @@ impl PeerNetwork { &self.nakamoto_work_state; "learned_new_blocks?" 
=> learned ); - - // always do block download - let new_blocks = self - .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) - .map_err(|e| { - warn!( - "{:?}: Failed to perform Nakamoto block sync: {:?}", - &self.get_local_peer(), - &e - ); - e - }) - .unwrap_or(HashMap::new()); - - network_result.consume_nakamoto_blocks(new_blocks); - let cur_state = self.nakamoto_work_state; match self.nakamoto_work_state { PeerNetworkWorkState::GetPublicIP => { @@ -4044,7 +3960,10 @@ impl PeerNetwork { self.nakamoto_work_state = PeerNetworkWorkState::BlockDownload; } PeerNetworkWorkState::BlockDownload => { - // this state is useless in Nakamoto since we're always doing download-syncs + debug!( + "{:?}: Block download for Nakamoto is not yet implemented", + self.get_local_peer() + ); self.nakamoto_work_state = PeerNetworkWorkState::AntiEntropy; } PeerNetworkWorkState::AntiEntropy => { @@ -5305,123 +5224,6 @@ impl PeerNetwork { Ok(()) } - /// Load up the parent stacks tip. - /// For epoch 2.x, this is the pointer to the parent block of the current stacks tip - /// For epoch 3.x, this is the pointer to the tenure-start block of the parent tenure of the - /// current stacks tip. - /// If this is the first tenure in epoch 3.x, then this is the pointer to the epoch 2.x block - /// that it builds atop. - pub(crate) fn get_parent_stacks_tip( - cur_epoch: StacksEpochId, - chainstate: &StacksChainState, - stacks_tip_block_id: &StacksBlockId, - ) -> Result<(ConsensusHash, BlockHeaderHash, u64), net_error> { - let header = NakamotoChainState::get_block_header(chainstate.db(), stacks_tip_block_id)? - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - let parent_header = if cur_epoch < StacksEpochId::Epoch30 { - // prior to epoch 3.0, the self.prev_stacks_tip field is just the parent block - let parent_block_id = - StacksChainState::get_parent_block_id(chainstate.db(), &header.index_block_hash())? - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - NakamotoChainState::get_block_header(chainstate.db(), &parent_block_id)? - .ok_or(net_error::DBError(db_error::NotFoundError))? - } else { - // in epoch 3.0 and later, self.prev_stacks_tip is the first tenure block of the - // current tip's parent tenure. - match NakamotoChainState::get_nakamoto_parent_tenure_id_consensus_hash( - chainstate.db(), - &header.consensus_hash, - )? { - Some(ch) => NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &ch, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))?, - None => { - // parent in epoch 2 - let tenure_start_block_header = - NakamotoChainState::get_block_header_by_consensus_hash( - chainstate.db(), - &header.consensus_hash, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - let nakamoto_header = tenure_start_block_header - .anchored_header - .as_stacks_nakamoto() - .ok_or(net_error::DBError(db_error::NotFoundError))?; - - NakamotoChainState::get_block_header( - chainstate.db(), - &nakamoto_header.parent_block_id, - )? - .ok_or(net_error::DBError(db_error::NotFoundError))? - } - } - }; - Ok(( - parent_header.consensus_hash, - parent_header.anchored_header.block_hash(), - parent_header.anchored_header.height(), - )) - } - - /// Refresh our view of the aggregate public keys - /// Returns a list of (reward-cycle, option(pubkey)) pairs. - /// An option(pubkey) is defined for all reward cycles, but for epochs 2.4 and earlier, it will - /// be None. 
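The `get_parent_stacks_tip` helper deleted above resolves the parent tip differently per epoch: before 3.0 it is the tip's immediate parent block, while in 3.0 and later it is the tenure-start block of the parent tenure, falling back to the epoch 2.x parent when the tip is the first Nakamoto tenure. A toy sketch of that branching, with closures standing in for the real chainstate lookups (all names here are illustrative, not the actual API):

```rust
/// Simplified stand-ins for the chainstate queries the removed helper makes.
#[derive(Clone, Copy)]
enum Epoch { Epoch2x, Epoch30 }

#[derive(Debug)]
struct TipPointer { consensus_hash: u8, block_hash: u8, height: u64 }

/// Resolve the "parent tip" for the current Stacks tip:
/// * pre-3.0: the immediate parent block of the tip
/// * 3.0+:    the first block of the tip's parent tenure, falling back to the
///            epoch 2.x parent when the tip is the first Nakamoto tenure
fn parent_stacks_tip(
    epoch: Epoch,
    parent_block: impl Fn() -> Option<TipPointer>,
    parent_tenure_start: impl Fn() -> Option<TipPointer>,
) -> Option<TipPointer> {
    match epoch {
        Epoch::Epoch2x => parent_block(),
        Epoch::Epoch30 => parent_tenure_start().or_else(parent_block),
    }
}

fn main() {
    let parent = || Some(TipPointer { consensus_hash: 1, block_hash: 2, height: 10 });
    let tenure_start = || None; // first Nakamoto tenure: no parent tenure yet
    let tip = parent_stacks_tip(Epoch::Epoch30, parent, tenure_start).unwrap();
    assert_eq!(tip.height, 10);

    // pre-3.0 path only ever consults the direct parent
    assert!(parent_stacks_tip(Epoch::Epoch2x, || None, || None).is_none());
}
```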
- fn find_new_aggregate_public_keys( - &mut self, - sortdb: &SortitionDB, - tip_sn: &BlockSnapshot, - chainstate: &mut StacksChainState, - stacks_tip_block_id: &StacksBlockId, - ) -> Result)>, net_error> { - let sort_tip_rc = self - .burnchain - .block_height_to_reward_cycle(tip_sn.block_height) - .expect("FATAL: sortition from before system start"); - let next_agg_pubkey_rc = self - .aggregate_public_keys - .last_key_value() - .map(|(rc, _)| rc.saturating_add(1)) - .unwrap_or(0); - let mut new_agg_pubkeys: Vec<_> = (next_agg_pubkey_rc..=sort_tip_rc) - .filter_map(|key_rc| { - let ih = sortdb.index_handle(&tip_sn.sortition_id); - let agg_pubkey_opt = if self.get_current_epoch().epoch_id < StacksEpochId::Epoch25 { - None - } else { - test_debug!( - "Try to get aggregate public key for reward cycle {}", - key_rc - ); - NakamotoChainState::load_aggregate_public_key( - sortdb, - &ih, - chainstate, - self.burnchain.reward_cycle_to_block_height(key_rc), - &stacks_tip_block_id, - false, - ) - .ok() - }; - if agg_pubkey_opt.is_none() { - return None; - } - Some((key_rc, agg_pubkey_opt)) - }) - .collect(); - - if new_agg_pubkeys.len() == 0 && self.aggregate_public_keys.len() == 0 { - // special case -- we're before epoch 3.0, so don't waste time doing this again - new_agg_pubkeys.push((sort_tip_rc, None)); - } - Ok(new_agg_pubkeys) - } - /// Refresh view of burnchain, if needed. /// If the burnchain view changes, then take the following additional steps: /// * hint to the inventory sync state-machine to restart, since we potentially have a new @@ -5437,81 +5239,29 @@ impl PeerNetwork { ibd: bool, ) -> Result>, net_error> { // update burnchain snapshot if we need to (careful -- it's expensive) - let canonical_sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + let sn = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash_and_height(sortdb.conn())?; - let burnchain_tip_changed = canonical_sn.block_height != self.chain_view.burn_block_height - || self.num_state_machine_passes == 0; + let burnchain_tip_changed = sn.block_height != self.chain_view.burn_block_height; let stacks_tip_changed = self.stacks_tip != stacks_tip; - let new_stacks_tip_block_id = StacksBlockId::new(&stacks_tip.0, &stacks_tip.1); - let need_stackerdb_refresh = canonical_sn.canonical_stacks_tip_consensus_hash - != self.burnchain_tip.canonical_stacks_tip_consensus_hash - || burnchain_tip_changed - || stacks_tip_changed; let mut ret: HashMap> = HashMap::new(); - let aggregate_public_keys = self.find_new_aggregate_public_keys( - sortdb, - &canonical_sn, - chainstate, - &new_stacks_tip_block_id, - )?; - let (parent_stacks_tip, tenure_start_block_id, stacks_tip_sn) = if stacks_tip_changed { - let stacks_tip_sn = - SortitionDB::get_block_snapshot_consensus(sortdb.conn(), &stacks_tip.0)?; - let tenure_start_block_id = if let Some(header) = - NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &stacks_tip.0, - )? 
{ - header.index_block_hash() - } else { - new_stacks_tip_block_id.clone() - }; - let parent_tip_id = match Self::get_parent_stacks_tip( - self.get_current_epoch().epoch_id, - chainstate, - &new_stacks_tip_block_id, - ) { - Ok(tip_id) => tip_id, - Err(net_error::DBError(db_error::NotFoundError)) => { - // this is the first block - ( - FIRST_BURNCHAIN_CONSENSUS_HASH.clone(), - FIRST_STACKS_BLOCK_HASH.clone(), - 0, - ) - } - Err(e) => return Err(e), - }; - (parent_tip_id, tenure_start_block_id, stacks_tip_sn) - } else { - ( - self.parent_stacks_tip.clone(), - self.tenure_start_block_id.clone(), - self.stacks_tip_sn.clone(), - ) - }; - if burnchain_tip_changed || stacks_tip_changed { // only do the needful depending on what changed debug!( "{:?}: load chain view for burn block {}", - &self.local_peer, canonical_sn.block_height + &self.local_peer, sn.block_height ); - let new_chain_view = SortitionDB::get_burnchain_view( - &sortdb.index_conn(), - &self.burnchain, - &canonical_sn, - )?; + let new_chain_view = + SortitionDB::get_burnchain_view(&sortdb.index_conn(), &self.burnchain, &sn)?; let new_chain_view_stable_consensus_hash = { let ic = sortdb.index_conn(); let ancestor_sn = SortitionDB::get_ancestor_snapshot( &ic, new_chain_view.burn_stable_block_height, - &canonical_sn.sortition_id, + &sn.sortition_id, )? .unwrap_or(SortitionDB::get_first_block_snapshot(sortdb.conn())?); ancestor_sn.consensus_hash @@ -5539,44 +5289,39 @@ impl PeerNetwork { self.pox_id.num_inventory_reward_cycles().saturating_sub(1) as u64; // update tx validation information - self.ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), canonical_sn.block_height)?; + self.ast_rules = SortitionDB::get_ast_rules(sortdb.conn(), sn.block_height)?; - if self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { - // update heaviest affirmation map view - let burnchain_db = self.burnchain.open_burnchain_db(false)?; + // update heaviest affirmation map view + let burnchain_db = self.burnchain.open_burnchain_db(false)?; - self.heaviest_affirmation_map = static_get_heaviest_affirmation_map( - &self.burnchain, - indexer, - &burnchain_db, - sortdb, - &canonical_sn.sortition_id, - ) - .map_err(|_| { - net_error::Transient("Unable to query heaviest affirmation map".to_string()) - })?; + self.heaviest_affirmation_map = static_get_heaviest_affirmation_map( + &self.burnchain, + indexer, + &burnchain_db, + sortdb, + &sn.sortition_id, + ) + .map_err(|_| { + net_error::Transient("Unable to query heaviest affirmation map".to_string()) + })?; - self.tentative_best_affirmation_map = static_get_canonical_affirmation_map( - &self.burnchain, - indexer, - &burnchain_db, - sortdb, - chainstate, - &canonical_sn.sortition_id, - ) - .map_err(|_| { - net_error::Transient("Unable to query canonical affirmation map".to_string()) - })?; + self.tentative_best_affirmation_map = static_get_canonical_affirmation_map( + &self.burnchain, + indexer, + &burnchain_db, + sortdb, + chainstate, + &sn.sortition_id, + ) + .map_err(|_| { + net_error::Transient("Unable to query canonical affirmation map".to_string()) + })?; - self.sortition_tip_affirmation_map = - SortitionDB::find_sortition_tip_affirmation_map( - sortdb, - &canonical_sn.sortition_id, - )?; - } + self.sortition_tip_affirmation_map = + SortitionDB::find_sortition_tip_affirmation_map(sortdb, &sn.sortition_id)?; // update last anchor data - let ih = sortdb.index_handle(&canonical_sn.sortition_id); + let ih = sortdb.index_handle(&sn.sortition_id); self.last_anchor_block_hash = ih 
.get_last_selected_anchor_block_hash()? .unwrap_or(BlockHeaderHash([0x00; 32])); @@ -5584,29 +5329,19 @@ impl PeerNetwork { .get_last_selected_anchor_block_txid()? .unwrap_or(Txid([0x00; 32])); - test_debug!( - "{:?}: chain view is {:?}", - &self.get_local_peer(), - &self.chain_view - ); - } - - if need_stackerdb_refresh { - // refresh stackerdb configs -- canonical stacks tip has changed - debug!("{:?}: Refresh all stackerdbs", &self.get_local_peer()); + // refresh stackerdb configs self.refresh_stacker_db_configs(sortdb, chainstate)?; } - if stacks_tip_changed && self.get_current_epoch().epoch_id < StacksEpochId::Epoch30 { + if stacks_tip_changed { // update stacks tip affirmation map view - // (NOTE: this check has to happen _after_ self.chain_view gets updated!) let burnchain_db = self.burnchain.open_burnchain_db(false)?; self.stacks_tip_affirmation_map = static_get_stacks_tip_affirmation_map( &burnchain_db, sortdb, - &canonical_sn.sortition_id, - &canonical_sn.canonical_stacks_tip_consensus_hash, - &canonical_sn.canonical_stacks_tip_hash, + &sn.sortition_id, + &sn.canonical_stacks_tip_consensus_hash, + &sn.canonical_stacks_tip_hash, ) .map_err(|_| { net_error::Transient("Unable to query stacks tip affirmation map".to_string()) @@ -5622,16 +5357,9 @@ impl PeerNetwork { self.handle_unsolicited_messages(sortdb, chainstate, buffered_messages, ibd, false); } - // update cached stacks chain view for /v2/info and /v3/tenures/info - self.burnchain_tip = canonical_sn; + // update cached stacks chain view for /v2/info + self.burnchain_tip = sn; self.stacks_tip = stacks_tip; - self.stacks_tip_sn = stacks_tip_sn; - self.parent_stacks_tip = parent_stacks_tip; - for (key_rc, agg_pubkey_opt) in aggregate_public_keys { - self.aggregate_public_keys.insert(key_rc, agg_pubkey_opt); - } - self.tenure_start_block_id = tenure_start_block_id; - Ok(ret) } @@ -5643,7 +5371,6 @@ impl PeerNetwork { fn dispatch_network( &mut self, network_result: &mut NetworkResult, - burnchain_height: u64, sortdb: &SortitionDB, mempool: &MemPoolDB, chainstate: &mut StacksChainState, @@ -5667,13 +5394,8 @@ impl PeerNetwork { let unauthenticated_inbounds = self.find_unauthenticated_inbound_convos(); // run existing conversations, clear out broken ones, and get back messages forwarded to us - let (error_events, unsolicited_messages) = self.process_ready_sockets( - sortdb, - chainstate, - &mut dns_client_opt, - &mut poll_state, - ibd, - ); + let (error_events, unsolicited_messages) = + self.process_ready_sockets(sortdb, chainstate, &mut poll_state, ibd); for error_event in error_events { debug!( "{:?}: Failed connection on event {}", @@ -5692,7 +5414,6 @@ impl PeerNetwork { // do this _after_ processing new sockets, so the act of opening a socket doesn't trample // an already-used network ID. 
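The refresh path above is deliberately gated: it compares the cached burnchain and Stacks tips against the freshly loaded canonical tips, and only recomputes the chain view, affirmation maps, and StackerDB configs when one of them actually moved. A minimal sketch of that change-detection gate, with illustrative types in place of the real snapshots:

```rust
/// Toy tip: height plus a hash stand-in.
#[derive(Clone, PartialEq, Debug)]
struct Tip { height: u64, hash: [u8; 32] }

struct View {
    burn_tip: Tip,
    stacks_tip: Tip,
    refreshes: u64, // counts how often the expensive path ran
}

impl View {
    fn refresh(&mut self, canonical_burn: Tip, canonical_stacks: Tip) {
        let burn_changed = canonical_burn != self.burn_tip;
        let stacks_changed = canonical_stacks != self.stacks_tip;
        if burn_changed || stacks_changed {
            // ... rebuild chain view, affirmation maps, stackerdb configs ...
            self.refreshes += 1;
        }
        // always keep the cached tips current
        self.burn_tip = canonical_burn;
        self.stacks_tip = canonical_stacks;
    }
}

fn main() {
    let t0 = Tip { height: 100, hash: [0; 32] };
    let s0 = Tip { height: 50, hash: [1; 32] };
    let mut view = View { burn_tip: t0.clone(), stacks_tip: s0.clone(), refreshes: 0 };

    view.refresh(t0.clone(), s0.clone()); // nothing moved: skip expensive work
    assert_eq!(view.refreshes, 0);

    let t1 = Tip { height: 101, hash: [2; 32] };
    view.refresh(t1, s0); // burnchain advanced: refresh
    assert_eq!(view.refreshes, 1);
}
```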
let do_prune = self.do_network_work( - burnchain_height, sortdb, chainstate, &mut dns_client_opt, @@ -5729,17 +5450,13 @@ self.do_attachment_downloads(dns_client_opt, network_result); // synchronize stacker DBs - if !ibd { - match self.run_stacker_db_sync() { - Ok(stacker_db_sync_results) => { - network_result.consume_stacker_db_sync_results(stacker_db_sync_results); - } - Err(e) => { - warn!("Failed to run Stacker DB sync: {:?}", &e); - } + match self.run_stacker_db_sync() { + Ok(stacker_db_sync_results) => { + network_result.consume_stacker_db_sync_results(stacker_db_sync_results); + } + Err(e) => { + warn!("Failed to run Stacker DB sync: {:?}", &e); } - } else { - debug!("{}: skip StackerDB sync in IBD", self.get_local_peer()); } // remove timed-out requests from other threads @@ -5925,69 +5642,6 @@ Ok(()) } - /// Static helper to check to see if there has been a reorg - pub fn is_reorg( - last_sort_tip: Option<&BlockSnapshot>, - sort_tip: &BlockSnapshot, - sortdb: &SortitionDB, - ) -> bool { - let Some(last_sort_tip) = last_sort_tip else { - // no prior tip, so no reorg to handle - return false; - }; - - if last_sort_tip.block_height == sort_tip.block_height - && last_sort_tip.consensus_hash == sort_tip.consensus_hash - { - // prior tip and current tip are the same, so no reorg - return false; - } - - if last_sort_tip.block_height == sort_tip.block_height - && last_sort_tip.consensus_hash != sort_tip.consensus_hash - { - // current and previous sortition tips are at the same height, but represent different - // blocks. - debug!( - "Reorg detected at burn height {}: {} != {}", - sort_tip.block_height, &last_sort_tip.consensus_hash, &sort_tip.consensus_hash - ); - return true; - } - - // It will never be the case that the last and current tip have different heights, but the - // same consensus hash. If they have the same height, then we would have already returned - // since we've handled both the == and != cases for their consensus hashes. So if we reach - // this point, the heights and consensus hashes are not equal. We only need to check that - // last_sort_tip is an ancestor of sort_tip - - let ih = sortdb.index_handle(&sort_tip.sortition_id); - let Ok(Some(ancestor_sn)) = ih.get_block_snapshot_by_height(last_sort_tip.block_height) - else { - // no such ancestor, so it's a reorg - info!( - "Reorg detected: no ancestor of burn block {} ({}) found", - sort_tip.block_height, &sort_tip.consensus_hash - ); - return true; - }; - - if ancestor_sn.consensus_hash != last_sort_tip.consensus_hash { - // ancestor doesn't have the expected consensus hash - info!( - "Reorg detected at burn block {}: ancestor tip at {}: {} != {}", - sort_tip.block_height, - last_sort_tip.block_height, - &ancestor_sn.consensus_hash, - &last_sort_tip.consensus_hash - ); - return true; - } - - // ancestor has expected consensus hash, so no reorg - false - } - /// Top-level main-loop circuit to take. /// -- polls the peer network and http network server sockets to get new sockets and detect ready sockets /// -- carries out network conversations @@ -6081,15 +5735,8 @@ }) .expect("FATAL: with_network_state should be infallible (not connected)"); - let burnchain_height = indexer - .get_burnchain_headers_height() - // N.B.
the indexer reports 1 + num_headers - .map(|burnchain_height| burnchain_height.saturating_sub(1)) - .unwrap_or(self.burnchain_tip.block_height); - self.dispatch_network( &mut network_result, - burnchain_height, sortdb, mempool, chainstate, diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 28ff92ae585..27d59b31237 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -657,7 +657,6 @@ impl Relayer { sort_handle: &mut SortitionHandleConn, chainstate: &mut StacksChainState, block: NakamotoBlock, - coord_comms: Option<&CoordinatorChannels>, ) -> Result { debug!( "Handle incoming Nakamoto block {}/{}", @@ -666,9 +665,8 @@ impl Relayer { ); // do we have this block? don't lock the DB needlessly if so. - if chainstate - .nakamoto_blocks_db() - .has_nakamoto_block(&block.header.block_id())? + if let Some(_) = + NakamotoChainState::get_block_header(chainstate.db(), &block.header.block_id())? { debug!("Already have Nakamoto block {}", &block.header.block_id()); return Ok(false); @@ -745,11 +743,6 @@ impl Relayer { if accepted { debug!("{}", &accept_msg); - if let Some(coord_comms) = coord_comms { - if !coord_comms.announce_new_stacks_block() { - return Err(chainstate_error::NetError(net_error::CoordinatorClosed)); - } - } } else { debug!("{}", &reject_msg); } @@ -757,31 +750,6 @@ impl Relayer { Ok(accepted) } - /// Process nakamoto blocks. - /// Log errors but do not return them. - pub fn process_nakamoto_blocks( - sortdb: &SortitionDB, - chainstate: &mut StacksChainState, - blocks: impl Iterator, - coord_comms: Option<&CoordinatorChannels>, - ) -> Result<(), chainstate_error> { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; - let mut sort_handle = sortdb.index_handle(&tip.sortition_id); - for block in blocks { - let block_id = block.block_id(); - if let Err(e) = Self::process_new_nakamoto_block( - sortdb, - &mut sort_handle, - chainstate, - block, - coord_comms, - ) { - warn!("Failed to process Nakamoto block {}: {:?}", &block_id, &e); - } - } - Ok(()) - } - /// Coalesce a set of microblocks into relayer hints and MicroblocksData messages, as calculated by /// process_new_blocks(). Make sure the messages don't get too big. 
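The `is_reorg` helper removed earlier reduces to three cases: same height and same consensus hash (no reorg), same height and different hash (reorg), and different heights, where the old tip must still appear unchanged in the new tip's ancestry. A self-contained sketch of that decision, with a `HashMap` standing in for the sortition index's ancestor-by-height lookup:

```rust
use std::collections::HashMap;

/// Toy snapshot: height plus a consensus-hash stand-in.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Snapshot { height: u64, consensus_hash: u8 }

/// Sketch of the removed check: the previous tip must either equal the new
/// tip at the same height, or appear unchanged as an ancestor of it.
fn is_reorg(
    last_tip: Option<Snapshot>,
    tip: Snapshot,
    ancestor_at: &HashMap<u64, Snapshot>, // height -> ancestor of `tip`
) -> bool {
    let Some(last) = last_tip else {
        return false; // no prior tip, nothing to compare
    };
    if last.height == tip.height {
        // same height: reorg exactly when the consensus hashes differ
        return last.consensus_hash != tip.consensus_hash;
    }
    // different heights: `last` must still be an ancestor of `tip`
    match ancestor_at.get(&last.height) {
        Some(anc) => anc.consensus_hash != last.consensus_hash,
        None => true, // no ancestor at that height: the old tip was orphaned
    }
}

fn main() {
    let last = Snapshot { height: 10, consensus_hash: 0xaa };
    let tip = Snapshot { height: 12, consensus_hash: 0xcc };

    // ancestry of `tip` still contains the old tip: no reorg
    let mut ancestors = HashMap::new();
    ancestors.insert(10, last);
    assert!(!is_reorg(Some(last), tip, &ancestors));

    // ancestry diverges at height 10: reorg
    ancestors.insert(10, Snapshot { height: 10, consensus_hash: 0xbb });
    assert!(is_reorg(Some(last), tip, &ancestors));
}
```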
fn make_microblocksdata_messages( @@ -2109,17 +2077,6 @@ impl Relayer { } }; - let nakamoto_blocks = - std::mem::replace(&mut network_result.nakamoto_blocks, HashMap::new()); - if let Err(e) = Relayer::process_nakamoto_blocks( - sortdb, - chainstate, - nakamoto_blocks.into_values(), - coord_comms, - ) { - warn!("Failed to process Nakamoto blocks: {:?}", &e); - } - let mut mempool_txs_added = vec![]; // only care about transaction forwarding if not IBD @@ -2653,12 +2610,12 @@ pub mod test { use crate::net::asn::*; use crate::net::chat::*; use crate::net::codec::*; + use crate::net::download::test::run_get_blocks_and_microblocks; use crate::net::download::*; use crate::net::http::{HttpRequestContents, HttpRequestPreamble}; use crate::net::httpcore::StacksHttpMessage; use crate::net::inv::inv2x::*; use crate::net::test::*; - use crate::net::tests::download::epoch2x::run_get_blocks_and_microblocks; use crate::net::*; use crate::util_lib::test::*; @@ -5318,7 +5275,6 @@ pub mod test { network_epoch: PEER_VERSION_EPOCH_2_05, }, ]); - let burnchain = peer_config.burnchain.clone(); // activate new AST rules right away let mut peer = TestPeer::new(peer_config); @@ -5404,7 +5360,6 @@ pub mod test { let coinbase_tx = make_coinbase(miner, 0); let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -5469,7 +5424,6 @@ pub mod test { let mblock_privk = miner.next_microblock_privkey(); let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -5494,7 +5448,6 @@ pub mod test { // make a bad block anyway // don't worry about the state root let block_builder = StacksBlockBuilder::make_regtest_block_builder( - &burnchain, &parent_tip, vrf_proof.clone(), tip.total_burn, @@ -5736,7 +5689,6 @@ pub mod test { }, ]; peer_config.epochs = Some(epochs); - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -5794,7 +5746,6 @@ pub mod test { mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrfproof, @@ -5909,7 +5860,6 @@ pub mod test { peer_config.epochs = Some(epochs); peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); @@ -5973,7 +5923,6 @@ pub mod test { mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrfproof, @@ -6090,7 +6039,6 @@ pub mod test { peer_config.epochs = Some(epochs); peer_config.initial_balances = initial_balances; - let burnchain = peer_config.burnchain.clone(); let mut peer = TestPeer::new(peer_config); let versioned_contract_opt: RefCell> = RefCell::new(None); @@ -6160,7 +6108,6 @@ pub mod test { mblock_pubkey_hash_bytes.copy_from_slice(&coinbase_tx.txid()[0..20]); let builder = StacksBlockBuilder::make_block_builder( - &burnchain, chainstate.mainnet, &parent_tip, vrfproof, diff --git a/stackslib/src/net/stackerdb/db.rs b/stackslib/src/net/stackerdb/db.rs index 6cdebb69d96..a1b0db94e2a 100644 --- a/stackslib/src/net/stackerdb/db.rs +++ b/stackslib/src/net/stackerdb/db.rs @@ -28,7 +28,6 @@ use stacks_common::util::get_epoch_time_secs; use stacks_common::util::hash::Sha512Trunc256Sum; use stacks_common::util::secp256k1::MessageSignature; -use super::StackerDBEventDispatcher; use 
crate::chainstate::stacks::address::PoxAddress; use crate::net::stackerdb::{StackerDBConfig, StackerDBTx, StackerDBs, STACKERDB_INV_MAX}; use crate::net::{Error as net_error, StackerDBChunkData, StackerDBHandshakeData}; @@ -388,20 +387,6 @@ impl<'a> StackerDBTx<'a> { Ok(()) } - /// Try to upload a chunk to the StackerDB instance, notifying - /// and subscribed listeners via the `dispatcher` - pub fn put_chunk( - self, - contract: &QualifiedContractIdentifier, - chunk: StackerDBChunkData, - dispatcher: &ED, - ) -> Result<(), net_error> { - self.try_replace_chunk(contract, &chunk.get_slot_metadata(), &chunk.data)?; - self.commit()?; - dispatcher.new_stackerdb_chunks(contract.clone(), vec![chunk]); - Ok(()) - } - /// Add or replace a chunk for a given reward cycle, if it is valid /// Otherwise, this errors out with Error::StaleChunk pub fn try_replace_chunk( diff --git a/stackslib/src/net/stackerdb/mod.rs b/stackslib/src/net/stackerdb/mod.rs index 7a1b29b2ee0..0213c0f96c8 100644 --- a/stackslib/src/net/stackerdb/mod.rs +++ b/stackslib/src/net/stackerdb/mod.rs @@ -151,7 +151,6 @@ pub const STACKERDB_MAX_PAGE_COUNT: u32 = 2; pub const STACKERDB_SLOTS_FUNCTION: &str = "stackerdb-get-signer-slots"; pub const STACKERDB_CONFIG_FUNCTION: &str = "stackerdb-get-config"; -pub const MINER_SLOT_COUNT: u32 = 2; /// Final result of synchronizing state with a remote set of DB replicas pub struct StackerDBSyncResult { @@ -165,8 +164,6 @@ pub struct StackerDBSyncResult { dead: HashSet, /// neighbors that misbehaved while syncing broken: HashSet, - /// neighbors that have stale views, but are otherwise online - pub(crate) stale: HashSet, } /// Settings for the Stacker DB @@ -388,8 +385,6 @@ pub struct StackerDBSync { /// whether or not we should immediately re-fetch chunks because we learned about new chunks /// from our peers when they replied to our chunk-pushes with new inventory state need_resync: bool, - /// Track stale neighbors - pub(crate) stale_neighbors: HashSet, } impl StackerDBSyncResult { @@ -402,7 +397,6 @@ impl StackerDBSyncResult { chunks_to_store: vec![chunk.chunk_data], dead: HashSet::new(), broken: HashSet::new(), - stale: HashSet::new(), } } } diff --git a/stackslib/src/net/stackerdb/sync.rs b/stackslib/src/net/stackerdb/sync.rs index 2a4232159b4..bf76092a727 100644 --- a/stackslib/src/net/stackerdb/sync.rs +++ b/stackslib/src/net/stackerdb/sync.rs @@ -33,9 +33,9 @@ use crate::net::stackerdb::{ StackerDBConfig, StackerDBSync, StackerDBSyncResult, StackerDBSyncState, StackerDBs, }; use crate::net::{ - Error as net_error, NackData, NackErrorCodes, Neighbor, NeighborAddress, NeighborKey, - StackerDBChunkData, StackerDBChunkInvData, StackerDBGetChunkData, StackerDBGetChunkInvData, - StackerDBPushChunkData, StacksMessageType, + Error as net_error, NackData, Neighbor, NeighborAddress, NeighborKey, StackerDBChunkData, + StackerDBChunkInvData, StackerDBGetChunkData, StackerDBGetChunkInvData, StackerDBPushChunkData, + StacksMessageType, }; const MAX_CHUNKS_IN_FLIGHT: usize = 6; @@ -71,7 +71,6 @@ impl StackerDBSync { total_pushed: 0, last_run_ts: 0, need_resync: false, - stale_neighbors: HashSet::new(), }; dbsync.reset(None, config); dbsync @@ -178,7 +177,6 @@ impl StackerDBSync { chunks_to_store: chunks, dead: self.comms.take_dead_neighbors(), broken: self.comms.take_broken_neighbors(), - stale: std::mem::replace(&mut self.stale_neighbors, HashSet::new()), }; // keep all connected replicas, and replenish from config hints and the DB as needed @@ -247,14 +245,7 @@ impl StackerDBSync { let 
local_write_timestamps = self .stackerdbs .get_slot_write_timestamps(&self.smart_contract_id)?; - - if local_slot_versions.len() != local_write_timestamps.len() { - // interleaved DB write? - return Err(net_error::Transient( - "Interleaved DB write has led to an inconsistent view of the stackerdb. Try again." - .into(), - )); - } + assert_eq!(local_slot_versions.len(), local_write_timestamps.len()); let mut need_chunks: HashMap)> = HashMap::new(); @@ -276,11 +267,11 @@ impl StackerDBSync { } for (naddr, chunk_inv) in self.chunk_invs.iter() { - if chunk_inv.slot_versions.len() != local_slot_versions.len() { - // need to retry -- our view of the versions got changed through a - // reconfiguration - continue; - } + assert_eq!( + chunk_inv.slot_versions.len(), + local_slot_versions.len(), + "FATAL: did not validate StackerDBChunkInvData" + ); if *local_version >= chunk_inv.slot_versions[i] { // remote peer has same view as local peer, or stale @@ -364,9 +355,11 @@ impl StackerDBSync { for (i, local_version) in local_slot_versions.iter().enumerate() { let mut local_chunk = None; for (naddr, chunk_inv) in self.chunk_invs.iter() { - if chunk_inv.slot_versions.len() != local_slot_versions.len() { - continue; - } + assert_eq!( + chunk_inv.slot_versions.len(), + local_slot_versions.len(), + "FATAL: did not validate StackerDBChunkData" + ); if *local_version <= chunk_inv.slot_versions[i] { // remote peer has same or newer view than local peer @@ -683,7 +676,6 @@ impl StackerDBSync { &network.get_chain_view().rc_consensus_hash, &db_data.rc_consensus_hash ); - self.connected_replicas.remove(&naddr); continue; } db_data @@ -695,10 +687,6 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { - self.stale_neighbors.insert(naddr); - } continue; } x => { @@ -795,14 +783,14 @@ impl StackerDBSync { network: &mut PeerNetwork, ) -> Result { for (naddr, message) in self.comms.collect_replies(network).into_iter() { - let chunk_inv_opt = match message.payload { + let chunk_inv = match message.payload { StacksMessageType::StackerDBChunkInv(data) => { if data.slot_versions.len() != self.num_slots { - info!("{:?}: Received malformed StackerDBChunkInv for {} from {:?}: expected {} chunks, got {}", network.get_local_peer(), &self.smart_contract_id, &naddr, self.num_slots, data.slot_versions.len()); - None - } else { - Some(data) + info!("{:?}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &naddr, self.num_slots, data.slot_versions.len()); + self.comms.add_broken(network, &naddr); + continue; } + data } StacksMessageType::Nack(data) => { debug!( @@ -811,15 +799,10 @@ impl StackerDBSync { &naddr, data.error_code ); - self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { - self.stale_neighbors.insert(naddr); - } continue; } x => { info!("Received unexpected message {:?}", &x); - self.connected_replicas.remove(&naddr); continue; } }; @@ -828,11 +811,8 @@ impl StackerDBSync { network.get_local_peer(), &naddr ); - - if let Some(chunk_inv) = chunk_inv_opt { - self.chunk_invs.insert(naddr.clone(), chunk_inv); - self.connected_replicas.insert(naddr); - } + self.chunk_invs.insert(naddr.clone(), chunk_inv); + self.connected_replicas.insert(naddr); } if self.comms.count_inflight() > 0 { // not done yet, so blocked @@ -948,14 +928,10 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); - if data.error_code == 
NackErrorCodes::StaleView { - self.stale_neighbors.insert(naddr); - } continue; } x => { info!("Received unexpected message {:?}", &x); - self.connected_replicas.remove(&naddr); continue; } }; @@ -966,6 +942,7 @@ impl StackerDBSync { "Remote neighbor {:?} served an invalid chunk for ID {}", &naddr, data.slot_id ); + self.comms.add_broken(network, &naddr); self.connected_replicas.remove(&naddr); continue; } @@ -1094,9 +1071,6 @@ impl StackerDBSync { data.error_code ); self.connected_replicas.remove(&naddr); - if data.error_code == NackErrorCodes::StaleView { - self.stale_neighbors.insert(naddr); - } continue; } x => { @@ -1108,6 +1082,7 @@ impl StackerDBSync { // must be well-formed if new_chunk_inv.slot_versions.len() != self.num_slots { info!("{:?}: Received malformed StackerDBChunkInv from {:?}: expected {} chunks, got {}", network.get_local_peer(), &naddr, self.num_slots, new_chunk_inv.slot_versions.len()); + self.comms.add_broken(network, &naddr); continue; } diff --git a/stackslib/src/net/stackerdb/tests/sync.rs b/stackslib/src/net/stackerdb/tests/sync.rs index eeb2f5aae52..bcbf584b05d 100644 --- a/stackslib/src/net/stackerdb/tests/sync.rs +++ b/stackslib/src/net/stackerdb/tests/sync.rs @@ -26,12 +26,11 @@ use stacks_common::address::{ AddressHashMode, C32_ADDRESS_VERSION_MAINNET_MULTISIG, C32_ADDRESS_VERSION_MAINNET_SINGLESIG, }; use stacks_common::types::chainstate::{ - BlockHeaderHash, ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + ConsensusHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey}; -use crate::chainstate::burn::db::sortdb::SortitionDB; use crate::net::relay::Relayer; use crate::net::stackerdb::db::SlotValidation; use crate::net::stackerdb::{StackerDBConfig, StackerDBs}; @@ -281,199 +280,6 @@ fn test_stackerdb_replica_2_neighbors_1_chunk() { }) } -#[test] -fn test_stackerdb_replica_2_neighbors_1_chunk_stale_view() { - with_timeout(600, || { - std::env::set_var("STACKS_TEST_DISABLE_EDGE_TRIGGER_TEST", "1"); - let mut peer_1_config = TestPeerConfig::from_port(BASE_PORT); - let mut peer_2_config = TestPeerConfig::from_port(BASE_PORT + 2); - - peer_1_config.allowed = -1; - peer_2_config.allowed = -1; - - // short-lived walks... 
- peer_1_config.connection_opts.walk_max_duration = 10; - peer_2_config.connection_opts.walk_max_duration = 10; - - // peer 1 crawls peer 2, and peer 2 crawls peer 1 - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - // set up stacker DBs for both peers - let idx_1 = add_stackerdb(&mut peer_1_config, Some(StackerDBConfig::template())); - let idx_2 = add_stackerdb(&mut peer_2_config, Some(StackerDBConfig::template())); - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - // peer 1 gets the DB - setup_stackerdb(&mut peer_1, idx_1, true, 1); - setup_stackerdb(&mut peer_2, idx_2, false, 1); - - // verify that peer 1 got the data - let peer_1_db_chunks = load_stackerdb(&peer_1, idx_1); - assert_eq!(peer_1_db_chunks.len(), 1); - assert_eq!(peer_1_db_chunks[0].0.slot_id, 0); - assert_eq!(peer_1_db_chunks[0].0.slot_version, 1); - assert!(peer_1_db_chunks[0].1.len() > 0); - - // verify that peer 2 did NOT get the data - let peer_2_db_chunks = load_stackerdb(&peer_2, idx_2); - assert_eq!(peer_2_db_chunks.len(), 1); - assert_eq!(peer_2_db_chunks[0].0.slot_id, 0); - assert_eq!(peer_2_db_chunks[0].0.slot_version, 0); - assert!(peer_2_db_chunks[0].1.len() == 0); - - let peer_1_db_configs = peer_1.config.get_stacker_db_configs(); - let peer_2_db_configs = peer_2.config.get_stacker_db_configs(); - - // force peer 2 to have a stale view - let (old_tip_ch, old_tip_bh) = { - let sortdb = peer_1.sortdb(); - let (tip_bh, tip_ch) = - SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); - SortitionDB::set_canonical_stacks_chain_tip( - sortdb.conn(), - &ConsensusHash([0x22; 20]), - &BlockHeaderHash([0x33; 32]), - 45, - ) - .unwrap(); - (tip_bh, tip_ch) - }; - - let mut i = 0; - let mut peer_1_stale = false; - let mut peer_2_stale = false; - loop { - // run peer network state-machines - peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); - peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); - - let res_1 = peer_1.step_with_ibd(false); - let res_2 = peer_2.step_with_ibd(false); - - if let Ok(mut res) = res_1 { - for sync_res in res.stacker_db_sync_results.iter() { - assert_eq!(sync_res.chunks_to_store.len(), 0); - if sync_res.stale.len() > 0 { - peer_1_stale = true; - } - } - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); - } - - if let Ok(mut res) = res_2 { - for sync_res in res.stacker_db_sync_results.iter() { - assert_eq!(sync_res.chunks_to_store.len(), 0); - if sync_res.stale.len() > 0 { - peer_2_stale = true; - } - } - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); - } - - if peer_1_stale && peer_2_stale { - break; - } - - i += 1; - } - - debug!("Completed stacker DB stale detection in {} step(s)", i); - - // fix and re-run - { - let sortdb = peer_1.sortdb(); - SortitionDB::set_canonical_stacks_chain_tip(sortdb.conn(), &old_tip_ch, &old_tip_bh, 0) - .unwrap(); - - // force chain view refresh - peer_1.network.num_state_machine_passes = 0; - 
} - - let mut i = 0; - loop { - // run peer network state-machines - peer_1.network.stacker_db_configs = peer_1_db_configs.clone(); - peer_2.network.stacker_db_configs = peer_2_db_configs.clone(); - - let res_1 = peer_1.step_with_ibd(false); - let res_2 = peer_2.step_with_ibd(false); - - if let Ok(mut res) = res_1 { - Relayer::process_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_1.network.stackerdbs, - &peer_1_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); - } - - if let Ok(mut res) = res_2 { - Relayer::process_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - res.stacker_db_sync_results, - None, - ) - .unwrap(); - Relayer::process_pushed_stacker_db_chunks( - &mut peer_2.network.stackerdbs, - &peer_2_db_configs, - &mut res.unhandled_messages, - None, - ) - .unwrap(); - } - - let db1 = load_stackerdb(&peer_1, idx_1); - let db2 = load_stackerdb(&peer_2, idx_2); - - if db1 == db2 { - break; - } - i += 1; - } - - debug!("Completed stacker DB sync in {} step(s)", i); - }) -} - #[test] #[ignore] fn test_stackerdb_replica_2_neighbors_10_chunks() { diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs deleted file mode 100644 index 5e9ea0daf2b..00000000000 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ /dev/null @@ -1,1512 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
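The StackerDB sync changes above remove the stale-view bookkeeping: a `Nack` carrying `NackErrorCodes::StaleView` used to take the replica out of the connected set while recording it as stale (online and honest, just behind our chain view) rather than broken. A minimal sketch of that distinction, using `String` neighbor IDs and an illustrative error-code enum (values here are not the wire protocol's):

```rust
use std::collections::HashSet;

/// Illustrative stand-in for the NACK error codes used by the sync code.
#[derive(Clone, Copy, PartialEq, Eq)]
enum NackCode { StaleView, Other }

/// A NACKed replica always leaves the connected set, but a StaleView NACK
/// additionally marks it stale instead of broken, so it can be retried once
/// the views converge.
struct SyncPeers {
    connected: HashSet<String>,
    stale: HashSet<String>,
}

impl SyncPeers {
    fn handle_nack(&mut self, neighbor: &str, code: NackCode) {
        self.connected.remove(neighbor);
        if code == NackCode::StaleView {
            self.stale.insert(neighbor.to_string());
        }
    }
}

fn main() {
    let mut peers = SyncPeers {
        connected: ["a".into(), "b".into()].into_iter().collect(),
        stale: HashSet::new(),
    };
    peers.handle_nack("a", NackCode::StaleView);
    peers.handle_nack("b", NackCode::Other);
    assert!(peers.stale.contains("a"));
    assert!(!peers.stale.contains("b"));
    assert!(peers.connected.is_empty());
}
```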
- -use std::collections::HashMap; - -use clarity::vm::clarity::ClarityConnection; -use clarity::vm::costs::ExecutionCost; -use clarity::vm::execute; -use clarity::vm::representations::*; -use rand::Rng; -use stacks_common::util::hash::*; -use stacks_common::util::sleep_ms; -use stacks_common::util::vrf::VRFProof; - -use super::*; -use crate::burnchains::tests::TestMiner; -use crate::chainstate::burn::db::sortdb::*; -use crate::chainstate::burn::operations::*; -use crate::chainstate::stacks::db::blocks::MINIMUM_TX_FEE_RATE_PER_BYTE; -use crate::chainstate::stacks::miner::*; -use crate::chainstate::stacks::tests::*; -use crate::chainstate::stacks::*; -use crate::net::codec::*; -use crate::net::download::BlockDownloader; -use crate::net::inv::inv2x::*; -use crate::net::relay::*; -use crate::net::test::*; -use crate::net::*; -use crate::stacks_common::types::PublicKey; -use crate::util_lib::strings::*; -use crate::util_lib::test::*; - -fn get_peer_availability( - peer: &mut TestPeer, - start_height: u64, - end_height: u64, -) -> Vec<(ConsensusHash, Option, Vec)> { - let inv_state = peer.network.inv_state.take().unwrap(); - let availability = peer - .with_network_state( - |ref mut sortdb, - ref mut _chainstate, - ref mut network, - ref mut _relayer, - ref mut _mempool| { - BlockDownloader::get_block_availability( - &network.local_peer, - &inv_state, - sortdb, - &mut network.header_cache, - start_height, - end_height, - ) - }, - ) - .unwrap(); - peer.network.inv_state = Some(inv_state); - availability -} - -#[test] -fn test_get_block_availability() { - with_timeout(600, || { - let mut peer_1_config = TestPeerConfig::new(function_name!(), 3210, 3211); - let mut peer_2_config = TestPeerConfig::new(function_name!(), 3212, 3213); - - // don't bother downloading blocks - peer_1_config.connection_opts.disable_block_download = true; - peer_2_config.connection_opts.disable_block_download = true; - - peer_1_config.add_neighbor(&peer_2_config.to_neighbor()); - peer_2_config.add_neighbor(&peer_1_config.to_neighbor()); - - let reward_cycle_length = peer_1_config.burnchain.pox_constants.reward_cycle_length as u64; - - let mut peer_1 = TestPeer::new(peer_1_config); - let mut peer_2 = TestPeer::new(peer_2_config); - - let num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - let mut block_data = vec![]; - - for i in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peer_2.make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peer_2.next_burnchain_block(burn_ops.clone()); - peer_2.process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - peer_1.next_burnchain_block_raw(burn_ops); - - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peer_2.sortdb.as_ref().unwrap().conn()) - .unwrap(); - block_data.push((sn.consensus_hash.clone(), stacks_block, microblocks)); - } - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer_1.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - peer_1.config.burnchain.first_block_height - }; - - let mut round = 0; - let mut inv_1_count = 0; - let mut inv_2_count = 0; - let mut all_blocks_available = false; - - // can only learn about 1 reward cycle's blocks at a time in PoX - while inv_1_count < reward_cycle_length - && inv_2_count < reward_cycle_length - && !all_blocks_available - { - let result_1 
= peer_1.step(); - let result_2 = peer_2.step(); - - inv_1_count = match peer_1.network.inv_state { - Some(ref inv) => { - let mut count = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); - - // continue until peer 1 knows that peer 2 has blocks - let peer_1_availability = get_peer_availability( - &mut peer_1, - first_stacks_block_height, - first_stacks_block_height + reward_cycle_length, - ); - - let mut all_availability = true; - for (_, _, neighbors) in peer_1_availability.iter() { - if neighbors.len() != 1 { - // not done yet - count = 0; - all_availability = false; - break; - } - assert_eq!(neighbors[0], peer_2.config.to_neighbor().addr); - } - - all_blocks_available = all_availability; - - count - } - None => 0, - }; - - inv_2_count = match peer_2.network.inv_state { - Some(ref inv) => inv.get_inv_sortitions(&peer_1.to_neighbor().addr), - None => 0, - }; - - // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} - } - - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} - } - - round += 1; - } - - info!("Completed walk round {} step(s)", round); - - let availability = get_peer_availability( - &mut peer_1, - first_stacks_block_height, - first_stacks_block_height + reward_cycle_length, - ); - - eprintln!("availability.len() == {}", availability.len()); - eprintln!("block_data.len() == {}", block_data.len()); - - assert_eq!(availability.len() as u64, reward_cycle_length); - assert_eq!(block_data.len() as u64, num_blocks); - - for ( - (sn_consensus_hash, stacks_block, microblocks), - (consensus_hash, stacks_block_hash_opt, neighbors), - ) in block_data.iter().zip(availability.iter()) - { - assert_eq!(*consensus_hash, *sn_consensus_hash); - assert!(stacks_block_hash_opt.is_some()); - assert_eq!(*stacks_block_hash_opt, Some(stacks_block.block_hash())); - } - }) -} - -fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { - let block_hashes = { - let num_headers = end_height - start_height; - let ic = peer.sortdb.as_mut().unwrap().index_conn(); - let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); - let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) - .unwrap() - .unwrap(); - ic.get_stacks_header_hashes( - num_headers + 1, - &ancestor.consensus_hash, - &mut BlockHeaderCache::new(), - ) - .unwrap() - }; - - let inv = peer - .chainstate() - .get_blocks_inventory(&block_hashes) - .unwrap(); - inv -} - -pub fn run_get_blocks_and_microblocks( - test_name: &str, - port_base: u16, - num_peers: usize, - make_topology: T, - block_generator: F, - mut peer_func: P, - mut check_breakage: C, - mut done_func: D, -) -> Vec -where - T: FnOnce(&mut Vec) -> (), - F: FnOnce( - usize, - &mut Vec, - ) -> Vec<( - ConsensusHash, - Option, - Option>, - )>, - P: FnMut(&mut Vec) -> (), - C: FnMut(&mut TestPeer) -> bool, - D: FnMut(&mut Vec) -> bool, -{ - assert!(num_peers > 0); - let first_sortition_height = 0; - - let mut peer_configs = vec![]; - for i in 0..num_peers { - let mut peer_config = TestPeerConfig::new( - test_name, - port_base + ((2 * i) as u16), - port_base + ((2 * i + 1) as u16), - ); - peer_config.burnchain.first_block_height = first_sortition_height; - - peer_configs.push(peer_config); - } - - make_topology(&mut peer_configs); - - let mut peers = vec![]; 
- for conf in peer_configs.drain(..) { - let peer = TestPeer::new(conf); - peers.push(peer); - } - - let mut num_blocks = 10; - let first_stacks_block_height = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(&peers[0].sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - let block_data = block_generator(num_blocks, &mut peers); - num_blocks = block_data.len(); - - let num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peers[0].sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - let mut dns_clients = vec![]; - let mut dns_threads = vec![]; - - for _ in 0..peers.len() { - let (dns_client, dns_thread_handle) = dns_thread_start(100); - dns_clients.push(dns_client); - dns_threads.push(dns_thread_handle); - } - - let mut round = 0; - let mut peer_invs = vec![BlocksInvData::empty(); num_peers]; - - let mut done = false; - - loop { - peer_func(&mut peers); - - let mut peers_behind_burnchain = false; - for i in 0..peers.len() { - let peer = &mut peers[i]; - - test_debug!("======= peer {} step begin =========", i); - let mut result = peer.step_dns(&mut dns_clients[i]).unwrap(); - - let lp = peer.network.local_peer.clone(); - peer.with_db_state(|sortdb, chainstate, relayer, mempool| { - relayer.process_network_result( - &lp, - &mut result, - sortdb, - chainstate, - mempool, - false, - None, - None, - ) - }) - .unwrap(); - - test_debug!( - "Peer {} processes {} blocks and {} microblock streams", - i, - result.blocks.len(), - result.confirmed_microblocks.len() - ); - - peer.with_peer_state(|peer, sortdb, chainstate, mempool| { - for i in 0..(result.blocks.len() + result.confirmed_microblocks.len() + 1) { - peer.coord.handle_new_stacks_block().unwrap(); - - let pox_id = { - let ic = sortdb.index_conn(); - let tip_sort_id = - SortitionDB::get_canonical_sortition_tip(sortdb.conn()).unwrap(); - let sortdb_reader = - SortitionHandleConn::open_reader(&ic, &tip_sort_id).unwrap(); - sortdb_reader.get_pox_id().unwrap() - }; - - test_debug!( - "\n\n{:?}: after stacks block, new tip PoX ID is {:?}\n\n", - &peer.to_neighbor().addr, - &pox_id - ); - } - Ok(()) - }) - .unwrap(); - - assert!(check_breakage(peer)); - - let peer_num_burn_blocks = { - let sn = - SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) - .unwrap(); - sn.block_height - }; - - peer_invs[i] = get_blocks_inventory(peer, 0, peer_num_burn_blocks); - peers_behind_burnchain = - peer_num_burn_blocks != num_burn_blocks || peers_behind_burnchain; - - test_debug!("Peer {} block inventory: {:?}", i, &peer_invs[i]); - - if let Some(ref inv) = peer.network.inv_state { - test_debug!("Peer {} inventory stats: {:?}", i, &inv.block_stats); - } - - let (mut inbound, mut outbound) = peer.network.dump_peer_table(); - - inbound.sort(); - outbound.sort(); - - test_debug!( - "Peer {} outbound ({}): {}", - i, - outbound.len(), - outbound.join(", ") - ); - test_debug!( - "Peer {} inbound ({}): {}", - i, - inbound.len(), - inbound.join(", ") - ); - test_debug!("======= peer {} step end =========", i); - } - - if !done { - done = !peers_behind_burnchain; - - for i in 0..num_peers { - for b in 0..num_blocks { - if !peer_invs[i].has_ith_block( - ((b as u64) + first_stacks_block_height - first_sortition_height) as u16, - ) { - if block_data[b].1.is_some() { - test_debug!( - "Peer {} is missing block {} at sortition height {} (between {} and {})", - i, - b, - (b as u64) + first_stacks_block_height - first_sortition_height, - first_stacks_block_height - first_sortition_height, 
- first_stacks_block_height - first_sortition_height - + (num_blocks as u64), - ); - done = false; - } - } - } - for b in 1..(num_blocks - 1) { - if !peer_invs[i].has_ith_microblock_stream( - ((b as u64) + first_stacks_block_height - first_sortition_height) as u16, - ) { - if block_data[b].2.is_some() { - test_debug!( - "Peer {} is missing microblock stream {} (between {} and {})", - i, - (b as u64) + first_stacks_block_height - first_sortition_height, - first_stacks_block_height - first_sortition_height, - first_stacks_block_height - first_sortition_height - + ((num_blocks - 1) as u64), - ); - done = false; - } - } - } - } - } - for (i, peer) in peers.iter().enumerate() { - test_debug!( - "Peer {} has done {} p2p state-machine passes; {} inv syncs, {} download-syncs", - i, - peer.network.num_state_machine_passes, - peer.network.num_inv_sync_passes, - peer.network.num_downloader_passes - ); - } - - if done { - // all blocks obtained, now do custom check - if done_func(&mut peers) { - break; - } - } - - round += 1; - } - - info!("Completed walk round {} step(s)", round); - - let mut peer_invs = vec![]; - for peer in peers.iter_mut() { - let peer_inv = get_blocks_inventory(peer, 0, num_burn_blocks); - peer_invs.push(peer_inv); - - let availability = get_peer_availability( - peer, - first_stacks_block_height - first_sortition_height, - first_stacks_block_height - first_sortition_height + (num_blocks as u64), - ); - - assert_eq!(availability.len(), num_blocks); - assert_eq!(block_data.len(), num_blocks); - - for ( - (sn_consensus_hash, stacks_block_opt, microblocks_opt), - (consensus_hash, stacks_block_hash_opt, neighbors), - ) in block_data.iter().zip(availability.iter()) - { - assert_eq!(*consensus_hash, *sn_consensus_hash); - - if stacks_block_hash_opt.is_some() { - assert!(stacks_block_opt.is_some()); - assert_eq!( - *stacks_block_hash_opt, - Some(stacks_block_opt.as_ref().unwrap().block_hash()) - ); - } else { - assert!(stacks_block_opt.is_none()); - } - } - } - - drop(dns_clients); - for handle in dns_threads.drain(..) 
{ - handle.join().unwrap(); - } - - peers -} - -#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - function_name!(), - 3200, - 2, - |ref mut peer_configs| { - // build initial network topology - assert_eq!(peer_configs.len(), 2); - - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peers[1].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[1].next_burnchain_block(burn_ops.clone()); - peers[1].process_stacks_epoch_at_tip(&stacks_block, µblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - peers[0].next_burnchain_block_raw(burn_ops); - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - - // no block advertisements (should be disabled) - let _ = peer.for_each_convo_p2p(|event_id, convo| { - let cnt = *(convo - .stats - .msg_rx_counts - .get(&StacksMessageID::BlocksAvailable) - .unwrap_or(&0)); - assert_eq!( - cnt, 0, - "neighbor event={} got {} BlocksAvailable messages", - event_id, cnt - ); - Ok(()) - }); - - true - }, - |_| true, - ); - }) -} - -fn make_contract_call_transaction( - miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - spending_account: &mut TestMiner, - contract_address: StacksAddress, - contract_name: &str, - function_name: &str, - args: Vec, - consensus_hash: &ConsensusHash, - block_hash: &BlockHeaderHash, - nonce_offset: u64, -) -> StacksTransaction { - let tx_cc = { - let mut tx_cc = StacksTransaction::new( - TransactionVersion::Testnet, - spending_account.as_transaction_auth().unwrap().into(), - TransactionPayload::new_contract_call( - contract_address, - contract_name, - function_name, - args, - ) - .unwrap(), - ); - - let chain_tip = StacksBlockHeader::make_index_block_hash(consensus_hash, block_hash); - let cur_nonce = chainstate - .with_read_only_clarity_tx(&sortdb.index_conn(), &chain_tip, |clarity_tx| { - clarity_tx.with_clarity_db_readonly(|clarity_db| { - clarity_db - .get_account_nonce(&spending_account.origin_address().unwrap().into()) - .unwrap() - }) - }) - .unwrap() - + nonce_offset; - - test_debug!( - "Nonce of {:?} is {} (+{}) at {}/{}", - &spending_account.origin_address().unwrap(), - cur_nonce, - nonce_offset, - consensus_hash, - block_hash - ); - - tx_cc.chain_id = 0x80000000; - tx_cc.auth.set_origin_nonce(cur_nonce); - tx_cc.set_tx_fee(MINIMUM_TX_FEE_RATE_PER_BYTE * 500); - - let mut tx_signer = StacksTransactionSigner::new(&tx_cc); - spending_account.sign_as_origin(&mut tx_signer); - - let tx_cc_signed = tx_signer.get_tx().unwrap(); - - test_debug!( - "make transaction {:?} off of {:?}/{:?}: {:?}", - 
&tx_cc_signed.txid(), - consensus_hash, - block_hash, - &tx_cc_signed - ); - - spending_account.set_nonce(cur_nonce + 1); - tx_cc_signed - }; - - tx_cc -} - -#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { - // 20 reward cycles - with_timeout(600, || { - run_get_blocks_and_microblocks( - "test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks", - 32100, - 2, - |ref mut peer_configs| { - // build initial network topology - assert_eq!(peer_configs.len(), 2); - - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - - // peer[1] has a big initial balance - let initial_balances = vec![( - PrincipalData::from(peer_configs[1].spending_account.origin_address().unwrap()), - 1_000_000_000_000_000, - )]; - - peer_configs[0].initial_balances = initial_balances.clone(); - peer_configs[1].initial_balances = initial_balances; - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - let spending_account = &mut peers[1].config.spending_account.clone(); - let burnchain = peers[1].config.burnchain.clone(); - - // function to make a tenure in which a the peer's miner stacks its STX - let mut make_stacking_tenure = |miner: &mut TestMiner, - sortdb: &mut SortitionDB, - chainstate: &mut StacksChainState, - vrfproof: VRFProof, - parent_opt: Option<&StacksBlock>, - microblock_parent_opt: Option< - &StacksMicroblockHeader, - >| { - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - - let stacks_tip_opt = - NakamotoChainState::get_canonical_block_header(chainstate.db(), sortdb) - .unwrap(); - let parent_tip = match stacks_tip_opt { - None => StacksChainState::get_genesis_header_info(chainstate.db()).unwrap(), - Some(header) => { - let ic = sortdb.index_conn(); - let snapshot = - SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &header.anchored_header.block_hash(), - ) - .unwrap() - .unwrap(); // succeeds because we don't fork - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &snapshot.consensus_hash, - &snapshot.winning_stacks_block_hash, - ) - .unwrap() - .unwrap() - } - }; - - let parent_header_hash = parent_tip.anchored_header.block_hash(); - let parent_consensus_hash = parent_tip.consensus_hash.clone(); - let parent_index_hash = StacksBlockHeader::make_index_block_hash( - &parent_consensus_hash, - &parent_header_hash, - ); - - let coinbase_tx = make_coinbase_with_nonce( - miner, - parent_tip.stacks_block_height as usize, - miner.get_nonce(), - None, - ); - - let stack_tx = make_contract_call_transaction( - miner, - sortdb, - chainstate, - spending_account, - StacksAddress::burn_address(false), - "pox", - "stack-stx", - vec![ - Value::UInt(1_000_000_000_000_000 / 2), - execute("{ version: 0x00, hashbytes: 0x1000000010000000100000010000000100000001 }").unwrap().unwrap(), - Value::UInt((tip.block_height + 1) as u128), - Value::UInt(12) - ], - &parent_consensus_hash, - &parent_header_hash, - 0 - ); - - let mblock_tx = make_contract_call_transaction( - miner, - sortdb, - chainstate, - spending_account, - StacksAddress::burn_address(false), - "pox", - "get-pox-info", - vec![], - &parent_consensus_hash, - &parent_header_hash, - 4, - ); - - let mblock_privkey = 
StacksPrivateKey::new(); - - let mblock_pubkey_hash_bytes = Hash160::from_data( - &StacksPublicKey::from_private(&mblock_privkey).to_bytes(), - ); - - let mut builder = StacksBlockBuilder::make_block_builder( - &burnchain, - chainstate.mainnet, - &parent_tip, - vrfproof, - tip.total_burn, - mblock_pubkey_hash_bytes, - ) - .unwrap(); - builder.set_microblock_privkey(mblock_privkey); - - let (anchored_block, _size, _cost, microblock_opt) = - StacksBlockBuilder::make_anchored_block_and_microblock_from_txs( - builder, - chainstate, - &sortdb.index_conn(), - vec![coinbase_tx, stack_tx], - vec![mblock_tx], - ) - .unwrap(); - - (anchored_block, vec![microblock_opt.unwrap()]) - }; - - for i in 0..50 { - let (mut burn_ops, stacks_block, microblocks) = if i == 1 { - peers[1].make_tenure(&mut make_stacking_tenure) - } else { - peers[1].make_default_tenure() - }; - - let (_, burn_header_hash, consensus_hash) = - peers[1].next_burnchain_block(burn_ops.clone()); - peers[1].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - peers[0].next_burnchain_block_raw(burn_ops); - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - - // no block advertisements (should be disabled) - let _ = peer.for_each_convo_p2p(|event_id, convo| { - let cnt = *(convo - .stats - .msg_rx_counts - .get(&StacksMessageID::BlocksAvailable) - .unwrap_or(&0)); - assert_eq!( - cnt, 0, - "neighbor event={} got {} BlocksAvailable messages", - event_id, cnt - ); - Ok(()) - }); - - true - }, - |_| true, - ); - }) -} - -#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_5_peers_star() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - function_name!(), - 3210, - 5, - |ref mut peer_configs| { - // build initial network topology -- a star with - // peers[0] at the center, with all the blocks - assert_eq!(peer_configs.len(), 5); - let mut neighbors = vec![]; - - for p in peer_configs.iter_mut() { - p.connection_opts.disable_block_advertisement = true; - p.connection_opts.max_clients_per_host = 30; - } - - let peer_0 = peer_configs[0].to_neighbor(); - for i in 1..peer_configs.len() { - neighbors.push(peer_configs[i].to_neighbor()); - peer_configs[i].add_neighbor(&peer_0); - } - - for n in neighbors.drain(..) 
{ - peer_configs[0].add_neighbor(&n); - } - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - true - }, - |_| true, - ); - }) -} - -#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_5_peers_line() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - function_name!(), - 3220, - 5, - |ref mut peer_configs| { - // build initial network topology -- a line with - // peers[0] at the left, with all the blocks - assert_eq!(peer_configs.len(), 5); - let mut neighbors = vec![]; - - for p in peer_configs.iter_mut() { - p.connection_opts.disable_block_advertisement = true; - p.connection_opts.max_clients_per_host = 30; - } - - for i in 0..peer_configs.len() { - neighbors.push(peer_configs[i].to_neighbor()); - } - - for i in 0..peer_configs.len() - 1 { - peer_configs[i].add_neighbor(&neighbors[i + 1]); - peer_configs[i + 1].add_neighbor(&neighbors[i]); - } - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - true - }, - |_| true, - ); - }) -} - -#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - function_name!(), - 3230, - 5, - |ref mut peer_configs| { - // build initial network topology -- a star with - // peers[0] at the center, with all the blocks - assert_eq!(peer_configs.len(), 5); - let mut neighbors = vec![]; - - for p in peer_configs.iter_mut() { - p.connection_opts.disable_block_advertisement = true; - } - - let peer_0 = peer_configs[0].to_neighbor(); - - for i in 1..peer_configs.len() { - neighbors.push(peer_configs[i].to_neighbor()); - peer_configs[i].add_neighbor(&peer_0); - - // severely restrict 
the number of allowed - // connections in each peer - peer_configs[i].connection_opts.max_clients_per_host = 1; - peer_configs[i].connection_opts.num_clients = 1; - peer_configs[i].connection_opts.idle_timeout = 1; - peer_configs[i].connection_opts.max_http_clients = 1; - } - - for n in neighbors.drain(..) { - peer_configs[0].add_neighbor(&n); - } - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - true - }, - |_| true, - ); - }) -} - -#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { - // this one can go for a while - with_timeout(1200, || { - run_get_blocks_and_microblocks( - function_name!(), - 3240, - 5, - |ref mut peer_configs| { - // build initial network topology -- a star with - // peers[0] at the center, with all the blocks - assert_eq!(peer_configs.len(), 5); - let mut neighbors = vec![]; - - for p in peer_configs.iter_mut() { - p.connection_opts.disable_block_advertisement = true; - } - - let peer_0 = peer_configs[0].to_neighbor(); - - for i in 1..peer_configs.len() { - neighbors.push(peer_configs[i].to_neighbor()); - peer_configs[i].add_neighbor(&peer_0); - - // severely restrict the number of events - peer_configs[i].connection_opts.max_sockets = 10; - } - - for n in neighbors.drain(..) 
{ - peer_configs[0].add_neighbor(&n); - } - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peers[0].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[0].next_burnchain_block(burn_ops.clone()); - peers[0].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - for i in 1..peers.len() { - peers[i].next_burnchain_block_raw(burn_ops.clone()); - } - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[0].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - true - }, - |_| true, - ); - }) -} - -#[test] -#[ignore] -#[should_panic(expected = "blocked URL")] -pub fn test_get_blocks_and_microblocks_ban_url() { - use std::net::TcpListener; - use std::thread; - - let listener_1 = TcpListener::bind("127.0.0.1:3260").unwrap(); - let listener_2 = TcpListener::bind("127.0.0.1:3262").unwrap(); - - let endpoint_thread_1 = thread::spawn(move || { - let (sock, addr) = listener_1.accept().unwrap(); - test_debug!("Accepted 1 {:?}", &addr); - sleep_ms(60_000); - }); - - let endpoint_thread_2 = thread::spawn(move || { - let (sock, addr) = listener_2.accept().unwrap(); - test_debug!("Accepted 2 {:?}", &addr); - sleep_ms(60_000); - }); - - run_get_blocks_and_microblocks( - function_name!(), - 3250, - 2, - |ref mut peer_configs| { - // build initial network topology - assert_eq!(peer_configs.len(), 2); - - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - // announce URLs to our fake handlers - peer_configs[0].data_url = - UrlString::try_from("http://127.0.0.1:3260".to_string()).unwrap(); - peer_configs[1].data_url = - UrlString::try_from("http://127.0.0.1:3262".to_string()).unwrap(); - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - // build up block data to replicate - let mut block_data = vec![]; - for _ in 0..num_blocks { - let (mut burn_ops, stacks_block, microblocks) = peers[1].make_default_tenure(); - - let (_, burn_header_hash, consensus_hash) = - peers[1].next_burnchain_block(burn_ops.clone()); - peers[1].process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - peers[0].next_burnchain_block_raw(burn_ops); - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } - block_data - }, - |_| {}, - |peer| { - let mut blocked = 0; - match peer.network.block_downloader { - Some(ref dl) => { - blocked = dl.blocked_urls.len(); - } - None => {} - } - if blocked >= 1 { - // NOTE: this is the success criterion - panic!("blocked URL"); - } - true - }, - |_| true, - ); - - endpoint_thread_1.join().unwrap(); - endpoint_thread_2.join().unwrap(); -} - 
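The ban-URL test deleted above inverts the usual pass/fail convention: the #[should_panic(expected = "blocked URL")] attribute makes an explicit panic! the success criterion, and bare TcpListener threads stand in for the peers' HTTP data endpoints so the downloader has concrete URLs to ban. A minimal, self-contained sketch of the same idiom follows; the names, port selection, and fetch logic are illustrative assumptions, not code from stacks-core:

    use std::net::{TcpListener, TcpStream};
    use std::thread;

    // Toy "downloader" that refuses a banned URL by panicking, mirroring how
    // the deleted test signals success via panic!("blocked URL").
    // (Hypothetical helper, not a stacks-core API.)
    fn fetch(url: &str, banned: &[&str]) {
        if banned.contains(&url) {
            // Reaching this branch is the success criterion under should_panic.
            panic!("blocked URL");
        }
        // ... a real fetch would happen here ...
    }

    #[test]
    #[should_panic(expected = "blocked URL")]
    fn test_banned_url_panics() {
        // Fake endpoint: accept one connection and exit, much like the
        // listener threads in the deleted test (minus the 60s sleep).
        let listener = TcpListener::bind("127.0.0.1:0").unwrap();
        let addr = listener.local_addr().unwrap();
        let endpoint = thread::spawn(move || {
            let (_sock, peer) = listener.accept().unwrap();
            println!("accepted {:?}", peer);
        });

        // Poke the endpoint so accept() returns, then let the thread finish.
        let _ = TcpStream::connect(addr);
        endpoint.join().unwrap();

        // The "downloader" sees a banned URL and panics -> the test passes.
        let url = format!("http://{}", addr);
        fetch(&url, &[url.as_str()]);
    }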
-#[test] -#[ignore] -pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_descendants() { - with_timeout(600, || { - run_get_blocks_and_microblocks( - function_name!(), - 3260, - 2, - |ref mut peer_configs| { - // build initial network topology - assert_eq!(peer_configs.len(), 2); - - peer_configs[0].connection_opts.disable_block_advertisement = true; - peer_configs[1].connection_opts.disable_block_advertisement = true; - - let peer_0 = peer_configs[0].to_neighbor(); - let peer_1 = peer_configs[1].to_neighbor(); - peer_configs[0].add_neighbor(&peer_1); - peer_configs[1].add_neighbor(&peer_0); - }, - |num_blocks, ref mut peers| { - // build up block data to replicate. - // chainstate looks like this: - // - // [tenure-1] <- [mblock] <- [mblock] <- [mblock] <- [mblock] <- ... - // \ \ \ \ - // \ \ \ \ - // [tenure-2] [tenure-3] [tenure-4] [tenure-5] ... - // - let mut block_data = vec![]; - let mut microblock_stream = vec![]; - let mut first_block_height = 0; - for i in 0..num_blocks { - if i == 0 { - let (mut burn_ops, stacks_block, mut microblocks) = - peers[1].make_default_tenure(); - - // extend to 10 microblocks - while microblocks.len() != num_blocks { - let next_microblock_payload = TransactionPayload::SmartContract( - TransactionSmartContract { - name: ContractName::try_from(format!( - "hello-world-{}", - thread_rng().gen::<u64>() - )) - .expect("FATAL: valid name"), - code_body: StacksString::from_str( - "(begin (print \"hello world\"))", - ) - .expect("FATAL: valid code"), - }, - None, - ); - let mut mblock = microblocks.last().unwrap().clone(); - let last_nonce = mblock - .txs - .last() - .as_ref() - .unwrap() - .auth() - .get_origin_nonce(); - let prev_block = mblock.block_hash(); - - let signed_tx = sign_standard_singlesig_tx( - next_microblock_payload, - &peers[1].miner.privks[0], - last_nonce + 1, - 0, - ); - let txids = vec![signed_tx.txid().as_bytes().to_vec()]; - let merkle_tree = MerkleTree::<Sha512Trunc256Sum>::new(&txids); - let tx_merkle_root = merkle_tree.root(); - - mblock.txs = vec![signed_tx]; - mblock.header.tx_merkle_root = tx_merkle_root; - mblock.header.prev_block = prev_block; - mblock.header.sequence += 1; - mblock - .header - .sign(peers[1].miner.microblock_privks.last().as_ref().unwrap()) - .unwrap(); - - microblocks.push(mblock); - } - - let (_, burn_header_hash, consensus_hash) = - peers[1].next_burnchain_block(burn_ops.clone()); - - peers[1].process_stacks_epoch(&stacks_block, &consensus_hash, &microblocks); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - peers[0].next_burnchain_block_raw(burn_ops); - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - - microblock_stream = microblocks.clone(); - first_block_height = sn.block_height as u32; - - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(microblocks), - )); - } else { - test_debug!("Build child block {}", i); - let tip = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - - let chainstate_path = peers[1].chainstate_path.clone(); - let burnchain = peers[1].config.burnchain.clone(); - - let (mut burn_ops, stacks_block, _) = peers[1].make_tenure( - |ref mut miner, - ref mut sortdb, - ref mut chainstate, - vrf_proof, - ref parent_opt, - ref parent_microblock_header_opt| { - let mut parent_tip = - StacksChainState::get_anchored_block_header_info( - chainstate.db(), - &block_data[0].0, - &block_data[0].1.as_ref().unwrap().block_hash(), - 
) - .unwrap() - .unwrap(); - - parent_tip.microblock_tail = - Some(microblock_stream[i - 1].header.clone()); - - let mut mempool = - MemPoolDB::open_test(false, 0x80000000, &chainstate_path) - .unwrap(); - let coinbase_tx = - make_coinbase_with_nonce(miner, i, (i + 2) as u64, None); - - let (anchored_block, block_size, block_execution_cost) = - StacksBlockBuilder::build_anchored_block( - chainstate, - &sortdb.index_conn(), - &mut mempool, - &parent_tip, - parent_tip - .anchored_header - .as_stacks_epoch2() - .unwrap() - .total_work - .burn - + 1000, - vrf_proof, - Hash160([i as u8; 20]), - &coinbase_tx, - BlockBuilderSettings::max_value(), - None, - &burnchain, - ) - .unwrap(); - (anchored_block, vec![]) - }, - ); - - for burn_op in burn_ops.iter_mut() { - if let BlockstackOperationType::LeaderBlockCommit(ref mut op) = burn_op - { - op.parent_block_ptr = first_block_height; - op.block_header_hash = stacks_block.block_hash(); - } - } - - let (_, burn_header_hash, consensus_hash) = - peers[1].next_burnchain_block(burn_ops.clone()); - - peers[1].process_stacks_epoch(&stacks_block, &consensus_hash, &vec![]); - - TestPeer::set_ops_burn_header_hash(&mut burn_ops, &burn_header_hash); - - peers[0].next_burnchain_block_raw(burn_ops); - - let sn = SortitionDB::get_canonical_burn_chain_tip( - &peers[1].sortdb.as_ref().unwrap().conn(), - ) - .unwrap(); - - block_data.push(( - sn.consensus_hash.clone(), - Some(stacks_block), - Some(vec![]), - )); - } - } - block_data - }, - |_| {}, - |peer| { - // check peer health - // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} - } - - // no block advertisements (should be disabled) - let _ = peer.for_each_convo_p2p(|event_id, convo| { - let cnt = *(convo - .stats - .msg_rx_counts - .get(&StacksMessageID::BlocksAvailable) - .unwrap_or(&0)); - assert_eq!( - cnt, 0, - "neighbor event={} got {} BlocksAvailable messages", - event_id, cnt - ); - Ok(()) - }); - - true - }, - |_| true, - ); - }) -} diff --git a/stackslib/src/net/tests/download/mod.rs b/stackslib/src/net/tests/download/mod.rs deleted file mode 100644 index 430b92e4144..00000000000 --- a/stackslib/src/net/tests/download/mod.rs +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -pub mod epoch2x; -pub mod nakamoto; diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs deleted file mode 100644 index 73472c9c565..00000000000 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ /dev/null @@ -1,2240 +0,0 @@ -// Copyright (C) 2013-2020 Blockstack PBC, a public benefit corporation -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see <http://www.gnu.org/licenses/>. - -use std::collections::HashMap; -use std::sync::mpsc::sync_channel; -use std::thread; - -use stacks_common::bitvec::BitVec; -use stacks_common::types::chainstate::{ - ConsensusHash, StacksAddress, StacksBlockId, StacksPrivateKey, TrieHash, -}; -use stacks_common::types::net::PeerAddress; -use stacks_common::types::StacksEpochId; -use stacks_common::util::get_epoch_time_secs; -use stacks_common::util::hash::{hex_bytes, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::MessageSignature; -use stacks_common::util::vrf::VRFProof; - -use crate::burnchains::PoxConstants; -use crate::chainstate::burn::BlockSnapshot; -use crate::chainstate::nakamoto::test_signers::TestSigners; -use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoChainState}; -use crate::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; -use crate::chainstate::stacks::{ - CoinbasePayload, StacksTransaction, TenureChangeCause, TenureChangePayload, ThresholdSignature, - TokenTransferMemo, TransactionAnchorMode, TransactionAuth, TransactionPayload, - TransactionVersion, -}; -use crate::clarity::vm::types::StacksAddressExtensions; -use crate::net::api::gettenureinfo::RPCGetTenureInfo; -use crate::net::download::nakamoto::{TenureStartEnd, WantedTenure, *}; -use crate::net::inv::nakamoto::NakamotoTenureInv; -use crate::net::test::{dns_thread_start, TestEventObserver}; -use crate::net::tests::inv::nakamoto::{make_nakamoto_peer_from_invs, peer_get_nakamoto_invs}; -use crate::net::tests::{NakamotoBootPlan, TestPeer}; -use crate::net::{Error as NetError, Hash160, NeighborAddress, SortitionDB}; -use crate::stacks_common::types::Address; -use crate::util_lib::db::Error as DBError; - -impl NakamotoDownloadStateMachine { - /// Find the list of wanted tenures for the given reward cycle. The reward cycle must - /// be complete already. Used for testing. - /// - /// Returns a reward cycle's wanted tenures. - /// Returns a DB error if the snapshot does not correspond to a full reward cycle. 
- #[cfg(test)] - pub(crate) fn load_wanted_tenures_for_reward_cycle( - cur_rc: u64, - tip: &BlockSnapshot, - sortdb: &SortitionDB, - ) -> Result<Vec<WantedTenure>, NetError> { - // careful -- need .saturating_sub(1) since this calculation puts the reward cycle start at - // block height 1 mod reward cycle len, but we really want 0 mod reward cycle len - let first_block_height = sortdb - .pox_constants - .reward_cycle_to_block_height(sortdb.first_block_height, cur_rc) - .saturating_sub(1); - let last_block_height = sortdb - .pox_constants - .reward_cycle_to_block_height(sortdb.first_block_height, cur_rc.saturating_add(1)) - .saturating_sub(1); - - test_debug!( - "Load reward cycle sortitions between {} and {} (rc is {})", - first_block_height, - last_block_height, - cur_rc - ); - - // find all sortitions in this reward cycle - let ih = sortdb.index_handle(&tip.sortition_id); - Self::load_wanted_tenures(&ih, first_block_height, last_block_height) - } -} - -#[test] -fn test_nakamoto_tenure_downloader() { - let ch = ConsensusHash([0x11; 20]); - let private_key = StacksPrivateKey::new(); - let mut test_signers = TestSigners::default(); - - let aggregate_public_key = test_signers.aggregate_public_key.clone(); - - let tenure_start_header = NakamotoBlockHeader { - version: 1, - chain_length: 2, - burn_spent: 3, - consensus_hash: ConsensusHash([0x04; 20]), - parent_block_id: StacksBlockId([0x05; 32]), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - - let tenure_change_payload = TenureChangePayload { - tenure_consensus_hash: ConsensusHash([0x04; 20]), - prev_tenure_consensus_hash: ConsensusHash([0x03; 20]), - burn_view_consensus_hash: ConsensusHash([0x04; 20]), - previous_tenure_end: tenure_start_header.parent_block_id.clone(), - previous_tenure_blocks: 1, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x02; 20]), - }; - use stacks_common::types::net::PeerAddress; - let proof_bytes = hex_bytes("9275df67a68c8745c0ff97b48201ee6db447f7c93b23ae24cdc2400f52fdb08a1a6ac7ec71bf9c9c76e96ee4675ebff60625af28718501047bfd87b810c2d2139b73c23bd69de66360953a642c2a330a").unwrap(); - let proof = VRFProof::from_bytes(&proof_bytes[..].to_vec()).unwrap(); - - let coinbase_payload = - TransactionPayload::Coinbase(CoinbasePayload([0x12; 32]), None, Some(proof.clone())); - - let mut coinbase_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&private_key).unwrap(), - coinbase_payload.clone(), - ); - coinbase_tx.chain_id = 0x80000000; - coinbase_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - - let mut tenure_change_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&private_key).unwrap(), - TransactionPayload::TenureChange(tenure_change_payload.clone()), - ); - tenure_change_tx.chain_id = 0x80000000; - tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - - let recipient_addr = - StacksAddress::from_string("ST2YM3J4KQK09V670TD6ZZ1XYNYCNGCWCVTASN5VM").unwrap(); - let mut stx_transfer = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&private_key).unwrap(), - TransactionPayload::TokenTransfer( - recipient_addr.to_account_principal(), - 1, - TokenTransferMemo([0x00; 34]), - ), - ); - stx_transfer.chain_id = 0x80000000; - stx_transfer.anchor_mode = TransactionAnchorMode::OnChainOnly; - - let mut 
tenure_start_block = NakamotoBlock { - header: tenure_start_header.clone(), - txs: vec![tenure_change_tx.clone(), coinbase_tx.clone()], - }; - test_signers.sign_nakamoto_block(&mut tenure_start_block, 0); - - let mut blocks = vec![tenure_start_block.clone()]; - for i in 0..10 { - let last_block = blocks.last().unwrap(); - let header = NakamotoBlockHeader { - version: 1, - chain_length: last_block.header.chain_length + 1, - burn_spent: last_block.header.burn_spent + 1, - consensus_hash: last_block.header.consensus_hash.clone(), - parent_block_id: last_block.header.block_id(), - tx_merkle_root: Sha512Trunc256Sum([0x06; 32]), - state_index_root: TrieHash([0x07; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - - let mut block = NakamotoBlock { - header, - txs: vec![stx_transfer.clone()], - }; - test_signers.sign_nakamoto_block(&mut block, 0); - blocks.push(block); - } - - let next_tenure_start_header = NakamotoBlockHeader { - version: 1, - chain_length: blocks.last().unwrap().header.chain_length + 1, - burn_spent: blocks.last().unwrap().header.burn_spent + 1, - consensus_hash: ConsensusHash([0x05; 20]), - parent_block_id: blocks.last().unwrap().header.block_id(), - tx_merkle_root: Sha512Trunc256Sum([0x07; 32]), - state_index_root: TrieHash([0x08; 32]), - miner_signature: MessageSignature::empty(), - signer_signature: ThresholdSignature::empty(), - signer_bitvec: BitVec::zeros(1).unwrap(), - }; - - let next_tenure_change_payload = TenureChangePayload { - tenure_consensus_hash: ConsensusHash([0x05; 20]), - prev_tenure_consensus_hash: ConsensusHash([0x04; 20]), - burn_view_consensus_hash: ConsensusHash([0x05; 20]), - previous_tenure_end: next_tenure_start_header.parent_block_id.clone(), - previous_tenure_blocks: 11, - cause: TenureChangeCause::BlockFound, - pubkey_hash: Hash160([0x02; 20]), - }; - - let mut next_tenure_change_tx = StacksTransaction::new( - TransactionVersion::Testnet, - TransactionAuth::from_p2pkh(&private_key).unwrap(), - TransactionPayload::TenureChange(next_tenure_change_payload.clone()), - ); - next_tenure_change_tx.chain_id = 0x80000000; - next_tenure_change_tx.anchor_mode = TransactionAnchorMode::OnChainOnly; - - let mut next_tenure_start_block = NakamotoBlock { - header: next_tenure_start_header.clone(), - txs: vec![next_tenure_change_tx.clone(), coinbase_tx.clone()], - }; - test_signers.sign_nakamoto_block(&mut next_tenure_start_block, 0); - - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: 123, - public_key_hash: Hash160([0xff; 20]), - }; - - let mut td = NakamotoTenureDownloader::new( - ch, - tenure_start_block.header.block_id(), - next_tenure_start_block.header.block_id(), - naddr.clone(), - aggregate_public_key.clone(), - aggregate_public_key.clone(), - ); - - // must be first block - assert_eq!( - td.state, - NakamotoTenureDownloadState::GetTenureStartBlock(tenure_start_block.header.block_id()) - ); - assert!(td - .try_accept_tenure_start_block(blocks.last().unwrap().clone()) - .is_err()); - assert!(td - .try_accept_tenure_start_block(next_tenure_start_block.clone()) - .is_err()); - - // advance state - assert!(td - .try_accept_tenure_start_block(blocks.first().unwrap().clone()) - .is_ok()); - - let NakamotoTenureDownloadState::WaitForTenureEndBlock(block_id, _) = td.state else { - panic!("wrong state"); - }; - assert_eq!(block_id, next_tenure_start_block.header.block_id()); - assert_eq!(td.tenure_start_block, 
Some(tenure_start_block.clone())); - assert!(td.tenure_length().is_none()); - - // must be last block - assert!(td.try_accept_tenure_end_block(&tenure_start_block).is_err()); - assert!(td - .try_accept_tenure_end_block(blocks.last().unwrap()) - .is_err()); - - // advance state - assert!(td - .try_accept_tenure_end_block(&next_tenure_start_block) - .is_ok()); - assert_eq!( - td.state, - NakamotoTenureDownloadState::GetTenureBlocks( - next_tenure_start_block.header.parent_block_id.clone() - ) - ); - assert_eq!( - td.tenure_end_header, - Some(( - next_tenure_start_block.header.clone(), - next_tenure_change_payload.clone() - )) - ); - assert_eq!(td.tenure_length(), Some(11)); - - let mut td_one_shot = td.clone(); - - // advance state, one block at a time - for block in blocks.iter().rev() { - if block.header.block_id() == tenure_start_block.header.block_id() { - break; - } - // must be accepted in order - assert!(td - .try_accept_tenure_blocks(vec![next_tenure_start_block.clone()]) - .is_err()); - - let res = td.try_accept_tenure_blocks(vec![block.clone()]); - assert!(res.is_ok()); - assert!(res.unwrap().is_none()); - - // tail pointer moved - assert_eq!( - td.state, - NakamotoTenureDownloadState::GetTenureBlocks(block.header.parent_block_id.clone()) - ); - } - - // get full tenure - let res = td.try_accept_tenure_blocks(vec![tenure_start_block.clone()]); - assert!(res.is_ok()); - let res_blocks = res.unwrap().unwrap(); - assert_eq!(res_blocks.len(), blocks.len()); - assert_eq!(res_blocks, blocks); - assert_eq!(td.state, NakamotoTenureDownloadState::Done); - - // also works if we give blocks in one shot - let res = td_one_shot.try_accept_tenure_blocks(blocks.clone().into_iter().rev().collect()); - assert!(res.is_ok()); - assert_eq!(res.unwrap().unwrap(), blocks); - assert_eq!(td_one_shot.state, NakamotoTenureDownloadState::Done); - - // TODO: - // * bad signature - // * too many blocks -} - -#[test] -fn test_nakamoto_unconfirmed_tenure_downloader() { - let observer = TestEventObserver::new(); - let bitvecs = vec![vec![ - true, true, true, true, true, true, true, true, true, true, - ]]; - - let rc_len = 10u64; - let peer = make_nakamoto_peer_from_invs( - function_name!(), - &observer, - rc_len as u32, - 3, - bitvecs.clone(), - ); - let (mut peer, reward_cycle_invs) = - peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); - - let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); - - assert_eq!(tip.block_height, 51); - - let test_signers = TestSigners::default(); - - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: 123, - public_key_hash: Hash160([0xff; 20]), - }; - - peer.refresh_burnchain_view(); - let tip_block_id = StacksBlockId::new(&peer.network.stacks_tip.0, &peer.network.stacks_tip.1); - - let tip_ch = peer.network.stacks_tip.0.clone(); - let parent_tip_ch = peer.network.parent_stacks_tip.0.clone(); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); - - let unconfirmed_tenure = peer - .chainstate() - .nakamoto_blocks_db() - .get_all_blocks_in_tenure(&tip_ch) - .unwrap(); - let last_confirmed_tenure = peer - .chainstate() - .nakamoto_blocks_db() - .get_all_blocks_in_tenure(&parent_tip_ch) - .unwrap(); - - let parent_parent_header = NakamotoChainState::get_block_header_nakamoto( - peer.chainstate().db(), - 
&last_confirmed_tenure - .first() - .as_ref() - .unwrap() - .header - .parent_block_id, - ) - .unwrap() - .unwrap(); - let parent_parent_start_header = NakamotoChainState::get_nakamoto_tenure_start_block_header( - peer.chainstate().db(), - &parent_parent_header.consensus_hash, - ) - .unwrap() - .unwrap(); - - assert!(unconfirmed_tenure.len() > 0); - assert!(last_confirmed_tenure.len() > 0); - - assert_eq!( - unconfirmed_tenure.first().as_ref().unwrap().block_id(), - peer.network.tenure_start_block_id - ); - assert_eq!( - unconfirmed_tenure - .first() - .as_ref() - .unwrap() - .header - .parent_block_id, - last_confirmed_tenure.last().as_ref().unwrap().block_id() - ); - - let tip_rc = peer - .network - .burnchain - .block_height_to_reward_cycle(peer.network.burnchain_tip.block_height) - .expect("FATAL: burnchain tip before system start"); - - let highest_confirmed_wanted_tenure = WantedTenure { - tenure_id_consensus_hash: peer.network.parent_stacks_tip.0.clone(), - winning_block_id: parent_parent_start_header.index_block_hash(), - processed: false, - burn_height: peer.network.burnchain_tip.block_height - 1, - }; - - let unconfirmed_wanted_tenure = WantedTenure { - tenure_id_consensus_hash: peer.network.stacks_tip.0.clone(), - winning_block_id: last_confirmed_tenure - .first() - .as_ref() - .unwrap() - .header - .parent_block_id - .clone(), - processed: false, - burn_height: peer.network.burnchain_tip.block_height, - }; - - // we've processed the tip already, so we transition straight to the Done state - { - let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(tip_block_id)); - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - - let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), - tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), - parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, - ), - tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, - ), - tip_height: peer.network.stacks_tip.2, - reward_cycle: tip_rc, - }; - - let sortdb = peer.sortdb.take().unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - utd.try_accept_tenure_info( - &sortdb, - &sort_tip, - peer.chainstate(), - tenure_tip.clone(), - &agg_pubkeys, - ) - .unwrap(); - - peer.sortdb = Some(sortdb); - - assert!(utd.unconfirmed_tenure_start_block.is_some()); - - // because the highest processed block is the same as .tip_block_id, we're done - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); - - // we can request the highest-complete tenure - assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); - assert_eq!( - ntd.state, - NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() - ) - ); - } - - // we've processed the first block in the unconfirmed tenure, but not the tip, so we transition to - // the GetUnconfirmedTenureBlocks(..) state. 
- { - let mid_tip_block_id = unconfirmed_tenure.first().as_ref().unwrap().block_id(); - - let mut utd = - NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - - let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), - tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), - parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, - ), - tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, - ), - tip_height: peer.network.stacks_tip.2, - reward_cycle: tip_rc, - }; - - let sortdb = peer.sortdb.take().unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - utd.try_accept_tenure_info( - &sortdb, - &sort_tip, - peer.chainstate(), - tenure_tip.clone(), - &agg_pubkeys, - ) - .unwrap(); - - peer.sortdb = Some(sortdb); - - assert!(utd.unconfirmed_tenure_start_block.is_some()); - - // because we already have processed the start-block of this unconfirmed tenure, we'll - // advance straight to getting more unconfirmed tenure blocks - assert_eq!( - utd.state, - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone() - ) - ); - assert_eq!(utd.tenure_tip, Some(tenure_tip.clone())); - - // fill in blocks - for (i, block) in unconfirmed_tenure.iter().enumerate().rev() { - let res = utd - .try_accept_unconfirmed_tenure_blocks(vec![block.clone()]) - .unwrap(); - if i == 0 { - // res won't contain the first block because it stopped processing once it reached - // a block that the node knew - assert_eq!(res.unwrap(), unconfirmed_tenure[1..].to_vec()); - break; - } else { - assert!(res.is_none()); - } - } - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); - - // we can request the highest-complete tenure - assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); - assert_eq!( - ntd.state, - NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() - ) - ); - } - - // we've processed the middle block in the unconfirmed tenure, but not the tip, so we transition to - // the GetUnconfirmedTenureBlocks(..) state. 
- { - let mid_tip_block_id = unconfirmed_tenure.get(5).unwrap().block_id(); - - let mut utd = - NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), Some(mid_tip_block_id)); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - - let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), - tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), - parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, - ), - tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, - ), - tip_height: peer.network.stacks_tip.2, - reward_cycle: tip_rc, - }; - - let sortdb = peer.sortdb.take().unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - utd.try_accept_tenure_info( - &sortdb, - &sort_tip, - peer.chainstate(), - tenure_tip.clone(), - &agg_pubkeys, - ) - .unwrap(); - - peer.sortdb = Some(sortdb); - - assert!(utd.unconfirmed_tenure_start_block.is_some()); - - // because we already have processed the start-block of this unconfirmed tenure, we'll - // advance straight to getting more unconfirmed tenure blocks - assert_eq!( - utd.state, - NakamotoUnconfirmedDownloadState::GetUnconfirmedTenureBlocks( - tenure_tip.tip_block_id.clone() - ) - ); - assert_eq!(utd.tenure_tip, Some(tenure_tip.clone())); - - // fill in blocks - for (i, block) in unconfirmed_tenure.iter().enumerate().rev() { - let res = utd - .try_accept_unconfirmed_tenure_blocks(vec![block.clone()]) - .unwrap(); - if i == unconfirmed_tenure.len() - 5 { - // got back only the blocks we were missing - assert_eq!( - res.unwrap(), - unconfirmed_tenure[(unconfirmed_tenure.len() - 4)..].to_vec() - ); - break; - } else { - assert!(res.is_none()); - } - } - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); - - // we can request the highest-complete tenure - assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); - assert_eq!( - ntd.state, - NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() - ) - ); - } - - // we haven't processed anything yet. - // serve all of the unconfirmed blocks in one shot. 
- { - let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - - let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), - tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), - parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, - ), - tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, - ), - tip_height: peer.network.stacks_tip.2, - reward_cycle: tip_rc, - }; - - let sortdb = peer.sortdb.take().unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - utd.try_accept_tenure_info( - &sortdb, - &sort_tip, - peer.chainstate(), - tenure_tip.clone(), - &agg_pubkeys, - ) - .unwrap(); - - peer.sortdb = Some(sortdb); - - assert!(utd.unconfirmed_tenure_start_block.is_some()); - - let res = utd - .try_accept_unconfirmed_tenure_blocks( - unconfirmed_tenure.clone().into_iter().rev().collect(), - ) - .unwrap(); - assert_eq!(res.unwrap(), unconfirmed_tenure); - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::Done); - - // we can request the highest-complete tenure - assert!(!utd.need_highest_complete_tenure(peer.chainstate()).unwrap()); - - let ntd = utd - .make_highest_complete_tenure_downloader( - &highest_confirmed_wanted_tenure, - &unconfirmed_wanted_tenure, - ) - .unwrap(); - assert_eq!( - ntd.state, - NakamotoTenureDownloadState::GetTenureStartBlock( - unconfirmed_wanted_tenure.winning_block_id.clone() - ) - ); - } - - // bad block signature - { - let mut utd = NakamotoUnconfirmedTenureDownloader::new(naddr.clone(), None); - utd.confirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - utd.unconfirmed_aggregate_public_key = - Some(agg_pubkeys.get(&tip_rc).cloned().unwrap().unwrap()); - - assert_eq!(utd.state, NakamotoUnconfirmedDownloadState::GetTenureInfo); - - let tenure_tip = RPCGetTenureInfo { - consensus_hash: peer.network.stacks_tip.0.clone(), - tenure_start_block_id: peer.network.tenure_start_block_id.clone(), - parent_consensus_hash: peer.network.parent_stacks_tip.0.clone(), - parent_tenure_start_block_id: StacksBlockId::new( - &peer.network.parent_stacks_tip.0, - &peer.network.parent_stacks_tip.1, - ), - tip_block_id: StacksBlockId::new( - &peer.network.stacks_tip.0, - &peer.network.stacks_tip.1, - ), - tip_height: peer.network.stacks_tip.2, - reward_cycle: tip_rc, - }; - - let sortdb = peer.sortdb.take().unwrap(); - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - utd.try_accept_tenure_info( - &sortdb, - &sort_tip, - peer.chainstate(), - tenure_tip.clone(), - &agg_pubkeys, - ) - .unwrap(); - - peer.sortdb = Some(sortdb); - - assert!(utd.unconfirmed_tenure_start_block.is_some()); - - let mut bad_block = unconfirmed_tenure.last().cloned().unwrap(); - bad_block.header.version += 1; - - assert!(utd - .try_accept_unconfirmed_tenure_blocks(vec![bad_block]) - .is_err()); - } -} - -#[test] -fn test_tenure_start_end_from_inventory() { - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: 123, - public_key_hash: Hash160([0xff; 20]), - 
}; - let rc_len = 12u16; - let mut invs = NakamotoTenureInv::new(0, u64::from(rc_len), 0, naddr.clone()); - let pox_constants = PoxConstants::new( - rc_len.into(), - 5, - 3, - 0, - 25, - u64::MAX, - u64::MAX, - u32::MAX, - u32::MAX, - u32::MAX, - u32::MAX, - ); - let first_burn_height = 100u64; - - // make some invs - let num_rcs = 6; - invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - 0, - ); - invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - false, false, false, false, false, false, false, true, true, true, false, false, - ] - .as_slice(), - ) - .unwrap(), - 1, - ); - invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, false, false, false, false, false, false, true, true, true, false, true, - ] - .as_slice(), - ) - .unwrap(), - 2, - ); - invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, false, true, false, true, false, true, true, true, true, true, false, - ] - .as_slice(), - ) - .unwrap(), - 3, - ); - invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - false, true, false, true, false, true, true, true, true, true, false, true, - ] - .as_slice(), - ) - .unwrap(), - 4, - ); - invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - false, false, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - 5, - ); - - let mut wanted_tenures = vec![]; - let mut next_wanted_tenures = vec![]; - for i in 0..rc_len { - wanted_tenures.push(WantedTenure::new( - ConsensusHash([i as u8; 20]), - StacksBlockId([i as u8; 32]), - u64::from(i) + first_burn_height, - )); - next_wanted_tenures.push(WantedTenure::new( - ConsensusHash([(i + 128) as u8; 20]), - StacksBlockId([(i + 128) as u8; 32]), - u64::from(i) + first_burn_height, - )); - } - let mut all_tenures = wanted_tenures.clone(); - all_tenures.append(&mut next_wanted_tenures.clone()); - - // check the case where we only have one Nakamoto reward cycle - for rc in 0..num_rcs { - let available = TenureStartEnd::from_inventory( - rc, - &wanted_tenures, - None, - &pox_constants, - first_burn_height, - &invs, - ) - .unwrap(); - let bits = invs.tenures_inv.get(&rc).unwrap(); - for (i, wt) in wanted_tenures.iter().enumerate() { - if i >= (rc_len - 1).into() { - // nothing here - assert!(available.get(&wt.tenure_id_consensus_hash).is_none()); - continue; - } - - let tenure_start_end_opt = available.get(&wt.tenure_id_consensus_hash); - if bits.get(i as u16).unwrap() { - // this sortition had a tenure - let mut j = (i + 1) as u16; - let mut tenure_start_index = None; - let mut tenure_end_index = None; - - while j < bits.len() { - if bits.get(j).unwrap() { - tenure_start_index = Some(j); - j += 1; - break; - } - j += 1; - } - - while j < bits.len() { - if bits.get(j).unwrap() { - tenure_end_index = Some(j); - break; - } - j += 1; - } - - if tenure_start_index.is_some() && tenure_end_index.is_some() { - let tenure_start_end = tenure_start_end_opt.unwrap(); - assert_eq!( - wanted_tenures[tenure_start_index.unwrap() as usize].winning_block_id, - tenure_start_end.start_block_id - ); - assert_eq!( - wanted_tenures[tenure_end_index.unwrap() as usize].winning_block_id, - tenure_start_end.end_block_id - ); - } else { - assert!(tenure_start_end_opt.is_none()); - } - } else { - // no tenure here - assert!( - tenure_start_end_opt.is_none(), - "{}", - format!( - "tenure_start_end = {:?}, rc = {}, i = {}, wt = {:?}", - &tenure_start_end_opt, rc, i, &wt - ) - ); 
- } - } - } - - // check the case where we have at least two Nakamoto reward cycles. - // the available tenures should straddle the reward cycle boundary. - for rc in 0..(num_rcs - 1) { - debug!("rc = {}", rc); - let available = TenureStartEnd::from_inventory( - rc, - &wanted_tenures, - Some(&next_wanted_tenures), - &pox_constants, - first_burn_height, - &invs, - ) - .unwrap(); - - // need to check across two reward cycles - let bits_cur_rc = invs.tenures_inv.get(&rc).unwrap(); - let bits_next_rc = invs.tenures_inv.get(&(rc + 1)).unwrap(); - let mut bits = BitVec::<2100>::zeros(rc_len * 2).unwrap(); - for i in 0..rc_len { - if bits_cur_rc.get(i).unwrap() { - bits.set(i, true).unwrap(); - } - if bits_next_rc.get(i).unwrap() { - bits.set(i + rc_len, true).unwrap(); - } - } - - for (i, wt) in wanted_tenures.iter().enumerate() { - let tenure_start_end_opt = available.get(&wt.tenure_id_consensus_hash); - if bits - .get(i as u16) - .expect(&format!("failed to get bit {}: {:?}", i, &wt)) - { - // this sortition had a tenure - let mut j = (i + 1) as u16; - let mut tenure_start_index = None; - let mut tenure_end_index = None; - - while j < bits.len() { - if bits.get(j).unwrap() { - tenure_start_index = Some(j); - j += 1; - break; - } - j += 1; - } - - while j < bits.len() { - if bits.get(j).unwrap() { - tenure_end_index = Some(j); - break; - } - j += 1; - } - - if tenure_start_index.is_some() && tenure_end_index.is_some() { - debug!( - "rc = {}, i = {}, tenure_start_index = {:?}, tenure_end_index = {:?}", - rc, i, &tenure_start_index, &tenure_end_index - ); - let tenure_start_end = tenure_start_end_opt.expect(&format!( - "failed to get tenure_start_end_opt: i = {}, wt = {:?}", - i, &wt - )); - assert_eq!( - all_tenures[tenure_start_index.unwrap() as usize].winning_block_id, - tenure_start_end.start_block_id - ); - assert_eq!( - all_tenures[tenure_end_index.unwrap() as usize].winning_block_id, - tenure_start_end.end_block_id - ); - } else { - assert!(tenure_start_end_opt.is_none()); - } - } else { - // no tenure here - assert!( - tenure_start_end_opt.is_none(), - "{}", - format!( - "tenure_start_end = {:?}, rc = {}, i = {}, wt = {:?}", - &tenure_start_end_opt, rc, i, &wt - ) - ); - } - } - } -} - -/// Test all of the functionality needed to transform a peer's reported tenure inventory into a -/// tenure downloader and download schedule. 
-#[test] -fn test_make_tenure_downloaders() { - let observer = TestEventObserver::new(); - let bitvecs = vec![vec![ - true, true, true, true, true, true, true, true, true, true, - ]]; - - let rc_len = 10u64; - let peer = make_nakamoto_peer_from_invs( - function_name!(), - &observer, - rc_len as u32, - 3, - bitvecs.clone(), - ); - let (mut peer, reward_cycle_invs) = - peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]); - - let nakamoto_start = - NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants); - - let all_sortitions = peer.sortdb().get_all_snapshots().unwrap(); - let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap(); - - assert_eq!(tip.block_height, 51); - - let test_signers = TestSigners::default(); - let agg_pubkeys = peer.network.aggregate_public_keys.clone(); - - // test load_wanted_tenures() - { - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - tip.block_height - rc_len, - tip.block_height, - ) - .unwrap(); - assert_eq!(wanted_tenures.len(), rc_len as usize); - - for i in (tip.block_height - rc_len)..tip.block_height { - let w = (i - (tip.block_height - rc_len)) as usize; - let i = i as usize; - assert_eq!( - wanted_tenures[w].tenure_id_consensus_hash, - all_sortitions[i].consensus_hash - ); - assert_eq!( - wanted_tenures[w].winning_block_id.0, - all_sortitions[i].winning_stacks_block_hash.0 - ); - assert_eq!(wanted_tenures[w].processed, false); - } - - let Err(NetError::DBError(DBError::NotFoundError)) = - NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - tip.block_height + 1, - tip.block_height + 2, - ) - else { - panic!() - }; - - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - tip.block_height + 3, - tip.block_height, - ) - .unwrap(); - assert_eq!(wanted_tenures.len(), 0); - } - - // test load_wanted_tenures_for_reward_cycle - { - let sortdb = peer.sortdb(); - let rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap() - - 1; - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures_for_reward_cycle( - rc, - &tip, - peer.sortdb(), - ) - .unwrap(); - assert_eq!(wanted_tenures.len(), rc_len as usize); - - for i in (tip.block_height - 1 - rc_len)..(tip.block_height - 1) { - let w = (i - (tip.block_height - 1 - rc_len)) as usize; - let i = i as usize; - assert_eq!( - wanted_tenures[w].tenure_id_consensus_hash, - all_sortitions[i].consensus_hash - ); - assert_eq!( - wanted_tenures[w].winning_block_id.0, - all_sortitions[i].winning_stacks_block_hash.0 - ); - assert_eq!(wanted_tenures[w].processed, false); - } - - let Err(NetError::DBError(DBError::NotFoundError)) = - NakamotoDownloadStateMachine::load_wanted_tenures_for_reward_cycle( - rc + 1, - &tip, - peer.sortdb(), - ) - else { - panic!() - }; - } - - // test load_wanted_tenures_at_tip - { - let sortdb = peer.sortdb(); - let wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_at_tip(None, &tip, sortdb, &vec![]) - .unwrap(); - assert_eq!(wanted_tenures.len(), 2); - for i in (tip.block_height - 1)..=(tip.block_height) { - let w = (i - (tip.block_height - 1)) as usize; - let i = i as usize; - assert_eq!( - wanted_tenures[w].tenure_id_consensus_hash, - all_sortitions[i].consensus_hash - ); - assert_eq!( - wanted_tenures[w].winning_block_id.0, - all_sortitions[i].winning_stacks_block_hash.0 - ); - 
assert_eq!(wanted_tenures[w].processed, false); - } - - let all_wanted_tenures = wanted_tenures; - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures_at_tip( - None, - &tip, - sortdb, - &vec![all_wanted_tenures[0].clone()], - ) - .unwrap(); - assert_eq!(wanted_tenures.len(), 1); - - assert_eq!( - wanted_tenures[0].tenure_id_consensus_hash, - all_sortitions[tip.block_height as usize].consensus_hash - ); - assert_eq!( - wanted_tenures[0].winning_block_id.0, - all_sortitions[tip.block_height as usize] - .winning_stacks_block_hash - .0 - ); - assert_eq!(wanted_tenures[0].processed, false); - - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures_at_tip( - None, - &tip, - sortdb, - &all_wanted_tenures, - ) - .unwrap(); - assert_eq!(wanted_tenures.len(), 0); - } - - // test inner_update_processed_wanted_tenures - { - let sortdb = peer.sortdb(); - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let mut wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - nakamoto_start, - tip.block_height, - ) - .unwrap(); - - let chainstate = peer.chainstate(); - NakamotoDownloadStateMachine::inner_update_processed_wanted_tenures( - nakamoto_start, - &mut wanted_tenures, - chainstate, - ) - .unwrap(); - - for wt in wanted_tenures { - if !wt.processed { - warn!("not processed: {:?}", &wt); - } - assert!(wt.processed); - } - } - - // test load_tenure_start_blocks - { - let sortdb = peer.sortdb(); - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let wanted_tenures = NakamotoDownloadStateMachine::load_wanted_tenures( - &ih, - nakamoto_start, - tip.block_height + 1, - ) - .unwrap(); - - // the first block loaded won't have data, since the blocks are loaded by consensus hash - // but the resulting map is keyed by block ID (and we don't have the first block ID) - let wanted_tenures_with_blocks = wanted_tenures[1..].to_vec(); - - let chainstate = peer.chainstate(); - let mut tenure_start_blocks = HashMap::new(); - NakamotoDownloadStateMachine::load_tenure_start_blocks( - &wanted_tenures, - chainstate, - &mut tenure_start_blocks, - ) - .unwrap(); - assert_eq!(tenure_start_blocks.len(), wanted_tenures.len()); - - for wt in wanted_tenures_with_blocks { - if tenure_start_blocks.get(&wt.winning_block_id).is_none() { - warn!("No tenure start block for wanted tenure {:?}", &wt); - } - - let block = tenure_start_blocks.get(&wt.winning_block_id).unwrap(); - assert!(block.is_wellformed_tenure_start_block().unwrap()); - } - } - - // test find_available_tenures - { - // test for reward cycle - let sortdb = peer.sortdb(); - let rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap() - - 1; - let rc_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_for_reward_cycle(rc, &tip, sortdb) - .unwrap(); - assert_eq!(rc_wanted_tenures.len(), rc_len as usize); - - // also test for tip - let tip_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_at_tip(None, &tip, sortdb, &[]) - .unwrap(); - - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: 123, - public_key_hash: Hash160([0xff; 20]), - }; - - // full invs - let mut full_invs = NakamotoTenureInv::new(0, u64::from(rc_len), 0, naddr.clone()); - full_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - rc, - ); - - let mut full_inventories = HashMap::new(); - 
full_inventories.insert(naddr.clone(), full_invs.clone()); - - let available = NakamotoDownloadStateMachine::find_available_tenures( - rc, - &rc_wanted_tenures, - full_inventories.iter(), - ); - assert_eq!(available.len(), rc_len as usize); - for wt in rc_wanted_tenures.iter() { - assert_eq!( - available.get(&wt.tenure_id_consensus_hash).unwrap(), - &vec![naddr.clone()] - ); - } - - // sparse invs - let mut sparse_invs = NakamotoTenureInv::new(0, u64::from(rc_len), 0, naddr.clone()); - sparse_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - false, true, false, true, false, true, false, true, false, true, false, true, - ] - .as_slice(), - ) - .unwrap(), - rc, - ); - - let mut sparse_inventories = HashMap::new(); - sparse_inventories.insert(naddr.clone(), sparse_invs.clone()); - - let available = NakamotoDownloadStateMachine::find_available_tenures( - rc, - &rc_wanted_tenures, - sparse_inventories.iter(), - ); - assert_eq!(available.len(), rc_len as usize); - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - if i % 2 == 0 { - assert_eq!( - available.get(&wt.tenure_id_consensus_hash).unwrap(), - &vec![] - ); - } else { - assert_eq!( - available.get(&wt.tenure_id_consensus_hash).unwrap(), - &vec![naddr.clone()] - ); - } - } - - // no invs - let available = NakamotoDownloadStateMachine::find_available_tenures( - rc + 1, - &rc_wanted_tenures, - full_inventories.iter(), - ); - assert_eq!(available.len(), rc_len as usize); - - // tip full invs - full_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - rc + 1, - ); - full_inventories.insert(naddr.clone(), full_invs.clone()); - - let available = NakamotoDownloadStateMachine::find_available_tenures( - rc + 1, - &tip_wanted_tenures, - full_inventories.iter(), - ); - assert_eq!(available.len(), tip_wanted_tenures.len()); - for wt in tip_wanted_tenures.iter() { - assert_eq!( - available.get(&wt.tenure_id_consensus_hash).unwrap(), - &vec![naddr.clone()] - ); - } - - // tip sparse invs - sparse_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - false, true, false, true, false, true, false, true, false, true, false, true, - ] - .as_slice(), - ) - .unwrap(), - rc + 1, - ); - sparse_inventories.insert(naddr.clone(), sparse_invs.clone()); - - let available = NakamotoDownloadStateMachine::find_available_tenures( - rc + 1, - &tip_wanted_tenures, - sparse_inventories.iter(), - ); - assert_eq!(available.len(), tip_wanted_tenures.len()); - for (i, wt) in tip_wanted_tenures.iter().enumerate() { - if i % 2 == 0 { - assert_eq!( - available.get(&wt.tenure_id_consensus_hash).unwrap(), - &vec![] - ); - } else { - assert_eq!( - available.get(&wt.tenure_id_consensus_hash).unwrap(), - &vec![naddr.clone()] - ); - } - } - } - - // test find_tenure_block_ids - { - let sortdb = peer.sortdb(); - let rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap() - - 1; - let pox_constants = sortdb.pox_constants.clone(); - let first_burn_height = sortdb.first_block_height; - - let rc_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_for_reward_cycle(rc, &tip, sortdb) - .unwrap(); - assert_eq!(rc_wanted_tenures.len(), rc_len as usize); - - let tip_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_at_tip(None, &tip, sortdb, &[]) - .unwrap(); - - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: 123, - public_key_hash: 
Hash160([0xff; 20]), - }; - - let mut full_invs = NakamotoTenureInv::new(0, u64::from(rc_len), 0, naddr.clone()); - - full_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - rc, - ); - full_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - rc + 1, - ); - - let mut full_inventories = HashMap::new(); - full_inventories.insert(naddr.clone(), full_invs.clone()); - - let tenure_block_ids = NakamotoDownloadStateMachine::find_tenure_block_ids( - rc, - &rc_wanted_tenures, - Some(&tip_wanted_tenures), - &pox_constants, - first_burn_height, - full_inventories.iter(), - ); - assert_eq!(tenure_block_ids.len(), 1); - - let available_tenures = tenure_block_ids.get(&naddr).unwrap(); - - // every tenure in rc_wanted_tenures maps to a start/end - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - let start_end = available_tenures.get(&wt.tenure_id_consensus_hash).unwrap(); - if i + 1 < rc_wanted_tenures.len() { - assert_eq!( - start_end.start_block_id, - rc_wanted_tenures[i + 1].winning_block_id - ); - } else { - assert_eq!( - start_end.start_block_id, - tip_wanted_tenures[i - (rc_wanted_tenures.len() - 1)].winning_block_id - ); - } - if i + 2 < rc_wanted_tenures.len() { - assert_eq!( - start_end.end_block_id, - rc_wanted_tenures[i + 2].winning_block_id - ); - } else { - assert_eq!( - start_end.end_block_id, - tip_wanted_tenures[i - (rc_wanted_tenures.len() - 2)].winning_block_id - ); - } - } - - // the tenure-start blocks correspond to the wanted tenure ID consensus hash - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - // this may be before epoch 3.0 - let sortdb = peer.sortdb(); - let sn = SortitionDB::get_block_snapshot_consensus( - sortdb.conn(), - &wt.tenure_id_consensus_hash, - ) - .unwrap() - .unwrap(); - if sn.block_height < nakamoto_start { - continue; - } - - let chainstate = peer.chainstate(); - let start_end = available_tenures.get(&wt.tenure_id_consensus_hash).unwrap(); - let hdr = NakamotoChainState::get_nakamoto_tenure_start_block_header( - chainstate.db(), - &wt.tenure_id_consensus_hash, - ) - .unwrap() - .unwrap(); - assert_eq!(hdr.index_block_hash(), start_end.start_block_id); - } - - // none of the tip ones do, since there are only two - let tenure_block_ids = NakamotoDownloadStateMachine::find_tenure_block_ids( - rc + 1, - &tip_wanted_tenures, - None, - &pox_constants, - first_burn_height, - full_inventories.iter(), - ); - assert_eq!(tenure_block_ids.len(), 1); - - let available_tenures = tenure_block_ids.get(&naddr).unwrap(); - assert_eq!(available_tenures.len(), 0); - } - - // test make_ibd_download_schedule - // test make_rarest_first_download_schedule - { - let sortdb = peer.sortdb(); - let rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap() - - 1; - let rc_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_for_reward_cycle(rc, &tip, sortdb) - .unwrap(); - assert_eq!(rc_wanted_tenures.len(), rc_len as usize); - - let mut available: HashMap> = HashMap::new(); - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - for j in i..(rc_len as usize) { - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: (i * (rc_len as usize) + j + 123) as u16, - public_key_hash: Hash160([0xff; 20]), - }; - if let Some(addrs) = 
available.get_mut(&wt.tenure_id_consensus_hash) {
-                    addrs.push(naddr);
-                } else {
-                    available.insert(wt.tenure_id_consensus_hash.clone(), vec![naddr]);
-                }
-            }
-        }
-
-        // sanity check -- the ith wanted tenure is available from rc_len - i neighbors
-        for (i, wt) in rc_wanted_tenures.iter().enumerate() {
-            let addrs = available.get(&wt.tenure_id_consensus_hash).unwrap();
-            assert_eq!(addrs.len(), (rc_len as usize) - i);
-        }
-
-        // check full schedule -- assume nakamoto_start is 0
-        let ibd_schedule = NakamotoDownloadStateMachine::make_ibd_download_schedule(
-            0,
-            &rc_wanted_tenures,
-            &available,
-        );
-        assert_eq!(ibd_schedule.len(), rc_len as usize);
-        for (i, ch) in ibd_schedule.iter().enumerate() {
-            // in IBD, we download in sortition order
-            assert_eq!(&rc_wanted_tenures[i].tenure_id_consensus_hash, ch);
-        }
-
-        // check full schedule -- assume nakamoto_start is 0
-        let rarest_first_schedule =
-            NakamotoDownloadStateMachine::make_rarest_first_download_schedule(
-                0,
-                &rc_wanted_tenures,
-                &available,
-            );
-        assert_eq!(rarest_first_schedule.len(), rc_len as usize);
-        for (i, ch) in rarest_first_schedule.iter().enumerate() {
-            // in steady-state, we download in rarest-first order.
-            // Per the above sanity check, this would be in reverse order due to the way we
-            // constructed `available`.
-            assert_eq!(
-                &rc_wanted_tenures[(rc_len as usize) - i - 1].tenure_id_consensus_hash,
-                ch
-            );
-        }
-
-        // check partial schedule -- assume nakamoto_start is not 0
-        let ibd_schedule = NakamotoDownloadStateMachine::make_ibd_download_schedule(
-            nakamoto_start,
-            &rc_wanted_tenures,
-            &available,
-        );
-        let offset = (nakamoto_start % rc_len) as usize;
-        assert_eq!(ibd_schedule.len(), (rc_len as usize) - offset);
-        for (i, ch) in ibd_schedule.iter().enumerate() {
-            // in IBD, we download in sortition order
-            assert_eq!(&rc_wanted_tenures[i + offset].tenure_id_consensus_hash, ch);
-            assert!(rc_wanted_tenures[i + offset].burn_height >= nakamoto_start);
-        }
-
-        // check partial schedule -- assume nakamoto_start is not 0
-        let rarest_first_schedule =
-            NakamotoDownloadStateMachine::make_rarest_first_download_schedule(
-                nakamoto_start,
-                &rc_wanted_tenures,
-                &available,
-            );
-        assert_eq!(rarest_first_schedule.len(), (rc_len as usize) - offset);
-        for (i, ch) in rarest_first_schedule.iter().enumerate() {
-            // in steady-state, we download in rarest-first order.
-            // Per the above sanity check, this would be in reverse order due to the way we
-            // constructed `available`.
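// For intuition, the rarest-first order checked here can be derived by sorting the
// wanted tenures by how many neighbors can serve each one, fewest first -- a sketch,
// assuming `available: HashMap<ConsensusHash, Vec<NeighborAddress>>` as built above:
//
//     let mut order: Vec<ConsensusHash> = rc_wanted_tenures
//         .iter()
//         .map(|wt| wt.tenure_id_consensus_hash.clone())
//         .collect();
//     order.sort_by_key(|ch| available.get(ch).map(|addrs| addrs.len()).unwrap_or(0));
//
// With tenure i served by rc_len - i neighbors, this is exactly the reverse of
// sortition order, which is what the assertion below verifies.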
- assert_eq!( - &rc_wanted_tenures[(rc_len as usize) - 1 - i].tenure_id_consensus_hash, - ch - ); - assert!(rc_wanted_tenures[i + offset].burn_height >= nakamoto_start); - } - } - - // test make_tenure_downloaders - { - let mut downloaders = NakamotoTenureDownloaderSet::new(); - - let sortdb = peer.sortdb(); - let rc = sortdb - .pox_constants - .block_height_to_reward_cycle(sortdb.first_block_height, tip.block_height) - .unwrap() - - 1; - let rc_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_for_reward_cycle(rc, &tip, sortdb) - .unwrap(); - assert_eq!(rc_wanted_tenures.len(), rc_len as usize); - - let tip_wanted_tenures = - NakamotoDownloadStateMachine::load_wanted_tenures_at_tip(None, &tip, sortdb, &[]) - .unwrap(); - - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: 123, - public_key_hash: Hash160([0xff; 20]), - }; - - let mut full_invs = NakamotoTenureInv::new(0, u64::from(rc_len), 0, naddr.clone()); - - full_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - rc, - ); - full_invs.merge_tenure_inv( - BitVec::<2100>::try_from( - vec![ - true, true, true, true, true, true, true, true, true, true, true, true, - ] - .as_slice(), - ) - .unwrap(), - rc + 1, - ); - - let mut full_inventories = HashMap::new(); - full_inventories.insert(naddr.clone(), full_invs.clone()); - - let mut tenure_block_ids = NakamotoDownloadStateMachine::find_tenure_block_ids( - rc, - &rc_wanted_tenures, - Some(&tip_wanted_tenures), - &sortdb.pox_constants, - sortdb.first_block_height, - full_inventories.iter(), - ); - assert_eq!(tenure_block_ids.len(), 1); - - let availability = tenure_block_ids.get(&naddr).cloned().unwrap(); - - let mut available: HashMap> = HashMap::new(); - let mut available_by_index = vec![]; - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - for j in i..=(rc_len as usize) { - let naddr = NeighborAddress { - addrbytes: PeerAddress([0xff; 16]), - port: (i * (rc_len as usize) + j + 123) as u16, - public_key_hash: Hash160([0xff; 20]), - }; - - // expand availability -- each neighbor has the same invs - tenure_block_ids.insert(naddr.clone(), availability.clone()); - - if let Some(addrs) = available.get_mut(&wt.tenure_id_consensus_hash) { - addrs.push(naddr); - } else { - available.insert(wt.tenure_id_consensus_hash.clone(), vec![naddr]); - } - } - available_by_index.push( - available - .get(&wt.tenure_id_consensus_hash) - .cloned() - .unwrap(), - ); - } - - // sanity check -- the ith wanted tenure is available from rc_len - i neighbors - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - let addrs = available.get(&wt.tenure_id_consensus_hash).unwrap(); - assert_eq!(addrs.len(), (rc_len as usize) - i + 1); - } - - // pretend nakamoto_start is 0 for now, so we can treat this like a full reward cycle - let mut ibd_schedule = NakamotoDownloadStateMachine::make_ibd_download_schedule( - 0, - &rc_wanted_tenures, - &available, - ); - - let old_schedule = ibd_schedule.clone(); - let sched_len = ibd_schedule.len(); - - // make 6 downloaders - downloaders.make_tenure_downloaders( - &mut ibd_schedule, - &mut available, - &tenure_block_ids, - 6, - &agg_pubkeys, - ); - - // made all 6 downloaders - assert_eq!(ibd_schedule.len() + 6, sched_len); - assert_eq!(downloaders.downloaders.len(), 6); - for (i, wt) in rc_wanted_tenures.iter().enumerate() { - let naddrs = available.get(&wt.tenure_id_consensus_hash).unwrap(); - if i < 6 { - 
assert_eq!(naddrs.len(), (rc_len as usize) - i);
-                } else {
-                    assert_eq!(naddrs.len(), (rc_len as usize) - i + 1);
-                }
-            }
-
-            for (i, wt) in rc_wanted_tenures.iter().enumerate() {
-                let possible_addrs = available_by_index.get(i).unwrap();
-                let mut found = false;
-                for addr in possible_addrs.iter() {
-                    if downloaders.has_downloader(addr) {
-                        found = true;
-                        break;
-                    }
-                }
-
-                if i < 6 {
-                    assert!(found);
-                } else {
-                    assert!(!found);
-                }
-            }
-
-            // make 6 more downloaders
-            downloaders.make_tenure_downloaders(
-                &mut ibd_schedule,
-                &mut available,
-                &tenure_block_ids,
-                12,
-                &agg_pubkeys,
-            );
-
-            // only 4 more downloaders were created, since only 4 tenures remained in the schedule
-            assert_eq!(ibd_schedule.len(), 0);
-            assert_eq!(downloaders.downloaders.len(), 10);
-            for (i, wt) in rc_wanted_tenures.iter().enumerate() {
-                let naddrs = available.get(&wt.tenure_id_consensus_hash).unwrap();
-                assert_eq!(naddrs.len(), (rc_len as usize) - i);
-            }
-
-            for (i, wt) in rc_wanted_tenures.iter().enumerate() {
-                let possible_addrs = available_by_index.get(i).unwrap();
-                let mut found = false;
-                for addr in possible_addrs.iter() {
-                    if downloaders.has_downloader(addr) {
-                        found = true;
-                        break;
-                    }
-                }
-
-                assert!(found);
-            }
-        }
-    }
-}
-
-#[test]
-fn test_nakamoto_download_run_2_peers() {
-    let observer = TestEventObserver::new();
-    let bitvecs = vec![
-        // full reward cycle
-        vec![true, true, true, true, true, true, true, true, true, true],
-        // alternating reward cycle, but with a full prepare phase
-        vec![true, false, true, false, true, true, true, true, true, true],
-        // minimum viable reward cycle -- empty reward phase, an anchor block sortition, and two subsequent
-        // sortitions to ensure that the anchor block's start/end blocks are written to the burnchain.
-        vec![
-            false, false, false, false, true, true, false, true, false, false,
-        ],
-        // a long period of no sortitions that spans a reward cycle boundary
-        vec![false, false, true, true, true, true, true, true, true, true],
-    ];
-
-    let rc_len = 10u64;
-    let peer = make_nakamoto_peer_from_invs(
-        function_name!(),
-        &observer,
-        rc_len as u32,
-        5,
-        bitvecs.clone(),
-    );
-    let (mut peer, reward_cycle_invs) =
-        peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
-
-    let nakamoto_start =
-        NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants);
-
-    let all_sortitions = peer.sortdb().get_all_snapshots().unwrap();
-    let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap();
-
-    assert_eq!(tip.block_height, 81);
-
-    // make a neighbor from this peer
-    let boot_observer = TestEventObserver::new();
-    let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]);
-    let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer));
-
-    let all_burn_block_ops: Vec<(u64, Vec<_>)> = (26..=tip.block_height)
-        .map(|height| {
-            (
-                height,
-                peer.get_burnchain_block_ops_at_height(height)
-                    .unwrap_or(vec![]),
-            )
-        })
-        .collect();
-
-    let all_sortitions: Vec<BlockSnapshot> = all_burn_block_ops
-        .iter()
-        .map(|(height, ops)| {
-            let ih = peer.sortdb().index_handle(&tip.sortition_id);
-            let sn = ih.get_block_snapshot_by_height(*height).unwrap().unwrap();
-            sn
-        })
-        .collect();
-
-    let mut all_block_headers: HashMap<ConsensusHash, StacksHeaderInfo> = HashMap::new();
-    for sn in all_sortitions.iter() {
-        if let Some(header) = NakamotoChainState::get_block_header_by_consensus_hash(
-            peer.chainstate().db(),
-            &sn.consensus_hash,
-        )
-        .unwrap()
-        {
-            all_block_headers.insert(sn.consensus_hash.clone(), header);
-        }
-    }
-
-    let (canonical_stacks_tip_ch,
canonical_stacks_tip_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap(); - - // boot up the boot peer's burnchain - for height in 25..tip.block_height { - let ops = peer - .get_burnchain_block_ops_at_height(height + 1) - .unwrap_or(vec![]); - let sn = { - let ih = peer.sortdb().index_handle(&tip.sortition_id); - let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap(); - sn - }; - test_debug!( - "boot_peer tip height={} hash={}", - sn.block_height, - &sn.burn_header_hash - ); - test_debug!("ops = {:?}", &ops); - let block_header = TestPeer::make_next_burnchain_block( - &boot_peer.config.burnchain, - sn.block_height, - &sn.burn_header_hash, - ops.len() as u64, - ); - TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone()); - } - - let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100); - - // start running that peer so we can boot off of it - let (term_sx, term_rx) = sync_channel(1); - thread::scope(|s| { - s.spawn(move || { - let mut burnchain_ptr = 0; - - // kick things off - let (_burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - - let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) - .unwrap(); - let mut last_burnchain_sync = get_epoch_time_secs(); - let deadline = 5; - - loop { - boot_peer - .run_with_ibd(true, Some(&mut boot_dns_client)) - .unwrap(); - - let (stacks_tip_ch, stacks_tip_bhh) = - SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn()) - .unwrap(); - - if burnchain_ptr < all_burn_block_ops.len() { - let (burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - let expected_sortition = all_sortitions.get(burnchain_ptr).unwrap(); - if !expected_sortition.sortition { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - if !all_block_headers.contains_key(&expected_sortition.consensus_hash) { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - - let header = all_block_headers - .get(&expected_sortition.consensus_hash) - .unwrap(); - debug!( - "Waiting for Stacks block {} (sortition {} height {} burn height {})", - &header.index_block_hash(), - &expected_sortition.consensus_hash, - &header.anchored_header.height(), - expected_sortition.block_height - ); - - if stacks_tip_ch != last_stacks_tip_ch - || stacks_tip_ch == header.consensus_hash - || last_burnchain_sync + deadline < get_epoch_time_secs() - { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - } - - last_stacks_tip_ch = stacks_tip_ch; - last_stacks_tip_bhh = stacks_tip_bhh; - - debug!( - "Booting peer's stacks tip is now {:?}", - &boot_peer.network.stacks_tip - ); - if stacks_tip_ch == canonical_stacks_tip_ch { - break; - } - } - - term_sx.send(()).unwrap(); - }); - - loop { - if term_rx.try_recv().is_ok() { - break; - } - peer.step_with_ibd(false).unwrap(); - } - }); - - boot_dns_thread_handle.join().unwrap(); -} - -#[test] -fn 
test_nakamoto_unconfirmed_download_run_2_peers() {
-    let observer = TestEventObserver::new();
-    let bitvecs = vec![
-        // full reward cycle
-        vec![true, true, true, true, true, true, true, true, true, true],
-    ];
-
-    let rc_len = 10u64;
-    let peer = make_nakamoto_peer_from_invs(
-        function_name!(),
-        &observer,
-        rc_len as u32,
-        5,
-        bitvecs.clone(),
-    );
-    let (mut peer, reward_cycle_invs) =
-        peer_get_nakamoto_invs(peer, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]);
-
-    let nakamoto_start =
-        NakamotoBootPlan::nakamoto_first_tenure_height(&peer.config.burnchain.pox_constants);
-
-    let all_sortitions = peer.sortdb().get_all_snapshots().unwrap();
-    let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb().conn()).unwrap();
-
-    assert_eq!(tip.block_height, 51);
-
-    // make a neighbor from this peer
-    let boot_observer = TestEventObserver::new();
-    let privk = StacksPrivateKey::from_seed(&[0, 1, 2, 3, 4]);
-    let mut boot_peer = peer.neighbor_with_observer(privk, Some(&boot_observer));
-
-    let all_burn_block_ops: Vec<(u64, Vec<_>)> = (26..=tip.block_height)
-        .map(|height| {
-            (
-                height,
-                peer.get_burnchain_block_ops_at_height(height)
-                    .unwrap_or(vec![]),
-            )
-        })
-        .collect();
-
-    let all_sortitions: Vec<BlockSnapshot> = all_burn_block_ops
-        .iter()
-        .map(|(height, ops)| {
-            let ih = peer.sortdb().index_handle(&tip.sortition_id);
-            let sn = ih.get_block_snapshot_by_height(*height).unwrap().unwrap();
-            sn
-        })
-        .collect();
-
-    let mut all_block_headers: HashMap<ConsensusHash, StacksHeaderInfo> = HashMap::new();
-    for sn in all_sortitions.iter() {
-        if let Some(header) = NakamotoChainState::get_block_header_by_consensus_hash(
-            peer.chainstate().db(),
-            &sn.consensus_hash,
-        )
-        .unwrap()
-        {
-            all_block_headers.insert(sn.consensus_hash.clone(), header);
-        }
-    }
-
-    let (canonical_stacks_tip_ch, canonical_stacks_tip_bhh) =
-        SortitionDB::get_canonical_stacks_chain_tip_hash(peer.sortdb().conn()).unwrap();
-
-    // boot up the boot peer's burnchain
-    for height in 25..tip.block_height {
-        let ops = peer
-            .get_burnchain_block_ops_at_height(height + 1)
-            .unwrap_or(vec![]);
-        let sn = {
-            let ih = peer.sortdb().index_handle(&tip.sortition_id);
-            let sn = ih.get_block_snapshot_by_height(height).unwrap().unwrap();
-            sn
-        };
-        test_debug!(
-            "boot_peer tip height={} hash={}",
-            sn.block_height,
-            &sn.burn_header_hash
-        );
-        test_debug!("ops = {:?}", &ops);
-        let block_header = TestPeer::make_next_burnchain_block(
-            &boot_peer.config.burnchain,
-            sn.block_height,
-            &sn.burn_header_hash,
-            ops.len() as u64,
-        );
-        TestPeer::add_burnchain_block(&boot_peer.config.burnchain, &block_header, ops.clone());
-    }
-
-    let (mut boot_dns_client, boot_dns_thread_handle) = dns_thread_start(100);
-
-    // start running that peer so we can boot off of it
-    let (term_sx, term_rx) = sync_channel(1);
-    thread::scope(|s| {
-        s.spawn(move || {
-            let mut burnchain_ptr = 0;
-
-            // kick things off
-            let (_burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap();
-            boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone());
-            burnchain_ptr += 1;
-
-            let (mut last_stacks_tip_ch, mut last_stacks_tip_bhh) =
-                SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn())
-                    .unwrap();
-            let mut last_burnchain_sync = get_epoch_time_secs();
-            let deadline = 5;
-
-            loop {
-                boot_peer
-                    .run_with_ibd(true, Some(&mut boot_dns_client))
-                    .unwrap();
-
-                let (stacks_tip_ch, stacks_tip_bhh) =
-                    SortitionDB::get_canonical_stacks_chain_tip_hash(boot_peer.sortdb().conn())
-                        .unwrap();
-
-                if burnchain_ptr < all_burn_block_ops.len() {
-                    let
(burn_height, burn_ops) = all_burn_block_ops.get(burnchain_ptr).unwrap(); - let expected_sortition = all_sortitions.get(burnchain_ptr).unwrap(); - if !expected_sortition.sortition { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - if !all_block_headers.contains_key(&expected_sortition.consensus_hash) { - if last_burnchain_sync + deadline < get_epoch_time_secs() { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - continue; - } - - let header = all_block_headers - .get(&expected_sortition.consensus_hash) - .unwrap(); - debug!( - "Waiting for Stacks block {} (sortition {} height {} burn height {})", - &header.index_block_hash(), - &expected_sortition.consensus_hash, - &header.anchored_header.height(), - expected_sortition.block_height - ); - - if stacks_tip_ch != last_stacks_tip_ch - || stacks_tip_ch == header.consensus_hash - || last_burnchain_sync + deadline < get_epoch_time_secs() - { - boot_peer.next_burnchain_block_raw_sortition_only(burn_ops.clone()); - burnchain_ptr += 1; - last_burnchain_sync = get_epoch_time_secs(); - } - } - - last_stacks_tip_ch = stacks_tip_ch; - last_stacks_tip_bhh = stacks_tip_bhh; - - debug!( - "Booting peer's stacks tip is now {:?}", - &boot_peer.network.stacks_tip - ); - if stacks_tip_ch == canonical_stacks_tip_ch { - break; - } - } - - term_sx.send(()).unwrap(); - }); - - loop { - if term_rx.try_recv().is_ok() { - break; - } - peer.step_with_ibd(false).unwrap(); - } - }); - - boot_dns_thread_handle.join().unwrap(); -} diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index ca5c0818db4..93213a0e660 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -54,7 +54,7 @@ use crate::stacks_common::types::Address; use crate::util_lib::db::Error as DBError; /// Handshake with and get the reward cycle inventories for a range of reward cycles -pub fn peer_get_nakamoto_invs<'a>( +fn peer_get_nakamoto_invs<'a>( mut peer: TestPeer<'a>, reward_cycles: &[u64], ) -> (TestPeer<'a>, Vec) { @@ -325,7 +325,7 @@ fn test_nakamoto_inv_10_extended_tenures_10_sortitions() { /// NOTE: The second return value does _not_ need `<'a>`, since `observer` is never installed into /// the peers here. However, it appears unavoidable to the borrow-checker. 
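// The note above, reduced to a self-contained illustration with hypothetical types:
// the Vec never actually stores the borrow, but because both tuple positions share the
// function's single declared lifetime, the borrow checker ties them together anyway.

struct Obs;
struct Peer<'a> {
    obs: Option<&'a Obs>,
}

fn make<'a>(obs: &'a Obs) -> (Peer<'a>, Vec<Peer<'a>>) {
    // only the first peer borrows `obs`; the peers in the Vec hold `None`,
    // yet both positions are constrained to the same 'a
    (Peer { obs: Some(obs) }, vec![Peer { obs: None }])
}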
-pub fn make_nakamoto_peers_from_invs<'a>(
+fn make_nakamoto_peers_from_invs<'a>(
     test_name: &str,
     observer: &'a TestEventObserver,
     rc_len: u32,
@@ -399,8 +399,6 @@ pub fn make_nakamoto_peers_from_invs<'a>(
                 NakamotoBootStep::TenureExtend(vec![next_stx_transfer()]),
                 NakamotoBootStep::Block(vec![next_stx_transfer()]),
                 NakamotoBootStep::TenureExtend(vec![next_stx_transfer()]),
-                NakamotoBootStep::Block(vec![next_stx_transfer()]),
-                NakamotoBootStep::TenureExtend(vec![next_stx_transfer()]),
             ]));
         }
     }
@@ -416,7 +414,7 @@ pub fn make_nakamoto_peers_from_invs<'a>(
     (peer, other_peers)
 }
 
-pub fn make_nakamoto_peer_from_invs<'a>(
+fn make_nakamoto_peer_from_invs<'a>(
     test_name: &str,
     observer: &'a TestEventObserver,
     rc_len: u32,
@@ -646,7 +644,7 @@ fn test_nakamoto_tenure_inv() {
         port: 65535,
         public_key_hash: Hash160([0x11; 20]),
     };
-    let mut nakamoto_inv = NakamotoTenureInv::new(100, 100, 0, na);
+    let mut nakamoto_inv = NakamotoTenureInv::new(100, 100, na);
 
     assert!(!nakamoto_inv.has_ith_tenure(0));
     assert!(!nakamoto_inv.has_ith_tenure(99));
     assert!(!nakamoto_inv.has_ith_tenure(100));
diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs
index 7c120b46bb9..67212840d7f 100644
--- a/stackslib/src/net/tests/mod.rs
+++ b/stackslib/src/net/tests/mod.rs
@@ -14,7 +14,6 @@
 // You should have received a copy of the GNU General Public License
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-pub mod download;
 pub mod httpcore;
 pub mod inv;
 pub mod neighbors;
@@ -234,7 +233,6 @@ impl NakamotoBootPlan {
                     &mut sort_handle,
                     &mut node.chainstate,
                     block.clone(),
-                    None,
                 )
                 .unwrap();
                 if accepted {
@@ -250,7 +248,6 @@ impl NakamotoBootPlan {
 
             peer.sortdb = Some(sortdb);
             peer.stacks_node = Some(node);
-            peer.refresh_burnchain_view();
         }
     }
 
@@ -597,7 +594,6 @@ impl NakamotoBootPlan {
                     txs
                 });
-            peer.refresh_burnchain_view();
             consensus_hashes.push(next_consensus_hash);
             let blocks: Vec<NakamotoBlock> = blocks_and_sizes
@@ -711,7 +707,6 @@ impl NakamotoBootPlan {
                     blocks_since_last_tenure += 1;
                     txs
                 });
-            peer.refresh_burnchain_view();
 
             consensus_hashes.push(consensus_hash);
             let blocks: Vec<NakamotoBlock> = blocks_and_sizes
diff --git a/stackslib/src/proptesting/burnchains.rs b/stackslib/src/proptesting/burnchains.rs
new file mode 100644
index 00000000000..9dfed7887b4
--- /dev/null
+++ b/stackslib/src/proptesting/burnchains.rs
@@ -0,0 +1,12 @@
+use proptest::prelude::*;
+use stacks_common::proptesting::bytes;
+
+use crate::burnchains::Txid;
+
+pub fn txid() -> impl Strategy<Value = Txid> {
+    bytes(32).prop_map(|vec| {
+        let arr: [u8; 32] = vec.try_into().expect("failed to generate 32-byte array");
+
+        Txid(arr)
+    })
+}
diff --git a/stackslib/src/proptesting/mod.rs b/stackslib/src/proptesting/mod.rs
new file mode 100644
index 00000000000..7b9319e9e25
--- /dev/null
+++ b/stackslib/src/proptesting/mod.rs
@@ -0,0 +1,2 @@
+pub mod burnchains;
+pub mod net;
diff --git a/stackslib/src/proptesting/net.rs b/stackslib/src/proptesting/net.rs
new file mode 100644
index 00000000000..eb38f4c94eb
--- /dev/null
+++ b/stackslib/src/proptesting/net.rs
@@ -0,0 +1,54 @@
+use clarity::proptesting::qualified_contract_identifier;
+use proptest::prelude::*;
+use stacks_common::proptesting::{hash_160, stacks_block_id};
+
+use super::burnchains::txid;
+use crate::net::atlas::{Attachment, AttachmentInstance};
+
+pub fn attachment_instance() -> impl Strategy<Value = AttachmentInstance> {
+    (
+        // content_hash: Hash160
+        hash_160(),
+        // attachment_index: u32
+        any::<u32>(),
+        // stacks_block_height: u64
+        any::<u64>(),
+        // index_block_hash: StacksBlockId
+        stacks_block_id(),
+        // metadata: String
".*".prop_map(String::from), + // contract_id: QualifiedContractIdentifier + qualified_contract_identifier(), + // tx_id: Txid + txid(), + // canonical_stacks_tip_height: Option + any::>(), + ) + .prop_map( + |( + content_hash, + attachment_index, + stacks_block_height, + index_block_hash, + metadata, + contract_id, + tx_id, + canonical_stacks_tip_height, + )| { + AttachmentInstance { + content_hash, + attachment_index, + stacks_block_height, + index_block_hash, + metadata, + contract_id, + tx_id, + canonical_stacks_tip_height, + } + }, + ) +} + +pub fn attachment() -> impl Strategy { + prop::collection::vec(any::(), 10..256).prop_map(|content| Attachment { content }) +} diff --git a/stackslib/src/util_lib/db.rs b/stackslib/src/util_lib/db.rs index 28aaf310fb5..55122c6c442 100644 --- a/stackslib/src/util_lib/db.rs +++ b/stackslib/src/util_lib/db.rs @@ -135,24 +135,15 @@ impl error::Error for Error { } } -impl From for Error { - #[cfg_attr(test, mutants::skip)] - fn from(e: serde_error) -> Self { - Self::SerializationError(e) - } -} - impl From for Error { - #[cfg_attr(test, mutants::skip)] - fn from(e: sqlite_error) -> Self { - Self::SqliteError(e) + fn from(e: sqlite_error) -> Error { + Error::SqliteError(e) } } impl From for Error { - #[cfg_attr(test, mutants::skip)] - fn from(e: MARFError) -> Self { - Self::IndexError(e) + fn from(e: MARFError) -> Error { + Error::IndexError(e) } } @@ -698,7 +689,7 @@ pub fn tx_begin_immediate_sqlite<'a>(conn: &'a mut Connection) -> Result String { format!("{}", &btc_addr) } -pub fn burnchain_params_from_config(config: &BurnchainConfig) -> BurnchainParameters { - let (network, _) = config.get_bitcoin_network(); - let mut params = BurnchainParameters::from_params(&config.chain, &network) - .expect("Bitcoin network unsupported"); - if let Some(first_burn_block_height) = config.first_burn_block_height { - params.first_block_height = first_burn_block_height; - } - params -} - /// Helper method to create a BitcoinIndexer pub fn make_bitcoin_indexer( config: &Config, should_keep_running: Option>, ) -> BitcoinIndexer { - let burnchain_params = burnchain_params_from_config(&config.burnchain); + let (network, _) = config.burnchain.get_bitcoin_network(); + let burnchain_params = BurnchainParameters::from_params(&config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); let indexer_config = { let burnchain_config = config.burnchain.clone(); BitcoinIndexerConfig { @@ -280,7 +271,7 @@ impl BitcoinRegtestController { ) -> Self { std::fs::create_dir_all(&config.get_burnchain_path_str()) .expect("Unable to create workdir"); - let (_, network_id) = config.burnchain.get_bitcoin_network(); + let (network, network_id) = config.burnchain.get_bitcoin_network(); let res = SpvClient::new( &config.get_spv_headers_file_path(), @@ -295,7 +286,8 @@ impl BitcoinRegtestController { panic!() } - let burnchain_params = burnchain_params_from_config(&config.burnchain); + let burnchain_params = BurnchainParameters::from_params(&config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); if network_id == BitcoinNetworkType::Mainnet && config.burnchain.epochs.is_some() { panic!("It is an error to set custom epochs while running on Mainnet: network_id {:?} config.burnchain {:#?}", @@ -344,7 +336,9 @@ impl BitcoinRegtestController { /// create a dummy bitcoin regtest controller. /// used just for submitting bitcoin ops. 
pub fn new_dummy(config: Config) -> Self { - let burnchain_params = burnchain_params_from_config(&config.burnchain); + let (network, _) = config.burnchain.get_bitcoin_network(); + let burnchain_params = BurnchainParameters::from_params(&config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); let indexer_config = { let burnchain_config = config.burnchain.clone(); @@ -906,8 +900,7 @@ impl BitcoinRegtestController { BlockstackOperationType::LeaderBlockCommit(_) | BlockstackOperationType::LeaderKeyRegister(_) | BlockstackOperationType::StackStx(_) - | BlockstackOperationType::DelegateStx(_) - | BlockstackOperationType::VoteForAggregateKey(_) => { + | BlockstackOperationType::DelegateStx(_) => { unimplemented!(); } BlockstackOperationType::PreStx(payload) => { @@ -1092,92 +1085,6 @@ impl BitcoinRegtestController { Some(tx) } - #[cfg(test)] - /// Build a vote-for-aggregate-key burn op tx - fn build_vote_for_aggregate_key_tx( - &mut self, - epoch_id: StacksEpochId, - payload: VoteForAggregateKeyOp, - signer: &mut BurnchainOpSigner, - utxo_to_use: Option, - ) -> Option { - let public_key = signer.get_public_key(); - let max_tx_size = 230; - - let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { - ( - Transaction { - input: vec![], - output: vec![], - version: 1, - lock_time: 0, - }, - UTXOSet { - bhh: BurnchainHeaderHash::zero(), - utxos: vec![utxo], - }, - ) - } else { - self.prepare_tx( - epoch_id, - &public_key, - DUST_UTXO_LIMIT + max_tx_size * get_satoshis_per_byte(&self.config), - None, - None, - 0, - )? - }; - - // Serialize the payload - let op_bytes = { - let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; - bytes - }; - - let consensus_output = TxOut { - value: 0, - script_pubkey: Builder::new() - .push_opcode(opcodes::All::OP_RETURN) - .push_slice(&op_bytes) - .into_script(), - }; - - tx.output = vec![consensus_output]; - - self.finalize_tx( - epoch_id, - &mut tx, - DUST_UTXO_LIMIT, - 0, - max_tx_size, - get_satoshis_per_byte(&self.config), - &mut utxos, - signer, - )?; - - increment_btc_ops_sent_counter(); - - info!( - "Miner node: submitting vote for aggregate key op - {}", - public_key.to_hex() - ); - - Some(tx) - } - - #[cfg(not(test))] - /// Build a vote-for-aggregate-key burn op tx - fn build_vote_for_aggregate_key_tx( - &mut self, - _epoch_id: StacksEpochId, - _payload: VoteForAggregateKeyOp, - _signer: &mut BurnchainOpSigner, - _utxo_to_use: Option, - ) -> Option { - unimplemented!() - } - #[cfg(not(test))] fn build_pre_stacks_tx( &mut self, @@ -1242,92 +1149,6 @@ impl BitcoinRegtestController { Some(tx) } - #[cfg(not(test))] - fn build_stack_stx_tx( - &mut self, - _epoch_id: StacksEpochId, - _payload: StackStxOp, - _signer: &mut BurnchainOpSigner, - _utxo_to_use: Option, - ) -> Option { - unimplemented!() - } - - #[cfg(test)] - fn build_stack_stx_tx( - &mut self, - epoch_id: StacksEpochId, - payload: StackStxOp, - signer: &mut BurnchainOpSigner, - utxo_to_use: Option, - ) -> Option { - let public_key = signer.get_public_key(); - let max_tx_size = 250; - - let (mut tx, mut utxos) = if let Some(utxo) = utxo_to_use { - ( - Transaction { - input: vec![], - output: vec![], - version: 1, - lock_time: 0, - }, - UTXOSet { - bhh: BurnchainHeaderHash::zero(), - utxos: vec![utxo], - }, - ) - } else { - self.prepare_tx( - epoch_id, - &public_key, - DUST_UTXO_LIMIT + max_tx_size * get_satoshis_per_byte(&self.config), - None, - None, - 0, - )? 
- }; - - // Serialize the payload - let op_bytes = { - let mut bytes = self.config.burnchain.magic_bytes.as_bytes().to_vec(); - payload.consensus_serialize(&mut bytes).ok()?; - bytes - }; - - let consensus_output = TxOut { - value: 0, - script_pubkey: Builder::new() - .push_opcode(opcodes::All::OP_RETURN) - .push_slice(&op_bytes) - .into_script(), - }; - - tx.output = vec![consensus_output]; - tx.output - .push(payload.reward_addr.to_bitcoin_tx_out(DUST_UTXO_LIMIT)); - - self.finalize_tx( - epoch_id, - &mut tx, - DUST_UTXO_LIMIT, - 0, - max_tx_size, - get_satoshis_per_byte(&self.config), - &mut utxos, - signer, - )?; - - increment_btc_ops_sent_counter(); - - info!( - "Miner node: submitting stack-stx op - {}", - public_key.to_hex() - ); - - Some(tx) - } - fn magic_bytes(&self) -> Vec { #[cfg(test)] { @@ -1641,7 +1462,7 @@ impl BitcoinRegtestController { ) { Some(utxos) => utxos, None => { - warn!( + debug!( "No UTXOs for {} ({}) in epoch {}", &public_key.to_hex(), &addr2str(&addr), @@ -2004,15 +1825,10 @@ impl BitcoinRegtestController { BlockstackOperationType::TransferStx(payload) => { self.build_transfer_stacks_tx(epoch_id, payload, op_signer, None) } - BlockstackOperationType::StackStx(_payload) => { - self.build_stack_stx_tx(epoch_id, _payload, op_signer, None) - } + BlockstackOperationType::StackStx(_payload) => unimplemented!(), BlockstackOperationType::DelegateStx(payload) => { self.build_delegate_stacks_tx(epoch_id, payload, op_signer, None) } - BlockstackOperationType::VoteForAggregateKey(payload) => { - self.build_vote_for_aggregate_key_tx(epoch_id, payload, op_signer, None) - } }; transaction.map(|tx| SerializedTx::new(tx)) diff --git a/testnet/stacks-node/src/burnchains/mocknet_controller.rs b/testnet/stacks-node/src/burnchains/mocknet_controller.rs index e31d843c05d..7b43ce1c8f7 100644 --- a/testnet/stacks-node/src/burnchains/mocknet_controller.rs +++ b/testnet/stacks-node/src/burnchains/mocknet_controller.rs @@ -10,7 +10,7 @@ use stacks::chainstate::burn::db::sortdb::{SortitionDB, SortitionHandleTx}; use stacks::chainstate::burn::operations::leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS; use stacks::chainstate::burn::operations::{ BlockstackOperationType, DelegateStxOp, LeaderBlockCommitOp, LeaderKeyRegisterOp, PreStxOp, - StackStxOp, TransferStxOp, VoteForAggregateKeyOp, + StackStxOp, TransferStxOp, }; use stacks::chainstate::burn::BlockSnapshot; use stacks::core::{ @@ -249,13 +249,6 @@ impl BurnchainController for MocknetController { ..payload }) } - BlockstackOperationType::VoteForAggregateKey(payload) => { - BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { - block_height: next_block_header.block_height, - burn_header_hash: next_block_header.block_hash, - ..payload - }) - } }; ops.push(op); } diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index 7901b6d8751..18640a5f454 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -1,7 +1,6 @@ -use std::collections::{HashMap, HashSet}; -use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; +use std::collections::HashSet; +use std::net::{SocketAddr, ToSocketAddrs}; use std::path::PathBuf; -use std::str::FromStr; use std::sync::{Arc, Mutex}; use std::time::Duration; use std::{fs, thread}; @@ -10,8 +9,6 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; use lazy_static::lazy_static; use rand::RngCore; -use serde::Deserialize; -use 
stacks::burnchains::affirmation::AffirmationMap; use stacks::burnchains::bitcoin::BitcoinNetworkType; use stacks::burnchains::{Burnchain, MagicBytes, BLOCKSTACK_MAGIC_MAINNET}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; @@ -33,7 +30,6 @@ use stacks::cost_estimates::{CostEstimator, FeeEstimator, PessimisticEstimator, use stacks::net::atlas::AtlasConfig; use stacks::net::connection::ConnectionOptions; use stacks::net::{Neighbor, NeighborKey}; -use stacks::types::chainstate::BurnchainHeaderHash; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::Error as DBError; use stacks_common::consts::SIGNER_SLOTS_PER_USER; @@ -201,74 +197,6 @@ mod tests { Some("password".to_string()) ); } - - #[test] - fn should_load_affirmation_map() { - let affirmation_string = "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa"; - let affirmation = - AffirmationMap::decode(affirmation_string).expect("Failed to decode affirmation map"); - let config = Config::from_config_file( - ConfigFile::from_str(&format!( - r#" - [[burnchain.affirmation_overrides]] - reward_cycle = 1 - affirmation = "{affirmation_string}" - "# - )) - .expect("Expected to be able to parse config file from string"), - ) - .expect("Expected to be able to parse affirmation map from file"); - - assert_eq!(config.burnchain.affirmation_overrides.len(), 1); - assert_eq!(config.burnchain.affirmation_overrides.get(&0), None); - assert_eq!( - config.burnchain.affirmation_overrides.get(&1), - Some(&affirmation) - ); - } - - #[test] - fn should_fail_to_load_invalid_affirmation_map() { - let bad_affirmation_string = "bad_map"; - let file = ConfigFile::from_str(&format!( - r#" - [[burnchain.affirmation_overrides]] - reward_cycle = 1 - affirmation = "{bad_affirmation_string}" - "# - )) - .expect("Expected to be able to parse config file from string"); - - assert!(Config::from_config_file(file).is_err()); - } - - #[test] - fn should_load_empty_affirmation_map() { - let config = Config::from_config_file( - ConfigFile::from_str(r#""#) - .expect("Expected to be able to parse config file from string"), - ) - .expect("Expected to be able to parse affirmation map from file"); - - assert!(config.burnchain.affirmation_overrides.is_empty()); - } - - #[test] - fn should_include_xenon_default_affirmation_overrides() { - let config = Config::from_config_file( - ConfigFile::from_str( - r#" - [burnchain] - chain = "bitcoin" - mode = "xenon" - "#, - ) - .expect("Expected to be able to parse config file from string"), - ) - .expect("Expected to be able to parse affirmation map from file"); - // Should default add xenon affirmation overrides - assert_eq!(config.burnchain.affirmation_overrides.len(), 3); - } } impl ConfigFile { @@ -294,17 +222,15 @@ impl ConfigFile { } pub fn xenon() -> ConfigFile { - let mut burnchain = BurnchainConfigFile { + let burnchain = BurnchainConfigFile { mode: Some("xenon".to_string()), rpc_port: Some(18332), peer_port: Some(18333), - peer_host: Some("bitcoind.testnet.stacks.co".to_string()), + peer_host: Some("bitcoind.xenon.blockstack.org".to_string()), magic_bytes: Some("T2".into()), ..BurnchainConfigFile::default() }; - 
burnchain.add_affirmation_overrides_xenon(); - let node = NodeConfigFile { bootstrap_node: Some("029266faff4c8e0ca4f934f34996a96af481df94a89b0c9bd515f3536a95682ddc@seed.testnet.hiro.so:30444".to_string()), miner: Some(false), @@ -537,31 +463,6 @@ impl Config { return; } - if let Some(first_burn_block_height) = self.burnchain.first_burn_block_height { - debug!( - "Override first_block_height from {} to {}", - burnchain.first_block_height, first_burn_block_height - ); - burnchain.first_block_height = first_burn_block_height; - } - - if let Some(first_burn_block_timestamp) = self.burnchain.first_burn_block_timestamp { - debug!( - "Override first_block_timestamp from {} to {}", - burnchain.first_block_timestamp, first_burn_block_timestamp - ); - burnchain.first_block_timestamp = first_burn_block_timestamp; - } - - if let Some(first_burn_block_hash) = &self.burnchain.first_burn_block_hash { - debug!( - "Override first_burn_block_hash from {} to {}", - burnchain.first_block_hash, first_burn_block_hash - ); - burnchain.first_block_hash = BurnchainHeaderHash::from_hex(&first_burn_block_hash) - .expect("Invalid first_burn_block_hash"); - } - if let Some(pox_prepare_length) = self.burnchain.pox_prepare_length { debug!("Override pox_prepare_length to {pox_prepare_length}"); burnchain.pox_constants.prepare_length = pox_prepare_length; @@ -581,28 +482,7 @@ impl Config { } if let Some(epochs) = &self.burnchain.epochs { - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch10) - { - // Epoch 1.0 start height can be equal to the first block height iff epoch 2.0 - // start height is also equal to the first block height. - assert!( - epoch.start_height <= burnchain.first_block_height, - "FATAL: Epoch 1.0 start height must be at or before the first block height" - ); - } - - if let Some(epoch) = epochs - .iter() - .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch20) - { - assert_eq!( - epoch.start_height, burnchain.first_block_height, - "FATAL: Epoch 2.0 start height must match the first block height" - ); - } - + // Iterate through the epochs vector and find the item where epoch_id == StacksEpochId::Epoch22 if let Some(epoch) = epochs .iter() .find(|epoch| epoch.epoch_id == StacksEpochId::Epoch21) @@ -672,7 +552,9 @@ impl Config { } // check if the Epoch 3.0 burnchain settings as configured are going to be valid. - self.check_nakamoto_config(&burnchain); + if self.burnchain.mode == "nakamoto-neon" { + self.check_nakamoto_config(&burnchain); + } } fn check_nakamoto_config(&self, burnchain: &Burnchain) { @@ -828,6 +710,7 @@ impl Config { ); } + // epochs must be a prefix of [1.0, 2.0, 2.05, 2.1] let expected_list = [ StacksEpochId::Epoch10, StacksEpochId::Epoch20, @@ -985,8 +868,10 @@ impl Config { node.require_affirmed_anchor_blocks = false; } - if node.stacker || node.miner { + if (node.stacker || node.miner) && burnchain.mode == "nakamoto-neon" { node.add_miner_stackerdb(is_mainnet); + } + if (node.stacker || node.miner) && burnchain.mode == "nakamoto-neon" { node.add_signers_stackerdbs(is_mainnet); } @@ -1226,7 +1111,6 @@ impl Config { filter_origins: miner_config.filter_origins, }, miner_status, - confirm_microblocks: true, } } @@ -1289,9 +1173,6 @@ pub struct BurnchainConfig { pub leader_key_tx_estimated_size: u64, pub block_commit_tx_estimated_size: u64, pub rbf_fee_increment: u64, - pub first_burn_block_height: Option, - pub first_burn_block_timestamp: Option, - pub first_burn_block_hash: Option, /// Custom override for the definitions of the epochs. 
This will only be applied for testnet and /// regtest nodes. pub epochs: Option>, @@ -1302,7 +1183,6 @@ pub struct BurnchainConfig { pub sunset_end: Option, pub wallet_name: String, pub ast_precheck_size_height: Option, - pub affirmation_overrides: HashMap, } impl BurnchainConfig { @@ -1330,9 +1210,6 @@ impl BurnchainConfig { leader_key_tx_estimated_size: LEADER_KEY_TX_ESTIM_SIZE, block_commit_tx_estimated_size: BLOCK_COMMIT_TX_ESTIM_SIZE, rbf_fee_increment: DEFAULT_RBF_FEE_RATE_INCREMENT, - first_burn_block_height: None, - first_burn_block_timestamp: None, - first_burn_block_hash: None, epochs: None, pox_2_activation: None, pox_prepare_length: None, @@ -1341,7 +1218,6 @@ impl BurnchainConfig { sunset_end: None, wallet_name: "".to_string(), ast_precheck_size_height: None, - affirmation_overrides: HashMap::new(), } } pub fn get_rpc_url(&self, wallet: Option) -> String { @@ -1396,12 +1272,6 @@ pub const EPOCH_CONFIG_2_4_0: &'static str = "2.4"; pub const EPOCH_CONFIG_2_5_0: &'static str = "2.5"; pub const EPOCH_CONFIG_3_0_0: &'static str = "3.0"; -#[derive(Clone, Deserialize, Default, Debug)] -pub struct AffirmationOverride { - pub reward_cycle: u64, - pub affirmation: String, -} - #[derive(Clone, Deserialize, Default, Debug)] pub struct BurnchainConfigFile { pub chain: Option, @@ -1424,9 +1294,6 @@ pub struct BurnchainConfigFile { pub block_commit_tx_estimated_size: Option, pub rbf_fee_increment: Option, pub max_rbf: Option, - pub first_burn_block_height: Option, - pub first_burn_block_timestamp: Option, - pub first_burn_block_hash: Option, pub epochs: Option>, pub pox_prepare_length: Option, pub pox_reward_length: Option, @@ -1435,38 +1302,9 @@ pub struct BurnchainConfigFile { pub sunset_end: Option, pub wallet_name: Option, pub ast_precheck_size_height: Option, - pub affirmation_overrides: Option>, } impl BurnchainConfigFile { - /// Add affirmation overrides required to sync Xenon Testnet node. - /// - /// The Xenon Testnet Stacks 2.4 activation height occurred before the finalized SIP-024 updates and release of the stacks-node versioned 2.4.0.0.0. - /// This caused the Stacks Xenon testnet to undergo a deep reorg when 2.4.0.0.0 was finalized. This deep reorg meant that 3 reward cycles were - /// invalidated, which requires overrides in the affirmation map to continue correct operation. Those overrides are required for cycles 413, 414, and 415. 
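// An override entry from the config file is decoded the same way -- a sketch, using the
// `AffirmationOverride` struct above (the affirmation string is an illustrative
// placeholder; in practice it holds one character per reward cycle):
//
//     let ao = AffirmationOverride {
//         reward_cycle: 413,
//         affirmation: "npn".to_string(),
//     };
//     let map = AffirmationMap::decode(&ao.affirmation).expect("invalid affirmation");
//     affirmation_overrides.insert(ao.reward_cycle, map);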
- pub fn add_affirmation_overrides_xenon(&mut self) { - let default_overrides = vec![ - AffirmationOverride { - reward_cycle: 413, - affirmation: "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpa".to_string() - }, - AffirmationOverride { - reward_cycle: 414, - affirmation: "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpaa".to_string() - }, - AffirmationOverride { - reward_cycle: 415, - affirmation: "nnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnpppppnnnnnnnnnnnnnnnnnnnnnnnpppppppppppppppnnnnnnnnnnnnnnnnnnnnnnnppppppppppnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnppppppppnnnnnnnnnnnnnnnnnnnnnnnppnppnnnnnnnnnnnnnnnnnnnnnnnppppnnnnnnnnnnnnnnnnnnnnnnnnnppppppnnnnnnnnnnnnnnnnnnnnnnnnnppnnnnnnnnnnnnnnnnnnnnnnnnnpppppppnnnnnnnnnnnnnnnnnnnnnnnnnnpnnnnnnnnnnnnnnnnnnnnnnnnnpppnppppppppppppppnnppppnpaaa".to_string() - }]; - if let Some(affirmation_overrides) = self.affirmation_overrides.as_mut() { - for affirmation in default_overrides { - affirmation_overrides.push(affirmation); - } - } else { - self.affirmation_overrides = Some(default_overrides); - }; - } - fn into_config_default( mut self, default_burnchain_config: BurnchainConfig, @@ -1475,7 +1313,6 @@ impl BurnchainConfigFile { if self.magic_bytes.is_none() { self.magic_bytes = ConfigFile::xenon().burnchain.unwrap().magic_bytes; } - self.add_affirmation_overrides_xenon(); } let mode = self.mode.unwrap_or(default_burnchain_config.mode); @@ -1494,19 +1331,6 @@ impl BurnchainConfigFile { } } - let mut affirmation_overrides = HashMap::new(); - if let Some(aos) = self.affirmation_overrides { - for ao in aos { - let Some(affirmation_map) = AffirmationMap::decode(&ao.affirmation) else { - return Err(format!( - "Invalid affirmation override for reward cycle {}: {}", - ao.reward_cycle, ao.affirmation - )); - }; - affirmation_overrides.insert(ao.reward_cycle, affirmation_map); - } - } - let mut config = BurnchainConfig { chain: self.chain.unwrap_or(default_burnchain_config.chain), chain_id: if is_mainnet { @@ -1579,16 +1403,6 @@ impl BurnchainConfigFile { rbf_fee_increment: self .rbf_fee_increment .unwrap_or(default_burnchain_config.rbf_fee_increment), - first_burn_block_height: self - .first_burn_block_height - .or(default_burnchain_config.first_burn_block_height), - first_burn_block_timestamp: self - .first_burn_block_timestamp - .or(default_burnchain_config.first_burn_block_timestamp), - first_burn_block_hash: self - .first_burn_block_hash - .clone() - .or(default_burnchain_config.first_burn_block_hash.clone()), // will be overwritten below epochs: default_burnchain_config.epochs, ast_precheck_size_height: self.ast_precheck_size_height, @@ -1606,7 +1420,6 @@ impl BurnchainConfigFile { pox_prepare_length: self .pox_prepare_length .or(default_burnchain_config.pox_prepare_length), - affirmation_overrides, }; if let 
BitcoinNetworkType::Mainnet = config.get_bitcoin_network().1 { @@ -1617,13 +1430,6 @@ impl BurnchainConfigFile { { return Err("PoX-2 parameters are not configurable in mainnet".into()); } - // Check that the first burn block options are not set in mainnet - if config.first_burn_block_height.is_some() - || config.first_burn_block_timestamp.is_some() - || config.first_burn_block_hash.is_some() - { - return Err("First burn block parameters are not configurable in mainnet".into()); - } } if let Some(ref conf_epochs) = self.epochs { @@ -1958,18 +1764,6 @@ impl Default for NodeConfig { } impl NodeConfig { - /// Get a SocketAddr for this node's RPC endpoint which uses the loopback address - pub fn get_rpc_loopback(&self) -> Option { - let rpc_port = SocketAddr::from_str(&self.rpc_bind) - .or_else(|e| { - error!("Could not parse node.rpc_bind configuration setting as SocketAddr: {e}"); - Err(()) - }) - .ok()? - .port(); - Some(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), rpc_port)) - } - pub fn add_signers_stackerdbs(&mut self, is_mainnet: bool) { for signer_set in 0..2 { for message_id in 0..SIGNER_SLOTS_PER_USER { diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 665334e924c..722ddc7af03 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -1,7 +1,5 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use std::sync::mpsc::{channel, Receiver, Sender}; -use std::sync::Mutex; use std::thread::sleep; use std::time::Duration; @@ -19,12 +17,10 @@ use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::BlockEventDispatcher; use stacks::chainstate::nakamoto::NakamotoBlock; use stacks::chainstate::stacks::address::PoxAddress; -use stacks::chainstate::stacks::boot::{ - NakamotoSignerEntry, PoxStartCycleInfo, RewardSet, RewardSetData, SIGNERS_NAME, -}; +use stacks::chainstate::stacks::boot::RewardSetData; use stacks::chainstate::stacks::db::accounts::MinerReward; use stacks::chainstate::stacks::db::unconfirmed::ProcessedUnconfirmedState; -use stacks::chainstate::stacks::db::{MinerRewardInfo, StacksBlockHeaderTypes, StacksHeaderInfo}; +use stacks::chainstate::stacks::db::{MinerRewardInfo, StacksHeaderInfo}; use stacks::chainstate::stacks::events::{ StackerDBChunksEvent, StacksBlockEventData, StacksTransactionEvent, StacksTransactionReceipt, TransactionOrigin, @@ -40,12 +36,9 @@ use stacks::net::api::postblock_proposal::{ }; use stacks::net::atlas::{Attachment, AttachmentInstance}; use stacks::net::stackerdb::StackerDBEventDispatcher; -use stacks::util::hash::to_hex; -use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, StacksBlockId}; -use stacks_common::util::hash::{bytes_to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::MessageSignature; +use stacks_common::util::hash::bytes_to_hex; use super::config::{EventKeyType, EventObserverConfig}; @@ -80,31 +73,6 @@ pub const PATH_BLOCK_PROCESSED: &str = "new_block"; pub const PATH_ATTACHMENT_PROCESSED: &str = "attachments/new"; pub const PATH_PROPOSAL_RESPONSE: &str = "proposal_response"; -pub static STACKER_DB_CHANNEL: StackerDBChannel = StackerDBChannel::new(); - -/// This struct receives StackerDB event callbacks without registering -/// over the JSON/RPC interface. 
To ensure that any event observer
-/// uses the same channel, we use a lazy_static global for the channel (this
-/// implements a singleton using STACKER_DB_CHANNEL).
-///
-/// This is in place because a Nakamoto miner needs to receive
-/// StackerDB events. It could either poll the database (seems like a
-/// bad idea) or listen for events. Registering for RPC callbacks
-/// seems bad. So instead, it uses a singleton sync channel.
-pub struct StackerDBChannel {
-    sender_info: Mutex<Option<InnerStackerDBChannel>>,
-}
-
-#[derive(Clone)]
-struct InnerStackerDBChannel {
-    /// A channel for sending the chunk events to the listener
-    sender: Sender<StackerDBChunksEvent>,
-    /// Does the listener want to receive `.signers` chunks?
-    interested_in_signers: bool,
-    /// Which StackerDB contracts is the listener interested in?
-    other_interests: Vec<QualifiedContractIdentifier>,
-}
-
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct MinedBlockEvent {
     pub target_burn_height: u64,
@@ -133,164 +101,7 @@ pub struct MinedNakamotoBlockEvent {
     pub stacks_height: u64,
     pub block_size: u64,
     pub cost: ExecutionCost,
-    pub miner_signature: MessageSignature,
-    pub signer_signature_hash: Sha512Trunc256Sum,
     pub tx_events: Vec<TransactionEvent>,
-    pub signer_bitvec: String,
-}
-
-impl InnerStackerDBChannel {
-    pub fn new_miner_receiver() -> (Receiver<StackerDBChunksEvent>, Self) {
-        let (sender, recv) = channel();
-        let sender_info = Self {
-            sender,
-            interested_in_signers: true,
-            other_interests: vec![],
-        };
-
-        (recv, sender_info)
-    }
-}
-
-impl StackerDBChannel {
-    pub const fn new() -> Self {
-        Self {
-            sender_info: Mutex::new(None),
-        }
-    }
-
-    /// Consume the receiver for the StackerDBChannel and drop the senders. This should be done
-    /// before another interested thread can subscribe to events, but it is not absolutely necessary
-    /// to do so (it would just result in temporary over-use of memory while the prior channel is still
-    /// open).
-    ///
-    /// The StackerDBChannel's receiver is guarded with a Mutex, so that ownership can
-    /// be taken by different threads without unsafety.
-    pub fn replace_receiver(&self, receiver: Receiver<StackerDBChunksEvent>) {
-        // not strictly necessary, but do this rather than mark the `receiver` argument as unused
-        // so that we're explicit about the fact that `replace_receiver` consumes.
-        drop(receiver);
-        let mut guard = self
-            .sender_info
-            .lock()
-            .expect("FATAL: poisoned StackerDBChannel lock");
-        guard.take();
-    }
-
-    /// Create a new event receiver channel for receiving events relevant to the miner coordinator,
-    /// dropping the old StackerDB event sender channels if they are still registered.
-    /// Returns the new receiver channel and a bool indicating whether or not sender channels were
-    /// still in place.
-    ///
-    /// The StackerDBChannel senders are guarded by mutexes so that they can be replaced
-    /// by different threads without unsafety.
-    pub fn register_miner_coordinator(&self) -> (Receiver<StackerDBChunksEvent>, bool) {
-        let mut sender_info = self
-            .sender_info
-            .lock()
-            .expect("FATAL: poisoned StackerDBChannel lock");
-        let (recv, new_sender) = InnerStackerDBChannel::new_miner_receiver();
-        let replaced_receiver = sender_info.replace(new_sender).is_some();
-
-        (recv, replaced_receiver)
-    }
-
-    /// Is there a thread holding the receiver, and is it interested in chunks events from `stackerdb`?
-    /// Returns a sending channel to broadcast the event to if so, and `None` if not.
-
-fn serialize_u128_as_string<S>(value: &u128, serializer: S) -> Result<S::Ok, S::Error>
-where
-    S: serde::Serializer,
-{
-    serializer.serialize_str(&value.to_string())
-}
-
-fn serialize_pox_addresses<S>(value: &Vec<PoxAddress>, serializer: S) -> Result<S::Ok, S::Error>
-where
-    S: serde::Serializer,
-{
-    serializer.collect_seq(value.iter().cloned().map(|a| a.to_b58()))
-}
-
-fn serialize_optional_u128_as_string<S>(
-    value: &Option<u128>,
-    serializer: S,
-) -> Result<S::Ok, S::Error>
-where
-    S: serde::Serializer,
-{
-    match value {
-        Some(v) => serializer.serialize_str(&v.to_string()),
-        None => serializer.serialize_none(),
-    }
-}
-
-fn hex_serialize<S: serde::Serializer>(addr: &[u8; 33], s: S) -> Result<S::Ok, S::Error> {
-    s.serialize_str(&to_hex(addr))
-}
-
-#[derive(Debug, PartialEq, Clone, Serialize)]
-pub struct RewardSetEventPayload {
-    #[serde(serialize_with = "serialize_pox_addresses")]
-    pub rewarded_addresses: Vec<PoxAddress>,
-    pub start_cycle_state: PoxStartCycleInfo,
-    #[serde(skip_serializing_if = "Option::is_none", default)]
-    // only generated for nakamoto reward sets
-    pub signers: Option<Vec<NakamotoSignerEntryPayload>>,
-    #[serde(serialize_with = "serialize_optional_u128_as_string")]
-    pub pox_ustx_threshold: Option<u128>,
-}
-
-#[derive(Debug, PartialEq, Clone, Serialize)]
-pub struct NakamotoSignerEntryPayload {
-    #[serde(serialize_with = "hex_serialize")]
-    pub signing_key: [u8; 33],
-    #[serde(serialize_with = "serialize_u128_as_string")]
-    pub stacked_amt: u128,
-    pub weight: u32,
-}
-
-impl RewardSetEventPayload {
-    pub fn signer_entry_to_payload(entry: &NakamotoSignerEntry) -> NakamotoSignerEntryPayload {
-        NakamotoSignerEntryPayload {
-            signing_key: entry.signing_key,
-            stacked_amt: entry.stacked_amt,
-            weight: entry.weight,
-        }
-    }
-    pub fn from_reward_set(reward_set: &RewardSet) -> Self {
-        Self {
-            rewarded_addresses: reward_set.rewarded_addresses.clone(),
-            start_cycle_state: reward_set.start_cycle_state.clone(),
-            signers: reward_set
-                .signers
-                .as_ref()
-                .map(|signers| signers.iter().map(Self::signer_entry_to_payload).collect()),
-            pox_ustx_threshold: reward_set.pox_ustx_threshold,
-        }
-    }
-}
 
 impl EventObserver {
@@ -577,7 +388,6 @@ impl EventObserver {
         mblock_confirmed_consumed: &ExecutionCost,
         pox_constants: &PoxConstants,
         reward_set_data: &Option<RewardSetData>,
-        signer_bitvec_opt: &Option<BitVec<4000>>,
     ) -> serde_json::Value {
         // Serialize events to JSON
         let serialized_events: Vec<serde_json::Value> = filtered_events
@@ -597,20 +407,6 @@ impl EventObserver {
             tx_index += 1;
         }
 
-        let signer_bitvec_value = signer_bitvec_opt
-            .as_ref()
-            .map(|bitvec| serde_json::to_value(bitvec).unwrap_or_default())
-            .unwrap_or_default();
-
-        let (reward_set_value, cycle_number_value) = match &reward_set_data {
-            Some(data) => (
-                serde_json::to_value(&RewardSetEventPayload::from_reward_set(&data.reward_set))
-                    .unwrap_or_default(),
-                serde_json::to_value(data.cycle_number).unwrap_or_default(),
-            ),
-            None => (serde_json::Value::Null, serde_json::Value::Null),
-        };
-
         // Wrap events
         let mut payload = json!({
             "block_hash": format!("0x{}", block.block_hash),
@@ -635,29 +431,16 @@ impl EventObserver {
            "pox_v1_unlock_height": pox_constants.v1_unlock_height,
            "pox_v2_unlock_height": pox_constants.v2_unlock_height,
            "pox_v3_unlock_height": pox_constants.v3_unlock_height,
-            "signer_bitvec": signer_bitvec_value,
-            "reward_set": reward_set_value,
-            "cycle_number": cycle_number_value,
         });
 
-        let as_object_mut = payload.as_object_mut().unwrap();
-
-        if let StacksBlockHeaderTypes::Nakamoto(ref header) = &metadata.anchored_header {
-            as_object_mut.insert(
-                "signer_signature_hash".into(),
-                format!("0x{}", header.signer_signature_hash()).into(),
-            );
-            as_object_mut.insert(
-                "signer_signature".into(),
-                format!("0x{}", header.signer_signature_hash()).into(),
+        if let Some(reward_set_data) = reward_set_data {
+            payload.as_object_mut().unwrap().insert(
+                "reward_set".to_string(),
+                serde_json::to_value(&reward_set_data.reward_set).unwrap_or_default(),
             );
-            as_object_mut.insert(
-                "miner_signature".into(),
-                format!("0x{}", &header.miner_signature).into(),
-            );
-            as_object_mut.insert(
-                "signer_signature".into(),
-                format!("0x{}", &header.signer_signature).into(),
+            payload.as_object_mut().unwrap().insert(
+                "cycle_number".to_string(),
+                serde_json::to_value(reward_set_data.cycle_number).unwrap_or_default(),
             );
         }
 
@@ -819,7 +602,6 @@ impl BlockEventDispatcher for EventDispatcher {
         mblock_confirmed_consumed: &ExecutionCost,
         pox_constants: &PoxConstants,
         reward_set_data: &Option<RewardSetData>,
-        signer_bitvec: &Option<BitVec<4000>>,
     ) {
         self.process_chain_tip(
             block,
@@ -836,7 +618,6 @@ impl BlockEventDispatcher for EventDispatcher {
             mblock_confirmed_consumed,
             pox_constants,
             reward_set_data,
-            signer_bitvec,
         );
     }
 
@@ -1018,7 +799,6 @@ impl EventDispatcher {
         mblock_confirmed_consumed: &ExecutionCost,
         pox_constants: &PoxConstants,
         reward_set_data: &Option<RewardSetData>,
-        signer_bitvec: &Option<BitVec<4000>>,
     ) {
         let all_receipts = receipts.to_owned();
         let (dispatch_matrix, events) = self.create_dispatch_matrix_and_event_vector(&all_receipts);
@@ -1069,7 +849,6 @@ impl EventDispatcher {
                 mblock_confirmed_consumed,
                 pox_constants,
                 reward_set_data,
-                signer_bitvec,
             );
 
             // Send payload
@@ -1244,12 +1023,6 @@ impl EventDispatcher {
             return;
         }
 
-        let signer_bitvec = serde_json::to_value(block.header.signer_bitvec.clone())
-            .unwrap_or_default()
-            .as_str()
-            .unwrap_or_default()
-            .to_string();
-
         let payload = serde_json::to_value(MinedNakamotoBlockEvent {
             target_burn_height,
             block_hash: block.header.block_hash().to_string(),
@@ -1258,9 +1031,6 @@ impl EventDispatcher {
             block_size: block_size_bytes,
             cost: consumed.clone(),
             tx_events,
-            miner_signature: block.header.miner_signature.clone(),
-            signer_signature_hash: block.header.signer_signature_hash(),
-            signer_bitvec,
         })
         .unwrap();
 
@@ -1274,30 +1044,19 @@ impl EventDispatcher {
     pub fn process_new_stackerdb_chunks(
         &self,
         contract_id: QualifiedContractIdentifier,
-        modified_slots: Vec<StackerDBChunkData>,
+        new_chunks: Vec<StackerDBChunkData>,
     ) {
         let interested_observers = self.filter_observers(&self.stackerdb_observers_lookup, false);
-        let interested_receiver = STACKER_DB_CHANNEL.is_active(&contract_id);
-        if interested_observers.is_empty() && interested_receiver.is_none() {
+        if interested_observers.is_empty() {
             return;
         }
 
-        let event = StackerDBChunksEvent {
+        let payload = serde_json::to_value(StackerDBChunksEvent {
             contract_id,
-            modified_slots,
-        };
-        let payload = serde_json::to_value(&event)
-            .expect("FATAL: failed to serialize StackerDBChunksEvent to JSON");
-
-        if let Some(channel) = interested_receiver {
-            if let Err(send_err) = channel.send(event) {
-                warn!(
-                    "Failed to send StackerDB event to WSTS coordinator channel.
Miner thread may have exited."; - "err" => ?send_err - ); - } - } + modified_slots: new_chunks, + }) + .expect("FATAL: failed to serialize StackerDBChunksEvent to JSON"); for observer in interested_observers.iter() { observer.send_stackerdb_chunks(&payload); @@ -1435,7 +1194,6 @@ mod test { use stacks::burnchains::{PoxConstants, Txid}; use stacks::chainstate::stacks::db::StacksHeaderInfo; use stacks::chainstate::stacks::StacksBlock; - use stacks_common::bitvec::BitVec; use stacks_common::types::chainstate::{BurnchainHeaderHash, StacksBlockId}; use crate::event_dispatcher::EventObserver; @@ -1459,7 +1217,6 @@ mod test { let anchored_consumed = ExecutionCost::zero(); let mblock_confirmed_consumed = ExecutionCost::zero(); let pox_constants = PoxConstants::testnet_default(); - let signer_bitvec = BitVec::zeros(2).expect("Failed to create BitVec with length 2"); let payload = observer.make_new_block_processed_payload( filtered_events, @@ -1476,7 +1233,6 @@ mod test { &mblock_confirmed_consumed, &pox_constants, &None, - &Some(signer_bitvec.clone()), ); assert_eq!( payload @@ -1486,15 +1242,5 @@ mod test { .unwrap(), pox_constants.v1_unlock_height as u64 ); - - let expected_bitvec_str = serde_json::to_value(signer_bitvec) - .unwrap_or_default() - .as_str() - .unwrap() - .to_string(); - assert_eq!( - payload.get("signer_bitvec").unwrap().as_str().unwrap(), - expected_bitvec_str - ); } } diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index cb512969c05..bf54c1601df 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -41,7 +41,7 @@ use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvi use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::db::blocks::DummyEventDispatcher; use stacks::chainstate::stacks::db::StacksChainState; -#[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] +#[cfg(not(target_env = "msvc"))] use tikv_jemallocator::Jemalloc; pub use self::burnchains::{ @@ -57,7 +57,7 @@ use crate::chain_data::MinerStats; use crate::neon_node::{BlockMinerThread, TipCandidate}; use crate::run_loop::boot_nakamoto; -#[cfg(not(any(target_os = "macos", target_os = "windows", target_arch = "arm")))] +#[cfg(not(target_env = "msvc"))] #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; @@ -437,11 +437,13 @@ fn main() { return; } } else if conf.burnchain.mode == "neon" - || conf.burnchain.mode == "nakamoto-neon" || conf.burnchain.mode == "xenon" || conf.burnchain.mode == "krypton" || conf.burnchain.mode == "mainnet" { + let mut run_loop = neon::RunLoop::new(conf); + run_loop.start(None, mine_start.unwrap_or(0)); + } else if conf.burnchain.mode == "nakamoto-neon" { let mut run_loop = boot_nakamoto::BootRunLoop::new(conf).unwrap(); run_loop.start(None, 0); } else { diff --git a/testnet/stacks-node/src/nakamoto_node.rs b/testnet/stacks-node/src/nakamoto_node.rs index 7b7fb32a64a..302382f1708 100644 --- a/testnet/stacks-node/src/nakamoto_node.rs +++ b/testnet/stacks-node/src/nakamoto_node.rs @@ -39,7 +39,6 @@ use crate::run_loop::RegisteredKey; pub mod miner; pub mod peer; pub mod relayer; -pub mod sign_coordinator; use self::peer::PeerThread; use self::relayer::{RelayerDirective, RelayerThread}; @@ -95,11 +94,7 @@ pub enum Error { CannotSelfSign, MiningFailure(ChainstateError), MinerSignatureError(&'static str), - SignerSignatureError(String), - /// A failure occurred while configuring the miner thread - MinerConfigurationFailed(&'static str), - /// An error 
occurred while operating as the signing coordinator - SigningCoordinatorFailure(String), + SignerSignatureError(&'static str), // The thread that we tried to send to has closed ChannelClosed, } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 3a976aecca2..d93b6c0991b 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -13,7 +13,6 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . -use std::collections::HashMap; use std::thread; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -21,16 +20,16 @@ use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; use clarity::vm::clarity::ClarityConnection; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; -use hashbrown::HashSet; use libsigner::{ - BlockProposalSigners, MessageSlotID, SignerMessage, SignerSession, StackerDBSession, + BlockResponse, RejectCode, SignerMessage, SignerSession, StackerDBSession, BLOCK_MSG_ID, + TRANSACTIONS_MSG_ID, }; -use stacks::burnchains::Burnchain; +use stacks::burnchains::{Burnchain, BurnchainParameters}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::{BlockSnapshot, ConsensusHash}; use stacks::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockVote, NakamotoChainState}; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksChainState, StacksHeaderInfo}; use stacks::chainstate::stacks::{ @@ -38,19 +37,19 @@ use stacks::chainstate::stacks::{ TenureChangeCause, TenureChangePayload, ThresholdSignature, TransactionAnchorMode, TransactionPayload, TransactionVersion, }; +use stacks::core::FIRST_BURNCHAIN_CONSENSUS_HASH; use stacks::net::stackerdb::StackerDBs; -use stacks_common::codec::read_next; +use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::types::chainstate::{StacksAddress, StacksBlockId}; -use stacks_common::types::{PrivateKey, StacksEpochId}; -use stacks_common::util::hash::Hash160; +use stacks_common::types::{ + PrivateKey, StacksEpochId, StacksHashMap as HashMap, StacksHashSet as HashSet, +}; +use stacks_common::util::hash::{Hash160, Sha512Trunc256Sum}; use stacks_common::util::vrf::VRFProof; use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; use super::relayer::RelayerThread; -use super::sign_coordinator::SignCoordinator; use super::{Config, Error as NakamotoNodeError, EventDispatcher, Keychain}; -use crate::burnchains::bitcoin_regtest_controller::burnchain_params_from_config; use crate::nakamoto_node::VRF_MOCK_MINER_KEY; use crate::run_loop::nakamoto::Globals; use crate::run_loop::RegisteredKey; @@ -59,6 +58,9 @@ use crate::{neon_node, ChainTip}; /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? const ABORT_TRY_AGAIN_MS: u64 = 200; +/// If the signers have not responded to a block proposal, how long should +/// the miner thread sleep before trying again? 
+const WAIT_FOR_SIGNERS_MS: u64 = 200;
 
 pub enum MinerDirective {
     /// The miner won sortition so they should begin a new tenure
@@ -81,6 +83,8 @@ struct ParentTenureInfo {
 }
 
 struct ParentStacksBlockInfo {
     /// Header metadata for the Stacks block we're going to build on top of
     stacks_parent_header: StacksHeaderInfo,
+    /// the total amount burned in the sortition that selected the Stacks block parent
+    parent_block_total_burn: u64,
     /// nonce to use for this new block's coinbase transaction
     coinbase_nonce: u64,
     parent_tenure: Option<ParentTenureInfo>,
@@ -140,19 +144,16 @@ impl BlockMinerThread {
     pub fn run_miner(mut self, prior_miner: Option<JoinHandle<()>>) {
         // when starting a new tenure, block the mining thread if it's currently running.
         // the new mining thread will join it (so that the new mining thread stalls, not the relayer)
-        debug!(
-            "New miner thread starting";
-            "had_prior_miner" => prior_miner.is_some(),
-            "parent_tenure_id" => %self.parent_tenure_id,
-            "thread_id" => ?thread::current().id(),
-        );
         if let Some(prior_miner) = prior_miner {
             Self::stop_miner(&self.globals, prior_miner);
         }
-        let mut stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true)
+        let miners_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet());
+        let stackerdbs = StackerDBs::connect(&self.config.get_stacker_db_file_path(), true)
             .expect("FATAL: failed to connect to stacker DB");
-
-        let mut attempts = 0;
+        let Some(miner_privkey) = self.config.miner.mining_key else {
+            warn!("No mining key configured, cannot mine");
+            return;
+        };
         // now, actually run this tenure
         loop {
             let new_block = loop {
@@ -178,37 +179,52 @@ impl BlockMinerThread {
                 }
             };
 
-            if let Some(mut new_block) = new_block {
-                if let Err(e) = self.propose_block(&new_block, &stackerdbs) {
-                    error!("Unrecoverable error while proposing block to signer set: {e:?}. Ending tenure.");
-                    return;
-                }
-
-                let (aggregate_public_key, signers_signature) = match self.coordinate_signature(
+            let sort_db = SortitionDB::open(
+                &self.config.get_burn_db_file_path(),
+                true,
+                self.burnchain.pox_constants.clone(),
+            )
+            .expect("FATAL: could not open sortition DB");
+            let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn())
+                .expect("FATAL: could not retrieve chain tip");
+            if let Some(new_block) = new_block {
+                match NakamotoBlockBuilder::make_stackerdb_block_proposal(
+                    &sort_db,
+                    &tip,
+                    &stackerdbs,
                     &new_block,
-                    &mut stackerdbs,
-                    &mut attempts,
+                    &miner_privkey,
+                    &miners_contract_id,
                 ) {
-                    Ok(x) => x,
+                    Ok(Some(chunk)) => {
+                        // Propose the block to the observing signers through the .miners stackerdb instance
+                        let miner_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet());
+                        let mut miners_stackerdb =
+                            StackerDBSession::new(&self.config.node.rpc_bind, miner_contract_id);
+                        match miners_stackerdb.put_chunk(&chunk) {
+                            Ok(ack) => {
+                                info!("Proposed block to stackerdb: {ack:?}");
+                            }
+                            Err(e) => {
+                                warn!("Failed to propose block to stackerdb: {e:?}");
+                                return;
+                            }
+                        }
+                    }
+                    Ok(None) => {
+                        warn!("Failed to propose block to stackerdb: no slot available");
+                    }
                     Err(e) => {
-                        error!("Unrecoverable error while proposing block to signer set: {e:?}. Ending tenure.");
-                        return;
+                        warn!("Failed to propose block to stackerdb: {e:?}");
                     }
-                };
+                }
+                self.globals.counters.bump_naka_proposed_blocks();
 
-                new_block.header.signer_signature = signers_signature;
-                if let Err(e) = self.broadcast(new_block.clone(), &aggregate_public_key) {
-                    warn!("Error accepting own block: {e:?}.
Will try mining again."); - continue; + if let Err(e) = + self.wait_for_signer_signature_and_broadcast(&stackerdbs, new_block.clone()) + { + warn!("Error broadcasting block: {e:?}"); } else { - info!( - "Miner: Block signed by signer set and broadcasted"; - "signer_sighash" => %new_block.header.signer_signature_hash(), - "block_hash" => %new_block.header.block_hash(), - "stacks_block_id" => %new_block.header.block_id(), - "block_height" => new_block.header.chain_length, - "consensus_hash" => %new_block.header.consensus_hash, - ); self.globals.coord().announce_new_stacks_block(); } @@ -220,12 +236,6 @@ impl BlockMinerThread { self.mined_blocks.push(new_block); } - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); let wait_start = Instant::now(); while wait_start.elapsed() < self.config.miner.wait_on_interim_blocks { thread::sleep(Duration::from_millis(ABORT_TRY_AGAIN_MS)); @@ -236,208 +246,24 @@ impl BlockMinerThread { } } - fn coordinate_signature( - &mut self, - new_block: &NakamotoBlock, - stackerdbs: &mut StackerDBs, - attempts: &mut u64, - ) -> Result<(Point, ThresholdSignature), NakamotoNodeError> { - let Some(miner_privkey) = self.config.miner.mining_key else { - return Err(NakamotoNodeError::MinerConfigurationFailed( - "No mining key configured, cannot mine", - )); - }; - let sort_db = SortitionDB::open( - &self.config.get_burn_db_file_path(), - true, - self.burnchain.pox_constants.clone(), - ) - .expect("FATAL: could not open sortition DB"); - let tip = SortitionDB::get_block_snapshot_consensus( - sort_db.conn(), - &new_block.header.consensus_hash, - ) - .expect("FATAL: could not retrieve chain tip") - .expect("FATAL: could not retrieve chain tip"); - let reward_cycle = self - .burnchain - .pox_constants - .block_height_to_reward_cycle( - self.burnchain.first_block_height, - self.burn_block.block_height, - ) - .expect("FATAL: building on a burn block that is before the first burn block"); - - let reward_info = match sort_db.get_preprocessed_reward_set_of(&tip.sortition_id) { - Ok(Some(x)) => x, - Ok(None) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "No reward set found. Cannot initialize miner coordinator.".into(), - )); - } - Err(e) => { - return Err(NakamotoNodeError::SigningCoordinatorFailure(format!( - "Failure while fetching reward set. Cannot initialize miner coordinator. {e:?}" - ))); - } - }; - - let Some(reward_set) = reward_info.known_selected_anchor_block_owned() else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Current reward cycle did not select a reward set. Cannot mine!".into(), - )); - }; - - let mut chain_state = neon_node::open_chainstate_with_faults(&self.config) - .expect("FATAL: could not open chainstate DB"); - let sortition_handle = sort_db.index_handle_at_tip(); - let Ok(aggregate_public_key) = NakamotoChainState::get_aggregate_public_key( - &mut chain_state, - &sort_db, - &sortition_handle, - &new_block, - ) else { - return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Failed to obtain the active aggregate public key. Cannot mine!".into(), - )); - }; - - #[cfg(test)] - { - // In test mode, short-circuit spinning up the SignCoordinator if the TEST_SIGNING - // channel has been created. This allows integration tests for the stacks-node - // independent of the stacks-signer. 
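The test-only short-circuit being deleted here is a reusable pattern: a cfg(test) injection point that lets integration tests skip an expensive external dependency (here, a live signer set). A minimal sketch under illustrative names, not the node's actual test plumbing:

    #[cfg(test)]
    static TEST_SIGNATURE: std::sync::Mutex<Option<Vec<u8>>> =
        std::sync::Mutex::new(None);

    fn sign_block(payload: &[u8]) -> Vec<u8> {
        // In test builds, a canned signature bypasses the real signing round.
        #[cfg(test)]
        if let Some(sig) = TEST_SIGNATURE.lock().unwrap().take() {
            return sig;
        }
        // stand-in for the real signing round
        payload.iter().rev().copied().collect()
    }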
-            if let Some(signature) =
-                crate::tests::nakamoto_integrations::TestSigningChannel::get_signature()
-            {
-                return Ok((aggregate_public_key, signature));
-            }
-        }
-
-        let miner_privkey_as_scalar = Scalar::from(miner_privkey.as_slice().clone());
-        let mut coordinator = SignCoordinator::new(
-            &reward_set,
-            reward_cycle,
-            miner_privkey_as_scalar,
-            aggregate_public_key,
-            &stackerdbs,
-            &self.config,
-        )
-        .map_err(|e| {
-            NakamotoNodeError::SigningCoordinatorFailure(format!(
-                "Failed to initialize the signing coordinator. Cannot mine! {e:?}"
-            ))
-        })?;
-
-        *attempts += 1;
-        let signature = coordinator.begin_sign(
-            new_block,
-            *attempts,
-            &tip,
-            &self.burnchain,
-            &sort_db,
-            &stackerdbs,
-        )?;
-
-        Ok((aggregate_public_key, signature))
-    }
-
-    fn propose_block(
-        &mut self,
-        new_block: &NakamotoBlock,
-        stackerdbs: &StackerDBs,
-    ) -> Result<(), NakamotoNodeError> {
-        let rpc_socket = self.config.node.get_rpc_loopback().ok_or_else(|| {
-            NakamotoNodeError::MinerConfigurationFailed("Could not parse RPC bind")
-        })?;
-        let miners_contract_id = boot_code_id(MINERS_NAME, self.config.is_mainnet());
-        let mut miners_session =
-            StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id.clone());
-        let Some(miner_privkey) = self.config.miner.mining_key else {
-            return Err(NakamotoNodeError::MinerConfigurationFailed(
-                "No mining key configured, cannot mine",
-            ));
-        };
-        let sort_db = SortitionDB::open(
-            &self.config.get_burn_db_file_path(),
-            true,
-            self.burnchain.pox_constants.clone(),
-        )
-        .expect("FATAL: could not open sortition DB");
-        let tip = SortitionDB::get_block_snapshot_consensus(
-            sort_db.conn(),
-            &new_block.header.consensus_hash,
-        )
-        .expect("FATAL: could not retrieve chain tip")
-        .expect("FATAL: could not retrieve chain tip");
-        let reward_cycle = self
-            .burnchain
-            .pox_constants
-            .block_height_to_reward_cycle(
-                self.burnchain.first_block_height,
-                self.burn_block.block_height,
-            )
-            .expect("FATAL: building on a burn block that is before the first burn block");
-
-        let proposal_msg = BlockProposalSigners {
-            block: new_block.clone(),
-            burn_height: self.burn_block.block_height,
-            reward_cycle,
-        };
-        let proposal = match NakamotoBlockBuilder::make_stackerdb_block_proposal(
-            &sort_db,
-            &tip,
-            &stackerdbs,
-            &proposal_msg,
-            &miner_privkey,
-            &miners_contract_id,
-        ) {
-            Ok(Some(chunk)) => chunk,
-            Ok(None) => {
-                warn!("Failed to propose block to stackerdb: no slot available");
-                return Ok(());
-            }
-            Err(e) => {
-                warn!("Failed to propose block to stackerdb: {e:?}");
-                return Ok(());
-            }
-        };
-
-        // Propose the block to the observing signers through the .miners stackerdb instance
-        match miners_session.put_chunk(&proposal) {
-            Ok(ack) => {
-                info!(
-                    "Proposed block to stackerdb";
-                    "signer_sighash" => %new_block.header.signer_signature_hash(),
-                    "ack_msg" => ?ack,
-                );
-            }
-            Err(e) => {
-                return Err(NakamotoNodeError::SigningCoordinatorFailure(format!(
-                    "Failed to propose block to stackerdb {e:?}"
-                )));
-            }
-        }
-
-        self.globals.counters.bump_naka_proposed_blocks();
-        Ok(())
-    }
-
     fn get_stackerdb_contract_and_slots(
         &self,
         stackerdbs: &StackerDBs,
-        msg_id: &MessageSlotID,
+        msg_id: u32,
         reward_cycle: u64,
     ) -> Result<(QualifiedContractIdentifier, HashMap<u32, StacksAddress>), NakamotoNodeError> {
         let stackerdb_contracts = stackerdbs
             .get_stackerdb_contract_ids()
             .expect("FATAL: could not get the stacker DB contract ids");
-        let signers_contract_id =
-            msg_id.stacker_db_contract(self.config.is_mainnet(), reward_cycle);
+        let signers_contract_id = NakamotoSigners::make_signers_db_contract_id(
+            reward_cycle,
+            msg_id,
+            self.config.is_mainnet(),
+        );
 
         if !stackerdb_contracts.contains(&signers_contract_id) {
             return Err(NakamotoNodeError::SignerSignatureError(
-                "No signers contract found, cannot wait for signers".into(),
+                "No signers contract found, cannot wait for signers",
             ));
         };
         // Get the slots for every signer
@@ -472,7 +298,7 @@ impl BlockMinerThread {
             .wrapping_add(1);
         let (signers_contract_id, slot_ids_addresses) = self.get_stackerdb_contract_and_slots(
             stackerdbs,
-            &MessageSlotID::Transactions,
+            TRANSACTIONS_MSG_ID,
             next_reward_cycle,
         )?;
         let slot_ids = slot_ids_addresses.keys().cloned().collect::<Vec<_>>();
@@ -536,10 +362,127 @@ impl BlockMinerThread {
         Ok(filtered_transactions.into_values().collect())
     }
 
-    fn broadcast(
+    fn wait_for_signer_signature(
         &self,
-        block: NakamotoBlock,
+        stackerdbs: &StackerDBs,
         aggregate_public_key: &Point,
+        signer_signature_hash: &Sha512Trunc256Sum,
+        signer_weights: HashMap<StacksAddress, u64>,
+    ) -> Result<ThresholdSignature, NakamotoNodeError> {
+        let reward_cycle = self
+            .burnchain
+            .block_height_to_reward_cycle(self.burn_block.block_height)
+            .expect("FATAL: no reward cycle for burn block");
+        let (signers_contract_id, slot_ids_addresses) =
+            self.get_stackerdb_contract_and_slots(stackerdbs, BLOCK_MSG_ID, reward_cycle)?;
+        let slot_ids = slot_ids_addresses.keys().cloned().collect::<Vec<_>>();
+        // If more than a threshold percentage of the signers reject the block, we should not wait any further
+        let weights: u64 = signer_weights.values().sum();
+        let rejection_threshold: u64 = (weights as f64 * 7_f64 / 10_f64).ceil() as u64;
+        let mut rejections = HashSet::new();
+        let mut rejections_weight: u64 = 0;
+        let now = Instant::now();
+        debug!("Miner: waiting for block response from reward cycle {reward_cycle} signers...");
+        while now.elapsed() < self.config.miner.wait_on_signers {
+            // Get the block responses from the signers for the block we just proposed
+            let signer_chunks = stackerdbs
+                .get_latest_chunks(&signers_contract_id, &slot_ids)
+                .expect("FATAL: could not get latest chunks from stacker DB");
+            let signer_messages: Vec<(u32, SignerMessage)> = slot_ids
+                .iter()
+                .zip(signer_chunks.into_iter())
+                .filter_map(|(slot_id, chunk)| {
+                    chunk.and_then(|chunk| {
+                        read_next::<SignerMessage, _>(&mut &chunk[..])
+                            .ok()
+                            .map(|msg| (*slot_id, msg))
+                    })
+                })
+                .collect();
+            for (signer_id, signer_message) in signer_messages {
+                match signer_message {
+                    SignerMessage::BlockResponse(BlockResponse::Accepted((hash, signature))) => {
+                        // First check that this signature is for the block we proposed and that it is valid
+                        if hash == *signer_signature_hash
+                            && signature
+                                .0
+                                .verify(aggregate_public_key, &signer_signature_hash.0)
+                        {
+                            // The signature is valid across the signer signature hash of the original proposed block
+                            // Immediately return and update the block with this new signature before appending it to the chain
+                            debug!("Miner: received a signature across the proposed block's signer signature hash ({signer_signature_hash:?}): {signature:?}");
+                            return Ok(signature);
+                        }
+                        // We received an acceptance for some unknown block hash... Useless! Ignore it.
+                        // Keep waiting for a threshold number of signers to either reject the proposed block
+                        // or for a valid signature to show up across the proposed block
+                    }
+                    SignerMessage::BlockResponse(BlockResponse::Rejected(block_rejection)) => {
+                        // First check that this block rejection is for the block we proposed
+                        if block_rejection.signer_signature_hash != *signer_signature_hash {
+                            // This rejection is not for the block we proposed, so we can ignore it
+                            continue;
+                        }
+                        if let RejectCode::SignedRejection(signature) = block_rejection.reason_code
+                        {
+                            let block_vote = NakamotoBlockVote {
+                                signer_signature_hash: *signer_signature_hash,
+                                rejected: true,
+                            };
+                            let message = block_vote.serialize_to_vec();
+                            if signature.0.verify(aggregate_public_key, &message) {
+                                // A threshold number of signers signed a denial of the proposed block
+                                // Miner will NEVER get a signed block from the signers for this particular block
+                                // Immediately return and attempt to mine a new block
+                                return Err(NakamotoNodeError::SignerSignatureError(
+                                    "Signers signed a rejection of the proposed block",
+                                ));
+                            }
+                        } else {
+                            if rejections.contains(&signer_id) {
+                                // We have already received a rejection from this signer
+                                continue;
+                            }
+
+                            // We received a rejection that is not signed. We will keep waiting for a threshold number of rejections.
+                            // Ensure that we do not double count a rejection from the same signer.
+                            rejections.insert(signer_id);
+                            rejections_weight = rejections_weight.saturating_add(
+                                *signer_weights
+                                    .get(
+                                        slot_ids_addresses
+                                            .get(&signer_id)
+                                            .expect("FATAL: signer not found in slot ids"),
+                                    )
+                                    .expect("FATAL: signer not found in signer weights"),
+                            );
+                            if rejections_weight > rejection_threshold {
+                                // A threshold number of signers rejected the proposed block.
+                                // Miner will likely never get a signed block from the signers for this particular block
+                                // Return and attempt to mine a new block
+                                return Err(NakamotoNodeError::SignerSignatureError(
+                                    "Threshold number of signers rejected the proposed block",
+                                ));
+                            }
+                        }
+                    }
+                    _ => {} // Any other message is ignored
+                }
+            }
+            // We have not received a signed block or enough information to reject the proposed block. Wait a bit and try again.
+            thread::sleep(Duration::from_millis(WAIT_FOR_SIGNERS_MS));
+        }
+        // We have waited for the signers for too long: stop waiting so we can propose a new block
+        debug!("Miner: exceeded signer signature timeout. Will propose a new block");
+        Err(NakamotoNodeError::SignerSignatureError(
+            "Timed out waiting for signers",
+        ))
+    }
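The 70% rejection threshold above is worth pinning down with concrete numbers. A self-contained sketch of the same arithmetic; the 7/10 factor comes from the code above, everything else is illustrative:

    // ceil(total_weight * 7 / 10), as computed in wait_for_signer_signature
    fn rejection_threshold(total_weight: u64) -> u64 {
        (total_weight as f64 * 7_f64 / 10_f64).ceil() as u64
    }

    fn main() {
        // With 100 units of signer weight, the miner stops waiting only once
        // strictly more than 70 units of weight have rejected the proposal.
        assert_eq!(rejection_threshold(100), 70);
        // Rounding is always up: ceil(33 * 0.7) = ceil(23.1) = 24.
        assert_eq!(rejection_threshold(33), 24);
    }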
 
+    fn wait_for_signer_signature_and_broadcast(
+        &self,
+        stackerdbs: &StackerDBs,
+        mut block: NakamotoBlock,
     ) -> Result<(), ChainstateError> {
         let mut chain_state = neon_node::open_chainstate_with_faults(&self.config)
             .expect("FATAL: could not open chainstate DB");
@@ -550,8 +493,35 @@ impl BlockMinerThread {
             self.burnchain.pox_constants.clone(),
         )
         .expect("FATAL: could not open sortition DB");
-        let mut sortition_handle = sort_db.index_handle_at_tip();
+        let sortition_handle = sort_db.index_handle_at_tip();
+        let aggregate_public_key = NakamotoChainState::get_aggregate_public_key(
+            &mut chain_state,
+            &sort_db,
+            &sortition_handle,
+            &block,
+        )?;
+
+        let reward_cycle = self
+            .burnchain
+            .block_height_to_reward_cycle(self.burn_block.block_height)
+            .expect("FATAL: no reward cycle for burn block");
+        let signer_weights = NakamotoSigners::get_signers_weights(
+            &mut chain_state,
+            &sort_db,
+            &self.parent_tenure_id,
+            reward_cycle,
+        )?;
+        let signature = self
+            .wait_for_signer_signature(
+                &stackerdbs,
+                &aggregate_public_key,
+                &block.header.signer_signature_hash(),
+                signer_weights,
+            )
+            .map_err(|e| {
+                ChainstateError::InvalidStacksBlock(format!("Invalid Nakamoto block: {e:?}"))
+            })?;
+        block.header.signer_signature = signature;
 
         let (headers_conn, staging_tx) = chain_state.headers_conn_and_staging_tx_begin()?;
         NakamotoChainState::accept_block(
             &chainstate_config,
@@ -665,7 +635,10 @@ impl BlockMinerThread {
             .expect("FATAL: could not query chain tip")
         else {
             debug!("No Stacks chain tip known, will return a genesis block");
-            let burnchain_params = burnchain_params_from_config(&self.config.burnchain);
+            let (network, _) = self.config.burnchain.get_bitcoin_network();
+            let burnchain_params =
+                BurnchainParameters::from_params(&self.config.burnchain.chain, &network)
+                    .expect("Bitcoin network unsupported");
 
             let chain_tip = ChainTip::genesis(
                 &burnchain_params.first_block_hash,
@@ -679,6 +652,7 @@ impl BlockMinerThread {
                     parent_tenure_blocks: 0,
                 }),
                 stacks_parent_header: chain_tip.metadata,
+                parent_block_total_burn: 0,
                 coinbase_nonce: 0,
             });
         };
@@ -810,7 +784,7 @@ impl BlockMinerThread {
             self.get_signer_transactions(&mut chain_state, &burn_db, &stackerdbs)?;
 
         // build the block itself
-        let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block(
+        let (mut block, _, _) = NakamotoBlockBuilder::build_nakamoto_block(
             &chain_state,
             &burn_db.index_conn(),
             &mut mem_pool,
@@ -823,8 +797,6 @@ impl BlockMinerThread {
                 false,
                 self.globals.get_miner_status(),
             ),
-            // we'll invoke the event dispatcher ourselves so that it calculates the
-            // correct signer_sighash for `process_mined_nakamoto_block_event`
             Some(&self.event_dispatcher),
             signer_transactions,
         )
@@ -851,19 +823,15 @@ impl BlockMinerThread {
         block.header.miner_signature = miner_signature;
 
         info!(
-            "Miner: Assembled block #{} for signer set proposal: {}, with {} txs",
+            "Miner: Succeeded assembling {} block #{}: {}, with {} txs",
+            if parent_block_info.parent_block_total_burn == 0 {
+                "Genesis"
+            } else {
+                "Stacks"
+            },
             block.header.chain_length,
             block.header.block_hash(),
-            block.txs.len();
-            "signer_sighash" => %block.header.signer_signature_hash(),
-        );
-
-        self.event_dispatcher.process_mined_nakamoto_block_event(
-            self.burn_block.block_height,
-            &block,
-            size,
-            &consumed,
-            tx_events,
+            block.txs.len(),
         );
 
         // last chance -- confirm that the stacks tip is unchanged (since it could have taken long
@@ -911,6 +879,26 @@ impl
ParentStacksBlockInfo { .expect("Failed to look up block's parent snapshot") .expect("Failed to look up block's parent snapshot"); + let parent_sortition_id = &parent_snapshot.sortition_id; + + let parent_block_total_burn = + if &stacks_tip_header.consensus_hash == &FIRST_BURNCHAIN_CONSENSUS_HASH { + 0 + } else { + let parent_burn_block = + SortitionDB::get_block_snapshot(burn_db.conn(), parent_sortition_id) + .expect("SortitionDB failure.") + .ok_or_else(|| { + error!( + "Failed to find block snapshot for the parent sortition"; + "parent_sortition_id" => %parent_sortition_id + ); + NakamotoNodeError::SnapshotNotFoundForChainTip + })?; + + parent_burn_block.total_burn + }; + // don't mine off of an old burnchain block let burn_chain_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) .expect("FATAL: failed to query sortition DB for canonical burn chain tip"); @@ -972,19 +960,9 @@ impl ParentStacksBlockInfo { None }; - debug!( - "Looked up parent information"; - "parent_tenure_id" => %parent_tenure_id, - "parent_tenure_consensus_hash" => %parent_tenure_header.consensus_hash, - "parent_tenure_burn_hash" => %parent_tenure_header.burn_header_hash, - "parent_tenure_burn_height" => parent_tenure_header.burn_header_height, - "mining_consensus_hash" => %check_burn_block.consensus_hash, - "mining_burn_hash" => %check_burn_block.burn_header_hash, - "mining_burn_height" => check_burn_block.block_height, - "stacks_tip_consensus_hash" => %parent_snapshot.consensus_hash, - "stacks_tip_burn_hash" => %parent_snapshot.burn_header_hash, - "stacks_tip_burn_height" => parent_snapshot.block_height, - ); + debug!("Mining tenure's last consensus hash: {} (height {} hash {}), stacks tip consensus hash: {} (height {} hash {})", + &check_burn_block.consensus_hash, check_burn_block.block_height, &check_burn_block.burn_header_hash, + &parent_snapshot.consensus_hash, parent_snapshot.block_height, &parent_snapshot.burn_header_hash); let coinbase_nonce = { let principal = miner_address.into(); @@ -1005,6 +983,7 @@ impl ParentStacksBlockInfo { Ok(ParentStacksBlockInfo { stacks_parent_header: stacks_tip_header, + parent_block_total_burn, coinbase_nonce, parent_tenure: parent_tenure_info, }) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index f638ae93241..1ee3135c245 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -244,6 +244,11 @@ impl RelayerThread { self.min_network_download_passes = net_result.num_download_passes + 1; self.min_network_inv_passes = net_result.num_inv_sync_passes + 1; self.last_network_block_height_ts = get_epoch_time_ms(); + debug!( + "Relayer: block mining until the next download pass {}", + self.min_network_download_passes + ); + signal_mining_blocked(self.globals.get_miner_status()); } let net_receipts = self @@ -562,7 +567,6 @@ impl RelayerThread { "Relayer: Spawn tenure thread"; "height" => last_burn_block.block_height, "burn_header_hash" => %burn_header_hash, - "parent_tenure_id" => %parent_tenure_id, ); let miner_thread_state = @@ -589,17 +593,14 @@ impl RelayerThread { let new_miner_state = self.create_block_miner(vrf_key, burn_tip, parent_tenure_start)?; let new_miner_handle = std::thread::Builder::new() - .name(format!("miner.{parent_tenure_start}")) + .name(format!("miner-{}", self.local_peer.data_url)) .stack_size(BLOCK_PROCESSOR_STACK_SIZE) .spawn(move || new_miner_state.run_miner(prior_tenure_thread)) .map_err(|e| { error!("Relayer: Failed 
to start tenure thread: {:?}", &e);
                 NakamotoNodeError::SpawnError(e)
             })?;
-        debug!(
-            "Relayer: started tenure thread ID {:?}",
-            new_miner_handle.thread().id()
-        );
+
         self.miner_thread.replace(new_miner_handle);
 
         Ok(())
@@ -609,10 +610,8 @@ impl RelayerThread {
         // when stopping a tenure, block the mining thread if it's currently running, then join it.
         // do this in a new thread (so that the new thread stalls, not the relayer)
         let Some(prior_tenure_thread) = self.miner_thread.take() else {
-            debug!("Relayer: no tenure thread to stop");
             return Ok(());
         };
-        let id = prior_tenure_thread.thread().id();
         let globals = self.globals.clone();
 
         let stop_handle = std::thread::Builder::new()
@@ -624,7 +623,7 @@ impl RelayerThread {
             })?;
 
         self.miner_thread.replace(stop_handle);
-        debug!("Relayer: stopped tenure thread ID {id:?}");
+
         Ok(())
     }
 
@@ -641,35 +640,18 @@ impl RelayerThread {
             MinerDirective::BeginTenure {
                 parent_tenure_start,
                 burnchain_tip,
-            } => match self.start_new_tenure(parent_tenure_start, burnchain_tip) {
-                Ok(()) => {
-                    debug!("Relayer: successfully started new tenure.");
-                }
-                Err(e) => {
-                    error!("Relayer: Failed to start new tenure: {:?}", e);
-                }
-            },
+            } => {
+                let _ = self.start_new_tenure(parent_tenure_start, burnchain_tip);
+            }
             MinerDirective::ContinueTenure { new_burn_view: _ } => {
                 // TODO: in this case, we eventually want to undergo a tenure
                 //  change to switch to the new burn view, but right now, we will
                 //  simply end our current tenure if it exists
-                match self.stop_tenure() {
-                    Ok(()) => {
-                        debug!("Relayer: successfully stopped tenure.");
-                    }
-                    Err(e) => {
-                        error!("Relayer: Failed to stop tenure: {:?}", e);
-                    }
-                }
+                let _ = self.stop_tenure();
+            }
+            MinerDirective::StopTenure => {
+                let _ = self.stop_tenure();
             }
-            MinerDirective::StopTenure => match self.stop_tenure() {
-                Ok(()) => {
-                    debug!("Relayer: successfully stopped tenure.");
-                }
-                Err(e) => {
-                    error!("Relayer: Failed to stop tenure: {:?}", e);
-                }
-            },
         }
 
         true
diff --git a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
deleted file mode 100644
index b1118bebff2..00000000000
--- a/testnet/stacks-node/src/nakamoto_node/sign_coordinator.rs
+++ /dev/null
@@ -1,493 +0,0 @@
-// Copyright (C) 2024 Stacks Open Internet Foundation
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
- -use std::sync::mpsc::Receiver; -use std::time::{Duration, Instant}; - -use hashbrown::{HashMap, HashSet}; -use libsigner::{ - MessageSlotID, SignerEntries, SignerEvent, SignerMessage, SignerSession, StackerDBSession, -}; -use stacks::burnchains::Burnchain; -use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::BlockSnapshot; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use stacks::chainstate::stacks::boot::{NakamotoSignerEntry, RewardSet, MINERS_NAME, SIGNERS_NAME}; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; -use stacks::chainstate::stacks::{Error as ChainstateError, ThresholdSignature}; -use stacks::libstackerdb::StackerDBChunkData; -use stacks::net::stackerdb::StackerDBs; -use stacks::util_lib::boot::boot_code_id; -use stacks_common::codec::StacksMessageCodec; -use stacks_common::types::chainstate::{StacksPrivateKey, StacksPublicKey}; -use wsts::common::PolyCommitment; -use wsts::curve::ecdsa; -use wsts::curve::point::Point; -use wsts::curve::scalar::Scalar; -use wsts::state_machine::coordinator::fire::Coordinator as FireCoordinator; -use wsts::state_machine::coordinator::{Config as CoordinatorConfig, Coordinator}; -use wsts::state_machine::PublicKeys; -use wsts::v2::Aggregator; - -use super::Error as NakamotoNodeError; -use crate::event_dispatcher::STACKER_DB_CHANNEL; -use crate::Config; - -/// How long should the coordinator poll on the event receiver before -/// waking up to check timeouts? -static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(50); - -/// The `SignCoordinator` struct represents a WSTS FIRE coordinator whose -/// sole function is to serve as the coordinator for Nakamoto block signing. -/// This coordinator does not operate as a DKG coordinator. Rather, this struct -/// is used by Nakamoto miners to act as the coordinator for the blocks they -/// produce. 
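The deleted coordinator below waits on a channel in short slices (EVENT_RECEIVER_POLL) inside an overall signing-round timeout. That poll-with-deadline pattern is easy to isolate; a minimal sketch using only std, with illustrative names:

    use std::sync::mpsc::{Receiver, RecvTimeoutError};
    use std::time::{Duration, Instant};

    // Block on the channel in short slices so the overall deadline is
    // honored even if no event ever arrives.
    fn wait_for_event<T>(rx: &Receiver<T>, poll: Duration, deadline: Duration) -> Option<T> {
        let start = Instant::now();
        while start.elapsed() <= deadline {
            match rx.recv_timeout(poll) {
                Ok(event) => return Some(event),
                Err(RecvTimeoutError::Timeout) => continue,
                Err(RecvTimeoutError::Disconnected) => return None,
            }
        }
        None
    }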
-pub struct SignCoordinator {
-    coordinator: FireCoordinator<Aggregator>,
-    receiver: Option<Receiver<StackerDBChunksEvent>>,
-    message_key: Scalar,
-    wsts_public_keys: PublicKeys,
-    is_mainnet: bool,
-    miners_session: StackerDBSession,
-    signing_round_timeout: Duration,
-}
-
-pub struct NakamotoSigningParams {
-    /// total number of signers
-    pub num_signers: u32,
-    /// total number of keys
-    pub num_keys: u32,
-    /// threshold of keys needed to form a valid signature
-    pub threshold: u32,
-    /// map of signer_id to controlled key_ids
-    pub signer_key_ids: HashMap<u32, HashSet<u32>>,
-    /// ECDSA public keys as Point objects indexed by signer_id
-    pub signer_public_keys: HashMap<u32, Point>,
-    pub wsts_public_keys: PublicKeys,
-}
-
-impl Drop for SignCoordinator {
-    fn drop(&mut self) {
-        STACKER_DB_CHANNEL.replace_receiver(self.receiver.take().expect(
-            "FATAL: lost possession of the StackerDB channel before dropping SignCoordinator",
-        ));
-    }
-}
-
-impl NakamotoSigningParams {
-    pub fn parse(
-        is_mainnet: bool,
-        reward_set: &[NakamotoSignerEntry],
-    ) -> Result<Self, ChainstateError> {
-        let parsed = SignerEntries::parse(is_mainnet, reward_set).map_err(|e| {
-            ChainstateError::InvalidStacksBlock(format!(
-                "Invalid Reward Set: Could not parse into WSTS structs: {e:?}"
-            ))
-        })?;
-
-        let num_keys = parsed
-            .count_keys()
-            .expect("FATAL: more than u32::max() signers in the reward set");
-        let num_signers = parsed
-            .count_signers()
-            .expect("FATAL: more than u32::max() signers in the reward set");
-        let threshold = parsed
-            .get_signing_threshold()
-            .expect("FATAL: more than u32::max() signers in the reward set");
-
-        Ok(NakamotoSigningParams {
-            num_signers,
-            threshold,
-            num_keys,
-            signer_key_ids: parsed.coordinator_key_ids,
-            signer_public_keys: parsed.signer_public_keys,
-            wsts_public_keys: parsed.public_keys,
-        })
-    }
-}
-
-fn get_signer_commitments(
-    is_mainnet: bool,
-    reward_set: &[NakamotoSignerEntry],
-    stackerdbs: &StackerDBs,
-    reward_cycle: u64,
-    expected_aggregate_key: &Point,
-) -> Result<Vec<(u32, PolyCommitment)>, ChainstateError> {
-    let commitment_contract =
-        MessageSlotID::DkgResults.stacker_db_contract(is_mainnet, reward_cycle);
-    let signer_set_len = u32::try_from(reward_set.len())
-        .map_err(|_| ChainstateError::InvalidStacksBlock("Reward set length exceeds u32".into()))?;
-    for signer_id in 0..signer_set_len {
-        let Some(signer_data) = stackerdbs.get_latest_chunk(&commitment_contract, signer_id)?
-        else {
-            warn!(
-                "Failed to fetch DKG result, will look for results from other signers.";
-                "signer_id" => signer_id
-            );
-            continue;
-        };
-        let Ok(SignerMessage::DkgResults {
-            aggregate_key,
-            party_polynomials,
-        }) = SignerMessage::consensus_deserialize(&mut signer_data.as_slice())
-        else {
-            warn!(
-                "Failed to parse DKG result, will look for results from other signers.";
-                "signer_id" => signer_id,
-            );
-            continue;
-        };
-
-        if &aggregate_key != expected_aggregate_key {
-            warn!(
-                "Aggregate key in DKG results does not match expected, will look for results from other signers.";
-                "expected" => %expected_aggregate_key,
-                "reported" => %aggregate_key,
-            );
-            continue;
-        }
-        let computed_key = party_polynomials
-            .iter()
-            .fold(Point::default(), |s, (_, comm)| s + comm.poly[0]);
-
-        if expected_aggregate_key != &computed_key {
-            warn!(
-                "Aggregate key computed from DKG results does not match expected, will look for results from other signers.";
-                "expected" => %expected_aggregate_key,
-                "computed" => %computed_key,
-            );
-            continue;
-        }
-
-        return Ok(party_polynomials);
-    }
-    error!(
-        "No valid DKG results found for the active signing set, cannot coordinate a group signature";
-        "reward_cycle" => reward_cycle,
-    );
-    Err(ChainstateError::InvalidStacksBlock(
-        "Failed to fetch DKG results for the active signer set".into(),
-    ))
-}
-
-impl SignCoordinator {
-    /// * `reward_set` - the active reward set data, used to construct the signer
-    ///   set parameters.
-    /// * `message_key` - the signing key that the coordinator will use to sign messages
-    ///   broadcasted to the signer set. this should be the miner's registered key.
-    /// * `aggregate_public_key` - the active aggregate key for this cycle
-    pub fn new(
-        reward_set: &RewardSet,
-        reward_cycle: u64,
-        message_key: Scalar,
-        aggregate_public_key: Point,
-        stackerdb_conn: &StackerDBs,
-        config: &Config,
-    ) -> Result<Self, ChainstateError> {
-        let is_mainnet = config.is_mainnet();
-        let Some(ref reward_set_signers) = reward_set.signers else {
-            error!("Could not initialize WSTS coordinator for reward set without signer");
-            return Err(ChainstateError::NoRegisteredSigners(0));
-        };
-
-        let rpc_socket = config
-            .node
-            .get_rpc_loopback()
-            .ok_or_else(|| ChainstateError::MinerAborted)?;
-        let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet);
-        let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id);
-
-        let NakamotoSigningParams {
-            num_signers,
-            num_keys,
-            threshold,
-            signer_key_ids,
-            signer_public_keys,
-            wsts_public_keys,
-        } = NakamotoSigningParams::parse(is_mainnet, reward_set_signers.as_slice())?;
-        debug!(
-            "Initializing miner/coordinator";
-            "num_signers" => num_signers,
-            "num_keys" => num_keys,
-            "threshold" => threshold,
-            "signer_key_ids" => ?signer_key_ids,
-            "signer_public_keys" => ?signer_public_keys,
-            "wsts_public_keys" => ?wsts_public_keys,
-        );
-        let coord_config = CoordinatorConfig {
-            num_signers,
-            num_keys,
-            threshold,
-            signer_key_ids,
-            signer_public_keys,
-            dkg_threshold: threshold,
-            message_private_key: message_key.clone(),
-            ..Default::default()
-        };
-
-        let mut coordinator: FireCoordinator<Aggregator> = FireCoordinator::new(coord_config);
-        let party_polynomials = get_signer_commitments(
-            is_mainnet,
-            reward_set_signers.as_slice(),
-            stackerdb_conn,
-            reward_cycle,
-            &aggregate_public_key,
-        )?;
-        if let Err(e) = coordinator
-            .set_key_and_party_polynomials(aggregate_public_key.clone(), party_polynomials)
-        {
-            warn!("Failed to set a valid set of party polynomials"; "error" => %e);
-        };
- let (receiver, replaced_other) = STACKER_DB_CHANNEL.register_miner_coordinator(); - if replaced_other { - warn!("Replaced the miner/coordinator receiver of a prior thread. Prior thread may have crashed."); - } - - Ok(Self { - coordinator, - message_key, - receiver: Some(receiver), - wsts_public_keys, - is_mainnet, - miners_session, - signing_round_timeout: config.miner.wait_on_signers.clone(), - }) - } - - fn get_sign_id(burn_block_height: u64, burnchain: &Burnchain) -> u64 { - burnchain - .pox_constants - .reward_cycle_index(burnchain.first_block_height, burn_block_height) - .expect("FATAL: tried to initialize WSTS coordinator before first burn block height") - } - - fn send_signers_message( - message_key: &Scalar, - sortdb: &SortitionDB, - tip: &BlockSnapshot, - stackerdbs: &StackerDBs, - message: SignerMessage, - is_mainnet: bool, - miners_session: &mut StackerDBSession, - ) -> Result<(), String> { - let mut miner_sk = StacksPrivateKey::from_slice(&message_key.to_bytes()).unwrap(); - miner_sk.set_compress_public(true); - let miner_pubkey = StacksPublicKey::from_private(&miner_sk); - let Some(slot_range) = NakamotoChainState::get_miner_slot(sortdb, tip, &miner_pubkey) - .map_err(|e| format!("Failed to read miner slot information: {e:?}"))? - else { - return Err("No slot for miner".into()); - }; - let target_slot = 1; - let slot_id = slot_range.start + target_slot; - if !slot_range.contains(&slot_id) { - return Err("Not enough slots for miner messages".into()); - } - // Get the LAST slot version number written to the DB. If not found, use 0. - // Add 1 to get the NEXT version number - // Note: we already check above for the slot's existence - let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); - let slot_version = stackerdbs - .get_slot_version(&miners_contract_id, slot_id) - .map_err(|e| format!("Failed to read slot version: {e:?}"))? 
-            .unwrap_or(0)
-            .saturating_add(1);
-        let mut chunk = StackerDBChunkData::new(slot_id, slot_version, message.serialize_to_vec());
-        chunk
-            .sign(&miner_sk)
-            .map_err(|_| "Failed to sign StackerDB chunk")?;
-
-        match miners_session.put_chunk(&chunk) {
-            Ok(ack) => {
-                debug!("Wrote message to stackerdb: {ack:?}");
-                Ok(())
-            }
-            Err(e) => {
-                warn!("Failed to write message to stackerdb {e:?}");
-                Err("Failed to write message to stackerdb".into())
-            }
-        }
-    }
-
-    pub fn begin_sign(
-        &mut self,
-        block: &NakamotoBlock,
-        block_attempt: u64,
-        burn_tip: &BlockSnapshot,
-        burnchain: &Burnchain,
-        sortdb: &SortitionDB,
-        stackerdbs: &StackerDBs,
-    ) -> Result<ThresholdSignature, NakamotoNodeError> {
-        let sign_id = Self::get_sign_id(burn_tip.block_height, burnchain);
-        let sign_iter_id = block_attempt;
-        let reward_cycle_id = burnchain
-            .block_height_to_reward_cycle(burn_tip.block_height)
-            .expect("FATAL: tried to initialize coordinator before first burn block height");
-        self.coordinator.current_sign_id = sign_id;
-        self.coordinator.current_sign_iter_id = sign_iter_id;
-
-        let block_bytes = block.serialize_to_vec();
-        let nonce_req_msg = self
-            .coordinator
-            .start_signing_round(&block_bytes, false, None)
-            .map_err(|e| {
-                NakamotoNodeError::SigningCoordinatorFailure(format!(
-                    "Failed to start signing round in FIRE coordinator: {e:?}"
-                ))
-            })?;
-        Self::send_signers_message(
-            &self.message_key,
-            sortdb,
-            burn_tip,
-            &stackerdbs,
-            nonce_req_msg.into(),
-            self.is_mainnet,
-            &mut self.miners_session,
-        )
-        .map_err(NakamotoNodeError::SigningCoordinatorFailure)?;
-
-        let Some(ref mut receiver) = self.receiver else {
-            return Err(NakamotoNodeError::SigningCoordinatorFailure(
-                "Failed to obtain the StackerDB event receiver".into(),
-            ));
-        };
-
-        let start_ts = Instant::now();
-        while start_ts.elapsed() <= self.signing_round_timeout {
-            let event = match receiver.recv_timeout(EVENT_RECEIVER_POLL) {
-                Ok(event) => event,
-                Err(std::sync::mpsc::RecvTimeoutError::Timeout) => {
-                    continue;
-                }
-                Err(std::sync::mpsc::RecvTimeoutError::Disconnected) => {
-                    return Err(NakamotoNodeError::SigningCoordinatorFailure(
-                        "StackerDB event receiver disconnected".into(),
-                    ))
-                }
-            };
-
-            let is_signer_event =
-                event.contract_id.name.starts_with(SIGNERS_NAME) && event.contract_id.is_boot();
-            if !is_signer_event {
-                debug!("Ignoring StackerDB event for non-signer contract"; "contract" => %event.contract_id);
-                continue;
-            }
-            let Ok(signer_event) = SignerEvent::try_from(event).map_err(|e| {
-                warn!("Failure parsing StackerDB event into signer event. Ignoring message."; "err" => ?e);
-            }) else {
-                continue;
-            };
-            let SignerEvent::SignerMessages(signer_set, messages) = signer_event else {
-                debug!("Received signer event other than a signer message. Ignoring.");
-                continue;
-            };
-            if signer_set != u32::try_from(reward_cycle_id % 2).unwrap() {
-                debug!("Received signer event for other reward cycle. Ignoring.");
-                continue;
-            };
-            debug!("Miner/Coordinator: Received messages from signers"; "count" => messages.len());
-            let coordinator_pk = ecdsa::PublicKey::new(&self.message_key).map_err(|_e| {
-                NakamotoNodeError::MinerSignatureError("Bad signing key for the FIRE coordinator")
-            })?;
-            let packets: Vec<_> = messages
-                .into_iter()
-                .filter_map(|msg| match msg {
-                    SignerMessage::DkgResults { ..
} - | SignerMessage::BlockResponse(_) - | SignerMessage::Transactions(_) => None, - SignerMessage::Packet(packet) => { - debug!("Received signers packet: {packet:?}"); - if !packet.verify(&self.wsts_public_keys, &coordinator_pk) { - warn!("Failed to verify StackerDB packet: {packet:?}"); - None - } else { - Some(packet) - } - } - }) - .collect(); - let (outbound_msgs, op_results) = self - .coordinator - .process_inbound_messages(&packets) - .unwrap_or_else(|e| { - error!( - "Miner/Coordinator: Failed to process inbound message packets"; - "err" => ?e - ); - (vec![], vec![]) - }); - for operation_result in op_results.into_iter() { - match operation_result { - wsts::state_machine::OperationResult::Dkg { .. } - | wsts::state_machine::OperationResult::SignTaproot(_) - | wsts::state_machine::OperationResult::DkgError(_) => { - debug!("Ignoring unrelated operation result"); - } - wsts::state_machine::OperationResult::Sign(signature) => { - // check if the signature actually corresponds to our block? - let block_sighash = block.header.signer_signature_hash(); - let verified = signature.verify( - self.coordinator.aggregate_public_key.as_ref().unwrap(), - &block_sighash.0, - ); - let signature = ThresholdSignature(signature); - if !verified { - warn!( - "Processed signature but didn't validate over the expected block. Returning error."; - "signature" => %signature, - "block_signer_signature_hash" => %block_sighash - ); - return Err(NakamotoNodeError::SignerSignatureError( - "Signature failed to validate over the expected block".into(), - )); - } else { - return Ok(signature); - } - } - wsts::state_machine::OperationResult::SignError(e) => { - return Err(NakamotoNodeError::SignerSignatureError(format!( - "Signing failed: {e:?}" - ))) - } - } - } - for msg in outbound_msgs { - match Self::send_signers_message( - &self.message_key, - sortdb, - burn_tip, - stackerdbs, - msg.into(), - self.is_mainnet, - &mut self.miners_session, - ) { - Ok(()) => { - debug!("Miner/Coordinator: sent outbound message."); - } - Err(e) => { - warn!( - "Miner/Coordinator: Failed to send message to StackerDB instance: {e:?}." 
- ); - } - }; - } - } - - Err(NakamotoNodeError::SignerSignatureError( - "Timed out waiting for group signature".into(), - )) - } -} diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 2a05feb44e6..49064d4971c 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -152,7 +152,7 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use stacks::burnchains::bitcoin::address::{BitcoinAddress, LegacyBitcoinAddressType}; use stacks::burnchains::db::BurnchainHeaderReader; -use stacks::burnchains::{Burnchain, BurnchainSigner, PoxConstants, Txid}; +use stacks::burnchains::{Burnchain, BurnchainParameters, BurnchainSigner, PoxConstants, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::leader_block_commit::{ RewardSetInfo, BURN_BLOCK_MINED_AT_MODULUS, @@ -204,7 +204,7 @@ use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs}; use super::{BurnchainController, Config, EventDispatcher, Keychain}; use crate::burnchains::bitcoin_regtest_controller::{ - addr2str, burnchain_params_from_config, BitcoinRegtestController, OngoingBlockCommit, + addr2str, BitcoinRegtestController, OngoingBlockCommit, }; use crate::burnchains::make_bitcoin_indexer; use crate::chain_data::MinerStats; @@ -516,8 +516,6 @@ pub(crate) struct BlockMinerThread { burn_block: BlockSnapshot, /// Handle to the node's event dispatcher event_dispatcher: EventDispatcher, - /// Failed to submit last attempted block - failed_to_submit_last_attempt: bool, } /// State representing the microblock miner. @@ -1022,7 +1020,6 @@ impl BlockMinerThread { registered_key, burn_block, event_dispatcher: rt.event_dispatcher.clone(), - failed_to_submit_last_attempt: false, } } @@ -1503,7 +1500,10 @@ impl BlockMinerThread { (parent_info, canonical) } else { debug!("No Stacks chain tip known, will return a genesis block"); - let burnchain_params = burnchain_params_from_config(&self.config.burnchain); + let (network, _) = self.config.burnchain.get_bitcoin_network(); + let burnchain_params = + BurnchainParameters::from_params(&self.config.burnchain.chain, &network) + .expect("Bitcoin network unsupported"); let chain_tip = ChainTip::genesis( &burnchain_params.first_block_hash, @@ -1543,9 +1543,7 @@ impl BlockMinerThread { Self::find_inflight_mined_blocks(self.burn_block.block_height, &self.last_mined_blocks); // has the tip changed from our previously-mined block for this epoch? - let should_unconditionally_mine = last_mined_blocks.is_empty() - || (last_mined_blocks.len() == 1 && !self.failed_to_submit_last_attempt); - let (attempt, max_txs) = if should_unconditionally_mine { + let (attempt, max_txs) = if last_mined_blocks.len() <= 1 { // always mine if we've not mined a block for this epoch yet, or // if we've mined just one attempt, unconditionally try again (so we // can use `subsequent_miner_time_ms` in this attempt) @@ -2273,34 +2271,16 @@ impl BlockMinerThread { let coinbase_tx = self.inner_generate_coinbase_tx(parent_block_info.coinbase_nonce, target_epoch_id); - // find the longest microblock tail we can build off of and vet microblocks for forks - self.load_and_vet_parent_microblocks( + // find the longest microblock tail we can build off of. 
+ // target it to the microblock tail in parent_block_info + let microblocks_opt = self.load_and_vet_parent_microblocks( &mut chain_state, &burn_db, &mut mem_pool, &mut parent_block_info, ); - let burn_tip = SortitionDB::get_canonical_burn_chain_tip(burn_db.conn()) - .expect("FATAL: failed to read current burnchain tip"); - let microblocks_disabled = - SortitionDB::are_microblocks_disabled(burn_db.conn(), burn_tip.block_height) - .expect("FATAL: failed to query epoch's microblock status"); - // build the block itself - let mut builder_settings = self.config.make_block_builder_settings( - attempt, - false, - self.globals.get_miner_status(), - ); - if microblocks_disabled { - builder_settings.confirm_microblocks = false; - if cfg!(test) - && std::env::var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25").as_deref() == Ok("1") - { - builder_settings.confirm_microblocks = true; - } - } let (anchored_block, _, _) = match StacksBlockBuilder::build_anchored_block( &chain_state, &burn_db.index_conn(), @@ -2310,25 +2290,39 @@ impl BlockMinerThread { vrf_proof.clone(), mblock_pubkey_hash, &coinbase_tx, - builder_settings, + self.config.make_block_builder_settings( + attempt, + false, + self.globals.get_miner_status(), + ), Some(&self.event_dispatcher), - &self.burnchain, ) { Ok(block) => block, Err(ChainstateError::InvalidStacksMicroblock(msg, mblock_header_hash)) => { // part of the parent microblock stream is invalid, so try again - info!( - "Parent microblock stream is invalid; trying again without microblocks"; - "microblock_offender" => %mblock_header_hash, - "error" => &msg - ); - - let mut builder_settings = self.config.make_block_builder_settings( - attempt, - false, - self.globals.get_miner_status(), - ); - builder_settings.confirm_microblocks = false; + info!("Parent microblock stream is invalid; trying again without the offender {} (msg: {})", &mblock_header_hash, &msg); + + // truncate the stream + parent_block_info.stacks_parent_header.microblock_tail = match microblocks_opt { + Some(microblocks) => { + let mut tail = None; + for mblock in microblocks.into_iter() { + if mblock.block_hash() == mblock_header_hash { + break; + } + tail = Some(mblock); + } + if let Some(ref t) = &tail { + debug!( + "New parent microblock stream tail is {} (seq {})", + t.block_hash(), + t.header.sequence + ); + } + tail.map(|t| t.header) + } + None => None, + }; // try again match StacksBlockBuilder::build_anchored_block( @@ -2340,9 +2334,12 @@ impl BlockMinerThread { vrf_proof.clone(), mblock_pubkey_hash, &coinbase_tx, - builder_settings, + self.config.make_block_builder_settings( + attempt, + false, + self.globals.get_miner_status(), + ), Some(&self.event_dispatcher), - &self.burnchain, ) { Ok(block) => block, Err(e) => { @@ -2485,14 +2482,12 @@ impl BlockMinerThread { let res = bitcoin_controller.submit_operation(target_epoch_id, op, &mut op_signer, attempt); if res.is_none() { - self.failed_to_submit_last_attempt = true; if !self.config.node.mock_mining { warn!("Relayer: Failed to submit Bitcoin transaction"); return None; + } else { + debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); } - debug!("Relayer: Mock-mining enabled; not sending Bitcoin transaction"); - } else { - self.failed_to_submit_last_attempt = false; } Some(MinerThreadResult::Block( @@ -3065,9 +3060,6 @@ impl RelayerThread { // one. ProcessTenure(..) messages can get lost. 
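        // Read the canonical burnchain tip first so the tenure scan below starts
        // from the most recent sortition.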
let burn_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) .expect("FATAL: failed to read current burnchain tip"); - let mut microblocks_disabled = - SortitionDB::are_microblocks_disabled(self.sortdb_ref().conn(), burn_tip.block_height) - .expect("FATAL: failed to query epoch's microblock status"); let tenures = if let Some(last_ch) = self.last_tenure_consensus_hash.as_ref() { let mut tenures = vec![]; @@ -3203,18 +3195,11 @@ impl RelayerThread { // update state for microblock mining self.setup_microblock_mining_state(miner_tip); - if cfg!(test) - && std::env::var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25").as_deref() == Ok("1") - { - debug!("Allowing miner to mine microblocks because STACKS_TEST_FORCE_MICROBLOCKS_POST_25 = 1"); - microblocks_disabled = false; - } - // resume mining if we blocked it if num_tenures > 0 || num_sortitions > 0 { if self.miner_tip.is_some() { // we won the highest tenure - if self.config.node.mine_microblocks && !microblocks_disabled { + if self.config.node.mine_microblocks { // mine a microblock first self.mined_stacks_block = true; } else { @@ -3509,23 +3494,6 @@ impl RelayerThread { test_debug!("Relayer: not configured to mine microblocks"); return false; } - - let burn_tip = SortitionDB::get_canonical_burn_chain_tip(self.sortdb_ref().conn()) - .expect("FATAL: failed to read current burnchain tip"); - let microblocks_disabled = - SortitionDB::are_microblocks_disabled(self.sortdb_ref().conn(), burn_tip.block_height) - .expect("FATAL: failed to query epoch's microblock status"); - - if microblocks_disabled { - if cfg!(test) - && std::env::var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25").as_deref() == Ok("1") - { - debug!("Allowing miner to mine microblocks because STACKS_TEST_FORCE_MICROBLOCKS_POST_25 = 1"); - } else { - return false; - } - } - if !self.miner_thread_try_join() { // already running (for an anchored block or microblock) test_debug!("Relayer: miner thread already running so cannot mine microblock"); diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 77117a6822b..90c2123079a 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -345,15 +345,6 @@ impl Node { } let burnchain_config = config.get_burnchain(); - - // instantiate DBs - let _burnchain_db = BurnchainDB::connect( - &burnchain_config.get_burnchaindb_path(), - &burnchain_config, - true, - ) - .expect("FATAL: failed to connect to burnchain DB"); - run_loop::announce_boot_receipts( &mut event_dispatcher, &chain_state, @@ -533,7 +524,6 @@ impl Node { let consensus_hash = burnchain_tip.block_snapshot.consensus_hash; let burnchain = self.config.get_burnchain(); - let sortdb = SortitionDB::open( &self.config.get_burn_db_file_path(), true, diff --git a/testnet/stacks-node/src/run_loop/mod.rs b/testnet/stacks-node/src/run_loop/mod.rs index 01f848c2e6f..49cb4fb337a 100644 --- a/testnet/stacks-node/src/run_loop/mod.rs +++ b/testnet/stacks-node/src/run_loop/mod.rs @@ -193,6 +193,5 @@ pub fn announce_boot_receipts( &ExecutionCost::zero(), pox_constants, &None, - &None, ); } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index d99c1ea24aa..9a875d1786d 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1083,19 +1083,9 @@ impl RunLoop { let liveness_thread = self.spawn_chain_liveness_thread(globals.clone()); // Wait for all pending sortitions to process - let mut burnchain_db = burnchain_config - .open_burnchain_db(true) + 
let burnchain_db = burnchain_config + .open_burnchain_db(false) .expect("FATAL: failed to open burnchain DB"); - if !self.config.burnchain.affirmation_overrides.is_empty() { - let tx = burnchain_db - .tx_begin() - .expect("FATAL: failed to begin burnchain DB tx"); - for (reward_cycle, affirmation) in self.config.burnchain.affirmation_overrides.iter() { - tx.set_override_affirmation_map(*reward_cycle, affirmation.clone()).expect(&format!("FATAL: failed to set affirmation override ({affirmation}) for reward cycle {reward_cycle}")); - } - tx.commit() - .expect("FATAL: failed to commit burnchain DB tx"); - } let burnchain_db_tip = burnchain_db .get_canonical_chain_tip() .expect("FATAL: failed to query burnchain DB"); diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index fd7683f569e..882a65d06b0 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -101,7 +101,6 @@ impl<'a> Tenure { &self.coinbase_tx, BlockBuilderSettings::limited(), None, - &self.config.get_burnchain(), ) .unwrap(); diff --git a/testnet/stacks-node/src/tests/bitcoin_regtest.rs b/testnet/stacks-node/src/tests/bitcoin_regtest.rs index 5f8b1aabd39..6391dd9b2a0 100644 --- a/testnet/stacks-node/src/tests/bitcoin_regtest.rs +++ b/testnet/stacks-node/src/tests/bitcoin_regtest.rs @@ -76,6 +76,7 @@ impl BitcoinCoreController { Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), }; + eprintln!("bitcoind spawned, waiting for startup"); let mut out_reader = BufReader::new(process.stdout.take().unwrap()); let mut line = String::new(); @@ -97,34 +98,6 @@ impl BitcoinCoreController { Ok(()) } - pub fn stop_bitcoind(&mut self) -> Result<(), BitcoinCoreError> { - if let Some(_) = self.bitcoind_process.take() { - let mut command = Command::new("bitcoin-cli"); - command - .stdout(Stdio::piped()) - .arg("-rpcconnect=127.0.0.1") - .arg("-rpcport=8332") - .arg("-rpcuser=neon-tester") - .arg("-rpcpassword=neon-tester-pass") - .arg("stop"); - - let mut process = match command.spawn() { - Ok(child) => child, - Err(e) => return Err(BitcoinCoreError::SpawnFailed(format!("{:?}", e))), - }; - - let mut out_reader = BufReader::new(process.stdout.take().unwrap()); - let mut line = String::new(); - while let Ok(bytes_read) = out_reader.read_line(&mut line) { - if bytes_read == 0 { - break; - } - eprintln!("{}", &line); - } - } - Ok(()) - } - pub fn kill_bitcoind(&mut self) { if let Some(mut bitcoind_process) = self.bitcoind_process.take() { bitcoind_process.kill().unwrap(); @@ -169,8 +142,8 @@ fn bitcoind_integration(segwit_flag: bool) { conf.burnchain.password = Some("secret".to_string()); conf.burnchain.local_mining_public_key = Some("04ee0b1602eb18fef7986887a7e8769a30c9df981d33c8380d255edef003abdcd243a0eb74afdf6740e6c423e62aec631519a24cf5b1d62bf8a3e06ddc695dcb77".to_string()); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; conf.miner.segwit = segwit_flag; conf.initial_balances.push(InitialBalance { diff --git a/testnet/stacks-node/src/tests/epoch_205.rs b/testnet/stacks-node/src/tests/epoch_205.rs index 0b363081e0d..0f689f00ef8 100644 --- a/testnet/stacks-node/src/tests/epoch_205.rs +++ b/testnet/stacks-node/src/tests/epoch_205.rs @@ -981,8 +981,8 @@ fn bigger_microblock_streams_in_2_05() { conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; - 
conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; conf.burnchain.epochs = Some(vec![ StacksEpoch { diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 14db80f0b10..e26468a254b 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -97,9 +97,9 @@ fn advance_to_2_1( 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, - u32::MAX, + u64::max_value() - 2, + u64::max_value() - 1, + u32::max_value(), u32::MAX, u32::MAX, u32::MAX, @@ -601,7 +601,7 @@ fn transition_fixes_bitcoin_rigidity() { 15, (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), - u32::MAX, + u32::max_value(), u32::MAX, u32::MAX, u32::MAX, @@ -1044,8 +1044,8 @@ fn transition_adds_get_pox_addr_recipients() { 4 * prepare_phase_len / 5, 1, 1, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height, u32::MAX, u32::MAX, @@ -1811,8 +1811,8 @@ fn transition_empty_blocks() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, (epoch_2_1 + 1) as u32, u32::MAX, u32::MAX, @@ -4764,7 +4764,7 @@ fn trait_invocation_cross_epoch() { 15, (16 * reward_cycle_len - 1).into(), (17 * reward_cycle_len).into(), - u32::MAX, + u32::max_value(), u32::MAX, u32::MAX, u32::MAX, @@ -4982,8 +4982,8 @@ fn test_v1_unlock_height_with_current_stackers() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -5008,8 +5008,8 @@ fn test_v1_unlock_height_with_current_stackers() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, u32::MAX, u32::MAX, @@ -5247,8 +5247,8 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -5273,8 +5273,8 @@ fn test_v1_unlock_height_with_delay_and_current_stackers() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, u32::MAX, u32::MAX, diff --git a/testnet/stacks-node/src/tests/epoch_22.rs b/testnet/stacks-node/src/tests/epoch_22.rs index 4e387d6304f..5c58b26dedd 100644 --- a/testnet/stacks-node/src/tests/epoch_22.rs +++ b/testnet/stacks-node/src/tests/epoch_22.rs @@ -130,8 +130,8 @@ fn disable_pox() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -160,8 +160,8 @@ fn disable_pox() { 4 * 
prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, @@ -663,8 +663,8 @@ fn pox_2_unlock_all() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -693,8 +693,8 @@ fn pox_2_unlock_all() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, diff --git a/testnet/stacks-node/src/tests/epoch_23.rs b/testnet/stacks-node/src/tests/epoch_23.rs index 470eda96724..740785e1826 100644 --- a/testnet/stacks-node/src/tests/epoch_23.rs +++ b/testnet/stacks-node/src/tests/epoch_23.rs @@ -96,8 +96,8 @@ fn trait_invocation_behavior() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -128,8 +128,8 @@ fn trait_invocation_behavior() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, diff --git a/testnet/stacks-node/src/tests/epoch_24.rs b/testnet/stacks-node/src/tests/epoch_24.rs index 2cc9868dc65..b88441838a5 100644 --- a/testnet/stacks-node/src/tests/epoch_24.rs +++ b/testnet/stacks-node/src/tests/epoch_24.rs @@ -148,8 +148,8 @@ fn fix_to_pox_contract() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -182,8 +182,8 @@ fn fix_to_pox_contract() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, @@ -787,8 +787,8 @@ fn verify_auto_unlock_behavior() { conf.node.wait_time_for_blocks = 1_000; conf.miner.wait_for_block_download = false; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -821,8 +821,8 @@ fn verify_auto_unlock_behavior() { 4 * prepare_phase_len / 5, 5, 15, - u64::MAX - 2, - u64::MAX - 1, + u64::max_value() - 2, + u64::max_value() - 1, v1_unlock_height as u32, epoch_2_2 as u32 + 1, u32::MAX, diff --git a/testnet/stacks-node/src/tests/epoch_25.rs b/testnet/stacks-node/src/tests/epoch_25.rs deleted file mode 100644 index 42369b800a5..00000000000 --- a/testnet/stacks-node/src/tests/epoch_25.rs +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (C) 2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by 
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-use std::{env, thread};
-
-use clarity::vm::types::PrincipalData;
-use stacks::burnchains::{Burnchain, PoxConstants};
-use stacks::core;
-use stacks_common::consts::STACKS_EPOCH_MAX;
-use stacks_common::types::chainstate::StacksPrivateKey;
-
-use crate::config::{EventKeyType, EventObserverConfig, InitialBalance};
-use crate::tests::bitcoin_regtest::BitcoinCoreController;
-use crate::tests::neon_integrations::{
-    get_account, get_chain_info, neon_integration_test_conf, next_block_and_wait, submit_tx,
-    test_observer, wait_for_runloop,
-};
-use crate::tests::{make_stacks_transfer_mblock_only, to_addr};
-use crate::{neon, BitcoinRegtestController, BurnchainController};
-
-#[test]
-#[ignore]
-fn microblocks_disabled() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    let reward_cycle_len = 10;
-    let prepare_phase_len = 3;
-    let epoch_2_05 = 1;
-    let epoch_2_1 = 2;
-    let v1_unlock_height = epoch_2_1 + 1;
-    let epoch_2_2 = 3; // two blocks before next prepare phase.
-    let epoch_2_3 = 4;
-    let epoch_2_4 = 5;
-    let pox_3_activation_height = epoch_2_4;
-    let epoch_2_5 = 210;
-
-    let spender_1_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64);
-    let spender_2_bal = 10_000_000 * (core::MICROSTACKS_PER_STACKS as u64);
-
-    let spender_1_sk = StacksPrivateKey::new();
-    let spender_1_addr: PrincipalData = to_addr(&spender_1_sk).into();
-
-    let spender_2_sk = StacksPrivateKey::new();
-    let spender_2_addr: PrincipalData = to_addr(&spender_2_sk).into();
-
-    let mut initial_balances = vec![];
-
-    initial_balances.push(InitialBalance {
-        address: spender_1_addr.clone(),
-        amount: spender_1_bal,
-    });
-
-    initial_balances.push(InitialBalance {
-        address: spender_2_addr.clone(),
-        amount: spender_2_bal,
-    });
-
-    let (mut conf, miner_account) = neon_integration_test_conf();
-
-    conf.node.mine_microblocks = true;
-    conf.burnchain.max_rbf = 1000000;
-    conf.node.wait_time_for_microblocks = 0;
-    conf.node.microblock_frequency = 1_000;
-    conf.node.wait_time_for_blocks = 2_000;
-    conf.miner.wait_for_block_download = false;
-
-    conf.miner.first_attempt_time_ms = i64::max_value() as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64;
-
-    test_observer::spawn();
-
-    conf.events_observers.insert(EventObserverConfig {
-        endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT),
-        events_keys: vec![EventKeyType::AnyEvent],
-    });
-    conf.initial_balances.append(&mut initial_balances);
-
-    let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec();
-    epochs[1].end_height = epoch_2_05;
-    epochs[2].start_height = epoch_2_05;
-    epochs[2].end_height = epoch_2_1;
-    epochs[3].start_height = epoch_2_1;
-    epochs[3].end_height = epoch_2_2;
-    epochs[4].start_height = epoch_2_2;
-    epochs[4].end_height = epoch_2_3;
-    epochs[5].start_height = epoch_2_3;
-    epochs[5].end_height = epoch_2_4;
-    epochs[6].start_height = epoch_2_4;
-    epochs[6].end_height = epoch_2_5;
-    epochs[7].start_height = epoch_2_5;
-    epochs[7].end_height = STACKS_EPOCH_MAX;
-    epochs.truncate(8);
-    conf.burnchain.epochs = Some(epochs);
-
-    let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path());
-
-    let pox_constants = PoxConstants::new(
-        reward_cycle_len,
-        prepare_phase_len,
-        4 * prepare_phase_len / 5,
-        5,
-        15,
-        u64::max_value() - 2,
-        u64::max_value() - 1,
-        v1_unlock_height as u32,
-        epoch_2_2 as u32 + 1,
-        u32::MAX,
-        pox_3_activation_height as u32,
-    );
-    burnchain_config.pox_constants = pox_constants.clone();
-
-    let mut btcd_controller = BitcoinCoreController::new(conf.clone());
-    btcd_controller
-        .start_bitcoind()
-        .map_err(|_e| ())
-        .expect("Failed starting bitcoind");
-
-    let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain(
-        conf.clone(),
-        None,
-        Some(burnchain_config.clone()),
-        None,
-    );
-    let http_origin = format!("http://{}", &conf.node.rpc_bind);
-
-    btc_regtest_controller.bootstrap_chain(201);
-
-    eprintln!("Chain bootstrapped...");
-
-    let mut run_loop = neon::RunLoop::new(conf.clone());
-    let runloop_burnchain = burnchain_config.clone();
-
-    let blocks_processed = run_loop.get_blocks_processed_arc();
-
-    let channel = run_loop.get_coordinator_channel().unwrap();
-
-    thread::spawn(move || run_loop.start(Some(runloop_burnchain), 0));
-
-    // give the run loop some time to start up!
-    wait_for_runloop(&blocks_processed);
-
-    // first block wakes up the run loop
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // first block will hold our VRF registration
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // second block will be the first mined Stacks block
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    // push us to block 205
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    let tx = make_stacks_transfer_mblock_only(&spender_1_sk, 0, 500, &spender_2_addr, 500);
-    submit_tx(&http_origin, &tx);
-
-    // wait until just before epoch 2.5
-    loop {
-        let tip_info = get_chain_info(&conf);
-        if tip_info.burn_block_height >= epoch_2_5 - 2 {
-            break;
-        }
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    }
-
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-
-    info!("Test passed processing 2.5");
-    let account = get_account(&http_origin, &spender_1_addr);
-    assert_eq!(
-        u64::try_from(account.balance).unwrap(),
-        spender_1_bal - 1_000
-    );
-    assert_eq!(account.nonce, 1);
-
-    let tx = make_stacks_transfer_mblock_only(&spender_1_sk, 1, 500, &spender_2_addr, 500);
-    submit_tx(&http_origin, &tx);
-
-    let mut last_block_height = get_chain_info(&conf).burn_block_height;
-    for _i in 0..5 {
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-        let tip_info = get_chain_info(&conf);
-        if tip_info.burn_block_height > last_block_height {
-            last_block_height = tip_info.burn_block_height;
-        } else {
-            panic!("FATAL: failed to mine");
-        }
-    }
-
-    // second transaction should not have been processed!
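-    // (one 500 uSTX transfer with a 500 uSTX fee was mined earlier, so the
-    //  balance should still be spender_1_bal - 1_000 and the nonce should be 1)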
-    let account = get_account(&http_origin, &spender_1_addr);
-    assert_eq!(
-        u64::try_from(account.balance).unwrap(),
-        spender_1_bal - 1_000
-    );
-    assert_eq!(account.nonce, 1);
-
-    info!(
-        "Microblocks assembled: {}",
-        test_observer::get_microblocks().len()
-    );
-    assert_eq!(test_observer::get_microblocks().len(), 1);
-
-    let miner_nonce_before_microblock_assembly = get_account(&http_origin, &miner_account).nonce;
-
-    // Now, let's tell the miner to try to mine microblocks, but don't try to confirm them!
-    env::set_var("STACKS_TEST_FORCE_MICROBLOCKS_POST_25", "1");
-
-    let mut last_block_height = get_chain_info(&conf).burn_block_height;
-    for _i in 0..2 {
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-        let tip_info = get_chain_info(&conf);
-        if tip_info.burn_block_height > last_block_height {
-            last_block_height = tip_info.burn_block_height;
-        } else {
-            panic!("FATAL: failed to mine");
-        }
-    }
-
-    let miner_nonce_after_microblock_assembly = get_account(&http_origin, &miner_account).nonce;
-
-    // second transaction should not have been processed -- even though we should have
-    // produced microblocks, they should not get accepted to the chain state
-    let account = get_account(&http_origin, &spender_1_addr);
-    assert_eq!(
-        u64::try_from(account.balance).unwrap(),
-        spender_1_bal - 1_000
-    );
-    assert_eq!(account.nonce, 1);
-
-    // but we should have assembled and announced at least 1 to the observer
-    assert!(test_observer::get_microblocks().len() >= 2);
-    info!(
-        "Microblocks assembled: {}",
-        test_observer::get_microblocks().len()
-    );
-
-    // and our miner should have gotten some blocks accepted
-    assert!(
-        miner_nonce_after_microblock_assembly > miner_nonce_before_microblock_assembly,
-        "Mined before started microblock assembly: {miner_nonce_before_microblock_assembly}, Mined after started microblock assembly: {miner_nonce_after_microblock_assembly}"
-    );
-
-    // Now, tell the miner to try to confirm microblocks as well.
-    // This should test that the block gets rejected by append block
-    env::set_var("STACKS_TEST_CONFIRM_MICROBLOCKS_POST_25", "1");
-
-    let mut last_block_height = get_chain_info(&conf).burn_block_height;
-    for _i in 0..2 {
-        next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
-        let tip_info = get_chain_info(&conf);
-        if tip_info.burn_block_height > last_block_height {
-            last_block_height = tip_info.burn_block_height;
-        } else {
-            panic!("FATAL: failed to mine");
-        }
-    }
-
-    let miner_nonce_after_microblock_confirmation = get_account(&http_origin, &miner_account).nonce;
-
-    // and our miner should have gotten at most one more block accepted
-    // (because they may have had 1 block confirmation in the bitcoin mempool which didn't confirm a microblock
-    //  before we flipped the flag)
-    assert!(
-        miner_nonce_after_microblock_confirmation <= miner_nonce_after_microblock_assembly + 1,
-        "Mined after started microblock confirmation: {miner_nonce_after_microblock_confirmation}",
-    );
-
-    // second transaction should not have been processed -- even though we should have
-    // produced microblocks, they should not get accepted to the chain state
-    let account = get_account(&http_origin, &spender_1_addr);
-    assert_eq!(
-        u64::try_from(account.balance).unwrap(),
-        spender_1_bal - 1_000
-    );
-    assert_eq!(account.nonce, 1);
-
-    test_observer::clear();
-    channel.stop_chains_coordinator();
-}
diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs
index 35dca5b5370..2bb9bd891e5 100644
--- a/testnet/stacks-node/src/tests/integrations.rs
+++ b/testnet/stacks-node/src/tests/integrations.rs
@@ -183,8 +183,8 @@ fn integration_test_get_info() {
     });
 
     conf.burnchain.commit_anchor_block_within = 5000;
-    conf.miner.first_attempt_time_ms = i64::MAX as u64;
-    conf.miner.subsequent_attempt_time_ms = i64::MAX as u64;
+    conf.miner.first_attempt_time_ms = i64::max_value() as u64;
+    conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64;
 
     let num_rounds = 5;
diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs
index 5a237e6e20c..7dbabae3ed6 100644
--- a/testnet/stacks-node/src/tests/mod.rs
+++ b/testnet/stacks-node/src/tests/mod.rs
@@ -57,10 +57,9 @@ mod epoch_21;
 mod epoch_22;
 mod epoch_23;
 mod epoch_24;
-mod epoch_25;
 mod integrations;
 mod mempool;
-pub mod nakamoto_integrations;
+mod nakamoto_integrations;
 pub mod neon_integrations;
 mod signer;
 mod stackerdb;
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index aa545514f05..075c7455370 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -15,7 +15,6 @@
 // along with this program. If not, see <http://www.gnu.org/licenses/>.
use std::collections::{HashMap, HashSet}; use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::mpsc::{channel, Receiver, Sender}; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -26,18 +25,15 @@ use clarity::vm::costs::ExecutionCost; use clarity::vm::types::{PrincipalData, QualifiedContractIdentifier}; use http_types::headers::AUTHORIZATION; use lazy_static::lazy_static; -use libsigner::{SignerSession, StackerDBSession}; -use rand::RngCore; -use stacks::burnchains::{MagicBytes, Txid}; +use libsigner::{BlockResponse, SignerMessage, SignerSession, StackerDBSession}; +use stacks::burnchains::MagicBytes; use stacks::chainstate::burn::db::sortdb::SortitionDB; -use stacks::chainstate::burn::operations::{ - BlockstackOperationType, PreStxOp, StackStxOp, VoteForAggregateKeyOp, -}; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::miner::NakamotoBlockBuilder; +use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; use stacks::chainstate::nakamoto::test_signers::TestSigners; use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; -use stacks::chainstate::stacks::address::{PoxAddress, StacksAddressExtensions}; +use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::{ MINERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; @@ -48,41 +44,36 @@ use stacks::core::{ StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_10, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, - PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, PEER_VERSION_TESTNET, + PEER_VERSION_EPOCH_2_5, PEER_VERSION_EPOCH_3_0, }; -use stacks::libstackerdb::SlotMetadata; +use stacks::libstackerdb::{SlotMetadata, StackerDBChunkData}; use stacks::net::api::callreadonly::CallReadOnlyRequestBody; use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::postblock_proposal::{ BlockValidateReject, BlockValidateResponse, NakamotoBlockProposal, ValidateRejectCode, }; -use stacks::util::hash::hex_bytes; use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::signed_structured_data::pox4::{ make_pox_4_signer_key_signature, Pox4SignatureTopic, }; use stacks_common::address::AddressHashMode; -use stacks_common::bitvec::BitVec; use stacks_common::codec::StacksMessageCodec; use stacks_common::consts::{CHAIN_ID_TESTNET, STACKS_EPOCH_MAX}; use stacks_common::types::chainstate::{ - BlockHeaderHash, BurnchainHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, + BlockHeaderHash, StacksAddress, StacksPrivateKey, StacksPublicKey, }; -use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{to_hex, Sha512Trunc256Sum}; -use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use stacks_common::util::sleep_ms; +use stacks_common::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; use super::bitcoin_regtest::BitcoinCoreController; use crate::config::{EventKeyType, EventObserverConfig, InitialBalance}; use crate::neon::{Counters, RunLoopCounter}; -use crate::operations::BurnchainOpSigner; use crate::run_loop::boot_nakamoto; use crate::tests::neon_integrations::{ get_account, get_chain_info_result, get_pox_info, next_block_and_wait, run_until_burnchain_height, submit_tx, test_observer, wait_for_runloop, }; -use crate::tests::{get_chain_info, 
make_stacks_transfer, to_addr};
+use crate::tests::{make_stacks_transfer, to_addr};
 use crate::{tests, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain};
 
 pub static POX_4_DEFAULT_STACKER_BALANCE: u64 = 100_000_000_000_000;
@@ -156,51 +147,6 @@ lazy_static! {
     ];
 }
 
-pub static TEST_SIGNING: Mutex<Option<TestSigningChannel>> = Mutex::new(None);
-
-pub struct TestSigningChannel {
-    pub recv: Option<Receiver<ThresholdSignature>>,
-    pub send: Sender<ThresholdSignature>,
-}
-
-impl TestSigningChannel {
-    /// If the integration test has instantiated the singleton TEST_SIGNING channel,
-    /// wait for a signature from the blind-signer.
-    /// Returns None if the singleton isn't instantiated and the miner should coordinate
-    /// a real signer set signature.
-    /// Panics if the blind-signer times out.
-    pub fn get_signature() -> Option<ThresholdSignature> {
-        let mut signer = TEST_SIGNING.lock().unwrap();
-        let Some(sign_channels) = signer.as_mut() else {
-            return None;
-        };
-        let recv = sign_channels.recv.take().unwrap();
-        drop(signer); // drop signer so we don't hold the lock while receiving.
-        let signature = recv.recv_timeout(Duration::from_secs(30)).unwrap();
-        let overwritten = TEST_SIGNING
-            .lock()
-            .unwrap()
-            .as_mut()
-            .unwrap()
-            .recv
-            .replace(recv);
-        assert!(overwritten.is_none());
-        Some(signature)
-    }
-
-    /// Setup the TestSigningChannel as a singleton using TEST_SIGNING,
-    /// returning an owned Sender to the channel.
-    pub fn instantiate() -> Sender<ThresholdSignature> {
-        let (send, recv) = channel();
-        let existed = TEST_SIGNING.lock().unwrap().replace(Self {
-            recv: Some(recv),
-            send: send.clone(),
-        });
-        assert!(existed.is_none());
-        send
-    }
-}
-
 pub fn get_stacker_set(http_origin: &str, cycle: u64) -> GetStackersResponse {
     let client = reqwest::blocking::Client::new();
     let path = format!("{http_origin}/v2/stacker_set/{cycle}");
@@ -263,12 +209,13 @@ pub fn add_initial_balances(
 pub fn blind_signer(
     conf: &Config,
     signers: &TestSigners,
+    signer: &Secp256k1PrivateKey,
     proposals_count: RunLoopCounter,
 ) -> JoinHandle<()> {
-    let sender = TestSigningChannel::instantiate();
     let mut signed_blocks = HashSet::new();
     let conf = conf.clone();
     let signers = signers.clone();
+    let signer = signer.clone();
     let mut last_count = proposals_count.load(Ordering::SeqCst);
     thread::spawn(move || loop {
         thread::sleep(Duration::from_millis(100));
@@ -277,7 +224,7 @@ pub fn blind_signer(
             continue;
         }
         last_count = cur_count;
-        match read_and_sign_block_proposal(&conf, &signers, &signed_blocks, &sender) {
+        match read_and_sign_block_proposal(&conf, &signers, &signer, &signed_blocks) {
            Ok(signed_block) => {
                if signed_blocks.contains(&signed_block) {
                    continue;
@@ -295,8 +242,8 @@ pub fn blind_signer(
 pub fn read_and_sign_block_proposal(
     conf: &Config,
     signers: &TestSigners,
+    signer: &Secp256k1PrivateKey,
     signed_blocks: &HashSet<Sha512Trunc256Sum>,
-    channel: &Sender<ThresholdSignature>,
 ) -> Result<Sha512Trunc256Sum, String> {
     let burnchain = conf.get_burnchain();
     let sortdb = burnchain.open_sortition_db(true).unwrap();
@@ -313,13 +260,12 @@ pub fn read_and_sign_block_proposal(
     let miner_contract_id = boot_code_id(MINERS_NAME, false);
     let mut miners_stackerdb = StackerDBSession::new(&conf.node.rpc_bind, miner_contract_id);
     miners_stackerdb
-        .get_latest(miner_slot_id.start)
+        .get_latest(miner_slot_id)
        .map_err(|_| "Failed to get latest chunk from the miner slot ID")?
        .ok_or("No chunk found")?
    };
    let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash());
    let signer_sig_hash = proposed_block.header.signer_signature_hash();
-
    if signed_blocks.contains(&signer_sig_hash) {
        // already signed off on this block, don't sign again.
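        // (the signer signature hash commits to the proposed block's contents,
        //  so it doubles as the dedup key here)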
        return Ok(signer_sig_hash);
@@ -335,10 +281,35 @@
         .clone()
         .sign_nakamoto_block(&mut proposed_block, reward_cycle);
 
-    channel
-        .send(proposed_block.header.signer_signature)
+    let signer_message = SignerMessage::BlockResponse(BlockResponse::Accepted((
+        signer_sig_hash.clone(),
+        proposed_block.header.signer_signature.clone(),
+    )));
+
+    let signers_contract_id =
+        NakamotoSigners::make_signers_db_contract_id(reward_cycle, libsigner::BLOCK_MSG_ID, false);
+
+    let http_origin = format!("http://{}", &conf.node.rpc_bind);
+    let signers_info = get_stacker_set(&http_origin, reward_cycle);
+    let signer_index = get_signer_index(&signers_info, &Secp256k1PublicKey::from_private(signer))
+        .unwrap()
+        .try_into()
         .unwrap();
-    return Ok(signer_sig_hash);
+
+    let next_version = get_stackerdb_slot_version(&http_origin, &signers_contract_id, signer_index)
+        .map(|x| x + 1)
+        .unwrap_or(0);
+    let mut signers_contract_sess = StackerDBSession::new(&conf.node.rpc_bind, signers_contract_id);
+    let mut chunk_to_put = StackerDBChunkData::new(
+        u32::try_from(signer_index).unwrap(),
+        next_version,
+        signer_message.serialize_to_vec(),
+    );
+    chunk_to_put.sign(signer).unwrap();
+    signers_contract_sess
+        .put_chunk(&chunk_to_put)
+        .map_err(|e| e.to_string())?;
+    Ok(signer_sig_hash)
 }
 
 /// Return a working nakamoto-neon config and the miner's bitcoin address to fund
@@ -401,8 +372,6 @@ pub fn naka_neon_integration_conf(seed: Option<&[u8]>) -> (Config, StacksAddress
     conf.burnchain.pox_prepare_length = Some(5);
     conf.burnchain.pox_reward_length = Some(20);
 
-    conf.connection_options.inv_sync_interval = 1;
-
     (conf, miner_account)
 }
@@ -683,12 +652,11 @@ fn get_signer_index(
     })
 }
 
-/// Use the read-only API to get the aggregate key for a given reward cycle
-pub fn get_key_for_cycle(
+fn is_key_set_for_cycle(
     reward_cycle: u64,
     is_mainnet: bool,
     http_origin: &str,
-) -> Result<Option<Vec<u8>>, String> {
+) -> Result<bool, String> {
     let client = reqwest::blocking::Client::new();
     let boot_address = StacksAddress::burn_address(is_mainnet);
     let path = format!("http://{http_origin}/v2/contracts/call-read/{boot_address}/signers-voting/get-approved-aggregate-key");
@@ -714,29 +682,10 @@
     )
     .map_err(|_| "Failed to deserialize Clarity value")?;
 
-    let buff_opt = result_value
+    result_value
         .expect_optional()
-        .expect("Expected optional type");
-
-    match buff_opt {
-        Some(buff_val) => {
-            let buff = buff_val
-                .expect_buff(33)
-                .map_err(|_| "Failed to get buffer value")?;
-            Ok(Some(buff))
-        }
-        None => Ok(None),
-    }
-}
-
-/// Use the read-only API to check if the aggregate key is set for a given reward cycle
-pub fn is_key_set_for_cycle(
-    reward_cycle: u64,
-    is_mainnet: bool,
-    http_origin: &str,
-) -> Result<bool, String> {
-    let key = get_key_for_cycle(reward_cycle, is_mainnet, &http_origin)?;
-    Ok(key.is_some())
+        .map(|v| v.is_some())
+        .map_err(|_| "Response is not optional".to_string())
 }
 
 fn signer_vote_if_needed(
@@ -1015,7 +964,7 @@ fn simple_neon_integration() {
     }
 
     info!("Nakamoto miner started...");
-    blind_signer(&naka_conf, &signers, proposals_submitted);
+    blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
 
     // first block wakes up the run loop, wait until a key registration has been submitted.
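    // next_block_and mines one bitcoin block, then polls the closure until it
    // returns Ok(true) (here, at least one VRF key registration submitted) or
    // the 60-second timeout elapses.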
    next_block_and(&mut btc_regtest_controller, 60, || {
@@ -1244,7 +1193,7 @@
         .stacks_block_height;
 
     info!("Nakamoto miner started...");
-    blind_signer(&naka_conf, &signers, proposals_submitted);
+    blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
 
     // first block wakes up the run loop, wait until a key registration has been submitted.
     next_block_and(&mut btc_regtest_controller, 60, || {
@@ -1550,7 +1499,7 @@ fn correct_burn_outs() {
     );
 
     info!("Bootstrapped to Epoch-3.0 boundary, Epoch2x miner should stop");
-    blind_signer(&naka_conf, &signers, proposals_submitted);
+    blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
 
     // we should already be able to query the stacker set via RPC
     let burnchain = naka_conf.get_burnchain();
@@ -1630,10 +1579,7 @@
     let new_blocks_with_reward_set: Vec<serde_json::Value> = test_observer::get_blocks()
         .into_iter()
-        .filter(|block| {
-            block.get("reward_set").map_or(false, |v| !v.is_null())
-                && block.get("cycle_number").map_or(false, |v| !v.is_null())
-        })
+        .filter(|block| block.get("reward_set").is_some() && block.get("cycle_number").is_some())
         .collect();
     info!(
         "Announced blocks that include reward sets: {:#?}",
@@ -1759,7 +1705,7 @@ fn block_proposal_api_endpoint() {
     );
 
     info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner");
-    blind_signer(&conf, &signers, proposals_submitted);
+    blind_signer(&conf, &signers, &sender_signer_sk, proposals_submitted);
 
     let burnchain = conf.get_burnchain();
     let sortdb = burnchain.open_sortition_db(true).unwrap();
@@ -2127,7 +2073,7 @@ fn miner_writes_proposed_block_to_stackerdb() {
     );
 
     info!("Nakamoto miner started...");
-    blind_signer(&naka_conf, &signers, proposals_submitted);
+    blind_signer(&naka_conf, &signers, &sender_signer_sk, proposals_submitted);
 
     // first block wakes up the run loop, wait until a key registration has been submitted.
next_block_and(&mut btc_regtest_controller, 60, || { let vrf_count = vrfs_submitted.load(Ordering::SeqCst); @@ -2164,13 +2110,14 @@ fn miner_writes_proposed_block_to_stackerdb() { let mut miners_stackerdb = StackerDBSession::new(&naka_conf.node.rpc_bind, miner_contract_id); miners_stackerdb - .get_latest(slot_id.start) + .get_latest(slot_id) .expect("Failed to get latest chunk from the miner slot ID") .expect("No chunk found") }; let proposed_block_hash = format!("0x{}", proposed_block.header.block_hash()); let mut proposed_zero_block = proposed_block.clone(); + proposed_zero_block.header.miner_signature = MessageSignature::empty(); proposed_zero_block.header.signer_signature = ThresholdSignature::empty(); let proposed_zero_block_hash = format!("0x{}", proposed_zero_block.header.block_hash()); @@ -2196,943 +2143,9 @@ fn miner_writes_proposed_block_to_stackerdb() { "proposed_block_hash" => &proposed_block_hash, ); - let signer_bitvec_str = observed_block.signer_bitvec.clone(); - let signer_bitvec_bytes = hex_bytes(&signer_bitvec_str).unwrap(); - let signer_bitvec = BitVec::<4000>::consensus_deserialize(&mut signer_bitvec_bytes.as_slice()) - .expect("Failed to deserialize signer bitvec"); - - assert_eq!(signer_bitvec.len(), 1); - assert_eq!( format!("0x{}", observed_block.block_hash), proposed_zero_block_hash, "Observed miner hash should match the proposed block read from StackerDB (after zeroing signatures)" ); } - -#[test] -#[ignore] -fn vote_for_aggregate_key_burn_op() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let signers = TestSigners::default(); - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let _http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let signer_sk = Secp256k1PrivateKey::new(); - let signer_addr = tests::to_addr(&signer_sk); - - naka_conf.add_initial_balance(PrincipalData::from(signer_addr.clone()).to_string(), 100000); - let stacker_sk = setup_stacker(&mut naka_conf); - - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - btc_regtest_controller.bootstrap_chain(201); - - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); - let run_loop_stopper = run_loop.get_termination_switch(); - let Counters { - blocks_processed, - naka_submitted_vrfs: vrfs_submitted, - naka_submitted_commits: commits_submitted, - naka_proposed_blocks: proposals_submitted, - .. 
- } = run_loop.counters(); - - let coord_channel = run_loop.coordinator_channels(); - - let run_loop_thread = thread::Builder::new() - .name("run_loop".into()) - .spawn(move || run_loop.start(None, 0)) - .unwrap(); - wait_for_runloop(&blocks_processed); - boot_to_epoch_3( - &naka_conf, - &blocks_processed, - &[stacker_sk], - &[signer_sk], - Some(&signers), - &mut btc_regtest_controller, - ); - - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); - - let burnchain = naka_conf.get_burnchain(); - let _sortdb = burnchain.open_sortition_db(true).unwrap(); - let (_chainstate, _) = StacksChainState::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - - info!("Nakamoto miner started..."); - blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); - - // submit a pre-stx op - let mut miner_signer = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); - info!("Submitting pre-stx op"); - let pre_stx_op = PreStxOp { - output: signer_addr.clone(), - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - }; - - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch30, - BlockstackOperationType::PreStx(pre_stx_op), - &mut miner_signer, - 1 - ) - .is_some(), - "Pre-stx operation should submit successfully" - ); - - // Mine until the next prepare phase - let block_height = btc_regtest_controller.get_headers_height(); - let reward_cycle = btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap(); - let prepare_phase_start = btc_regtest_controller - .get_burnchain() - .pox_constants - .prepare_phase_start( - btc_regtest_controller.get_burnchain().first_block_height, - reward_cycle, - ); - - let blocks_until_prepare = prepare_phase_start + 1 - block_height; - - info!( - "Mining until prepare phase start."; - "prepare_phase_start" => prepare_phase_start, - "block_height" => block_height, - "blocks_until_prepare" => blocks_until_prepare, - ); - - for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); - } - - let reward_cycle = reward_cycle + 1; - - let signer_index = 0; - - info!( - "Submitting vote for aggregate key op"; - "block_height" => block_height, - "reward_cycle" => reward_cycle, - "signer_index" => %signer_index, - ); - - let stacker_pk = StacksPublicKey::from_private(&stacker_sk); - let signer_key: StacksPublicKeyBuffer = stacker_pk.to_bytes_compressed().as_slice().into(); - let aggregate_key = signer_key.clone(); - - let vote_for_aggregate_key_op = - BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { - signer_key, - signer_index, - sender: signer_addr.clone(), - round: 0, - reward_cycle, - aggregate_key, - // to be filled in - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }); - - 
let mut signer_burnop_signer = BurnchainOpSigner::new(signer_sk.clone(), false); - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch30, - vote_for_aggregate_key_op, - &mut signer_burnop_signer, - 1 - ) - .is_some(), - "Vote for aggregate key operation should submit successfully" - ); - - info!("Submitted vote for aggregate key op at height {block_height}, mining a few blocks..."); - - // the second block should process the vote, after which the vote should be set - for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); - } - - let mut vote_for_aggregate_key_found = false; - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); - let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); - if !burnchain_op.contains_key("vote_for_aggregate_key") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); - panic!("unexpected btc transaction type"); - } - let vote_obj = burnchain_op.get("vote_for_aggregate_key").unwrap(); - let agg_key = vote_obj - .get("aggregate_key") - .expect("Expected aggregate_key key in burn op") - .as_str() - .unwrap(); - assert_eq!(agg_key, aggregate_key.to_hex()); - - vote_for_aggregate_key_found = true; - } - } - } - assert!( - vote_for_aggregate_key_found, - "Expected vote for aggregate key op" - ); - - // Check that the correct key was set - let saved_key = get_key_for_cycle(reward_cycle, false, &naka_conf.node.rpc_bind) - .expect("Expected to be able to check key is set after voting") - .expect("Expected aggregate key to be set"); - - assert_eq!(saved_key, aggregate_key.as_bytes().to_vec()); - - coord_channel - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper.store(false, Ordering::SeqCst); - - run_loop_thread.join().unwrap(); -} - -/// This test boots a follower node using the block downloader -#[test] -#[ignore] -fn follower_bootup() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let signers = TestSigners::default(); - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - let sender_sk = Secp256k1PrivateKey::new(); - let sender_signer_sk = Secp256k1PrivateKey::new(); - let sender_signer_addr = tests::to_addr(&sender_signer_sk); - let tenure_count = 5; - let inter_blocks_per_tenure = 9; - // setup sender + recipient for some test stx transfers - // these are necessary for the interim blocks to get mined at all - let sender_addr = tests::to_addr(&sender_sk); - let send_amt = 100; - let send_fee = 180; - naka_conf.add_initial_balance( - PrincipalData::from(sender_addr.clone()).to_string(), - (send_amt + send_fee) * tenure_count * inter_blocks_per_tenure, - ); - naka_conf.add_initial_balance( - PrincipalData::from(sender_signer_addr.clone()).to_string(), - 100000, - ); - let recipient = PrincipalData::from(StacksAddress::burn_address(false)); - let stacker_sk = setup_stacker(&mut naka_conf); - - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: 
format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - btc_regtest_controller.bootstrap_chain(201); - - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); - let run_loop_stopper = run_loop.get_termination_switch(); - let Counters { - blocks_processed, - naka_submitted_vrfs: vrfs_submitted, - naka_submitted_commits: commits_submitted, - naka_proposed_blocks: proposals_submitted, - .. - } = run_loop.counters(); - - let coord_channel = run_loop.coordinator_channels(); - - let run_loop_thread = thread::Builder::new() - .name("run_loop".into()) - .spawn(move || run_loop.start(None, 0)) - .unwrap(); - wait_for_runloop(&blocks_processed); - boot_to_epoch_3( - &naka_conf, - &blocks_processed, - &[stacker_sk], - &[sender_signer_sk], - Some(&signers), - &mut btc_regtest_controller, - ); - - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); - - let burnchain = naka_conf.get_burnchain(); - let sortdb = burnchain.open_sortition_db(true).unwrap(); - let (chainstate, _) = StacksChainState::open( - naka_conf.is_mainnet(), - naka_conf.burnchain.chain_id, - &naka_conf.get_chainstate_path_str(), - None, - ) - .unwrap(); - - let block_height_pre_3_0 = - NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap() - .stacks_block_height; - - info!("Nakamoto miner started..."); - blind_signer(&naka_conf, &signers, proposals_submitted); - - // first block wakes up the run loop, wait until a key registration has been submitted. 
- next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); - - let mut follower_conf = naka_conf.clone(); - follower_conf.events_observers.clear(); - follower_conf.node.working_dir = format!("{}-follower", &naka_conf.node.working_dir); - follower_conf.node.seed = vec![0x01; 32]; - follower_conf.node.local_peer_seed = vec![0x02; 32]; - - let mut rng = rand::thread_rng(); - let mut buf = [0u8; 8]; - rng.fill_bytes(&mut buf); - - let rpc_port = u16::from_be_bytes(buf[0..2].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - let p2p_port = u16::from_be_bytes(buf[2..4].try_into().unwrap()).saturating_add(1025) - 1; // use a non-privileged port between 1024 and 65534 - - let localhost = "127.0.0.1"; - follower_conf.node.rpc_bind = format!("{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_bind = format!("{}:{}", &localhost, p2p_port); - follower_conf.node.data_url = format!("http://{}:{}", &localhost, rpc_port); - follower_conf.node.p2p_address = format!("{}:{}", &localhost, p2p_port); - - let node_info = get_chain_info(&naka_conf); - follower_conf.node.add_bootstrap_node( - &format!( - "{}@{}", - &node_info.node_public_key.unwrap(), - naka_conf.node.p2p_bind - ), - CHAIN_ID_TESTNET, - PEER_VERSION_TESTNET, - ); - - let mut follower_run_loop = boot_nakamoto::BootRunLoop::new(follower_conf.clone()).unwrap(); - let follower_run_loop_stopper = follower_run_loop.get_termination_switch(); - let follower_coord_channel = follower_run_loop.coordinator_channels(); - - debug!( - "Booting follower-thread ({},{})", - &follower_conf.node.p2p_bind, &follower_conf.node.rpc_bind - ); - debug!( - "Booting follower-thread: neighbors = {:?}", - &follower_conf.node.bootstrap_node - ); - - // spawn a follower thread - let follower_thread = thread::Builder::new() - .name("follower-thread".into()) - .spawn(move || follower_run_loop.start(None, 0)) - .unwrap(); - - debug!("Booted follower-thread"); - - // Mine `tenure_count` nakamoto tenures - for tenure_ix in 0..tenure_count { - let commits_before = commits_submitted.load(Ordering::SeqCst); - next_block_and_process_new_stacks_block(&mut btc_regtest_controller, 60, &coord_channel) - .unwrap(); - - let mut last_tip = BlockHeaderHash([0x00; 32]); - let mut last_tip_height = 0; - - // mine the interim blocks - for interim_block_ix in 0..inter_blocks_per_tenure { - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - // submit a tx so that the miner will mine an extra block - let sender_nonce = tenure_ix * inter_blocks_per_tenure + interim_block_ix; - let transfer_tx = - make_stacks_transfer(&sender_sk, sender_nonce, send_fee, &recipient, send_amt); - submit_tx(&http_origin, &transfer_tx); - - loop { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - if blocks_processed > blocks_processed_before { - break; - } - thread::sleep(Duration::from_millis(100)); - } - - let info = get_chain_info_result(&naka_conf).unwrap(); - assert_ne!(info.stacks_tip, last_tip); - assert_ne!(info.stacks_tip_height, last_tip_height); - - last_tip = info.stacks_tip; - 
last_tip_height = info.stacks_tip_height; - } - - let start_time = Instant::now(); - while commits_submitted.load(Ordering::SeqCst) <= commits_before { - if start_time.elapsed() >= Duration::from_secs(20) { - panic!("Timed out waiting for block-commit"); - } - thread::sleep(Duration::from_millis(100)); - } - } - - // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 - let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) - .unwrap() - .unwrap(); - info!( - "Latest tip"; - "height" => tip.stacks_block_height, - "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), - ); - - assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); - assert_eq!( - tip.stacks_block_height, - block_height_pre_3_0 + ((inter_blocks_per_tenure + 1) * tenure_count), - "Should have mined (1 + interim_blocks_per_tenure) * tenure_count nakamoto blocks" - ); - - // wait for follower to reach the chain tip - loop { - sleep_ms(1000); - let follower_node_info = get_chain_info(&follower_conf); - - info!( - "Follower tip is now {}/{}", - &follower_node_info.stacks_tip_consensus_hash, &follower_node_info.stacks_tip - ); - if follower_node_info.stacks_tip_consensus_hash == tip.consensus_hash - && follower_node_info.stacks_tip == tip.anchored_header.block_hash() - { - break; - } - } - - coord_channel - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper.store(false, Ordering::SeqCst); - - follower_coord_channel - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - follower_run_loop_stopper.store(false, Ordering::SeqCst); - - run_loop_thread.join().unwrap(); - follower_thread.join().unwrap(); -} - -#[test] -#[ignore] -fn stack_stx_burn_op_integration_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let signers = TestSigners::default(); - let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); - naka_conf.burnchain.satoshis_per_byte = 2; - naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(1); - - let signer_sk_1 = setup_stacker(&mut naka_conf); - let signer_addr_1 = tests::to_addr(&signer_sk_1); - - let signer_sk_2 = Secp256k1PrivateKey::new(); - let signer_addr_2 = tests::to_addr(&signer_sk_2); - - let stacker_sk = setup_stacker(&mut naka_conf); - - test_observer::spawn(); - let observer_port = test_observer::EVENT_OBSERVER_PORT; - naka_conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{observer_port}"), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); - btc_regtest_controller.bootstrap_chain(201); - - let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); - - let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); - let run_loop_stopper = run_loop.get_termination_switch(); - let Counters { - blocks_processed, - naka_submitted_vrfs: vrfs_submitted, - naka_submitted_commits: commits_submitted, - naka_proposed_blocks: proposals_submitted, - .. 
- } = run_loop.counters(); - - let coord_channel = run_loop.coordinator_channels(); - - let run_loop_thread = thread::Builder::new() - .name("run_loop".into()) - .spawn(move || run_loop.start(None, 0)) - .unwrap(); - wait_for_runloop(&blocks_processed); - boot_to_epoch_3( - &naka_conf, - &blocks_processed, - &[stacker_sk], - &[signer_sk_1], - Some(&signers), - &mut btc_regtest_controller, - ); - - info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); - - info!("Nakamoto miner started..."); - blind_signer(&naka_conf, &signers, proposals_submitted); - // first block wakes up the run loop, wait until a key registration has been submitted. - next_block_and(&mut btc_regtest_controller, 60, || { - let vrf_count = vrfs_submitted.load(Ordering::SeqCst); - Ok(vrf_count >= 1) - }) - .unwrap(); - - // second block should confirm the VRF register, wait until a block commit is submitted - next_block_and(&mut btc_regtest_controller, 60, || { - let commits_count = commits_submitted.load(Ordering::SeqCst); - Ok(commits_count >= 1) - }) - .unwrap(); - - let block_height = btc_regtest_controller.get_headers_height(); - - // submit a pre-stx op - let mut miner_signer_1 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); - - info!("Submitting first pre-stx op"); - let pre_stx_op = PreStxOp { - output: signer_addr_1.clone(), - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - }; - - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch30, - BlockstackOperationType::PreStx(pre_stx_op), - &mut miner_signer_1, - 1 - ) - .is_some(), - "Pre-stx operation should submit successfully" - ); - - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); - - let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); - info!("Submitting second pre-stx op"); - let pre_stx_op_2 = PreStxOp { - output: signer_addr_2.clone(), - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - }; - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch30, - BlockstackOperationType::PreStx(pre_stx_op_2), - &mut miner_signer_2, - 1 - ) - .is_some(), - "Pre-stx operation should submit successfully" - ); - info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); - - // Mine until the next prepare phase - let block_height = btc_regtest_controller.get_headers_height(); - let reward_cycle = btc_regtest_controller - .get_burnchain() - .block_height_to_reward_cycle(block_height) - .unwrap(); - let prepare_phase_start = btc_regtest_controller - .get_burnchain() - .pox_constants - .prepare_phase_start( - btc_regtest_controller.get_burnchain().first_block_height, - reward_cycle, - ); - - let blocks_until_prepare = prepare_phase_start + 1 - block_height; - - let lock_period: u8 = 6; - let topic = Pox4SignatureTopic::StackStx; - let auth_id: u32 = 1; - let pox_addr = PoxAddress::Standard(signer_addr_1, Some(AddressHashMode::SerializeP2PKH)); - - info!( - "Submitting set-signer-key-authorization"; - "block_height" => block_height, - "reward_cycle" => reward_cycle, - ); - - let signer_pk_1 = StacksPublicKey::from_private(&signer_sk_1); - let signer_key_arg_1: StacksPublicKeyBuffer = - signer_pk_1.to_bytes_compressed().as_slice().into(); - - let set_signer_key_auth_tx = tests::make_contract_call( - 
&signer_sk_1, - 1, - 500, - &StacksAddress::burn_address(false), - "pox-4", - "set-signer-key-authorization", - &[ - clarity::vm::Value::Tuple(pox_addr.clone().as_clarity_tuple().unwrap()), - clarity::vm::Value::UInt(lock_period.into()), - clarity::vm::Value::UInt(reward_cycle.into()), - clarity::vm::Value::string_ascii_from_bytes(topic.get_name_str().into()).unwrap(), - clarity::vm::Value::buff_from(signer_pk_1.clone().to_bytes_compressed()).unwrap(), - clarity::vm::Value::Bool(true), - clarity::vm::Value::UInt(u128::MAX), - clarity::vm::Value::UInt(auth_id.into()), - ], - ); - - submit_tx(&http_origin, &set_signer_key_auth_tx); - - info!( - "Mining until prepare phase start."; - "prepare_phase_start" => prepare_phase_start, - "block_height" => block_height, - "blocks_until_prepare" => blocks_until_prepare, - ); - - for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); - } - - let reward_cycle = reward_cycle + 1; - - info!( - "Submitting stack stx op"; - "block_height" => block_height, - "reward_cycle" => reward_cycle, - ); - - let mut signer_burnop_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); - let mut signer_burnop_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); - - info!( - "Before stack-stx op, signer 1 total: {}", - btc_regtest_controller - .get_utxos( - StacksEpochId::Epoch30, - &signer_burnop_signer_1.get_public_key(), - 1, - None, - block_height - ) - .unwrap() - .total_available(), - ); - info!( - "Before stack-stx op, signer 2 total: {}", - btc_regtest_controller - .get_utxos( - StacksEpochId::Epoch30, - &signer_burnop_signer_2.get_public_key(), - 1, - None, - block_height - ) - .unwrap() - .total_available(), - ); - - info!("Signer 1 addr: {}", signer_addr_1.to_b58()); - info!("Signer 2 addr: {}", signer_addr_2.to_b58()); - - let pox_info = get_pox_info(&http_origin).unwrap(); - let min_stx = pox_info.next_cycle.min_threshold_ustx; - - let stack_stx_op_with_some_signer_key = StackStxOp { - sender: signer_addr_1.clone(), - reward_addr: pox_addr, - stacked_ustx: min_stx.into(), - num_cycles: lock_period, - signer_key: Some(signer_key_arg_1), - max_amount: Some(u128::MAX), - auth_id: Some(auth_id), - // to be filled in - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }; - - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch30, - BlockstackOperationType::StackStx(stack_stx_op_with_some_signer_key), - &mut signer_burnop_signer_1, - 1 - ) - .is_some(), - "Stack STX operation should submit successfully" - ); - - let stack_stx_op_with_no_signer_key = StackStxOp { - sender: signer_addr_2.clone(), - reward_addr: PoxAddress::Standard(signer_addr_2, None), - stacked_ustx: 100000, - num_cycles: 6, - signer_key: None, - max_amount: None, - auth_id: None, - // to be filled in - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }; - - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch30, - BlockstackOperationType::StackStx(stack_stx_op_with_no_signer_key), - &mut signer_burnop_signer_2, - 1 - ) - .is_some(), - "Stack STX operation should submit successfully" - ); - - info!("Submitted 2 stack STX ops at height {block_height}, mining a few blocks..."); - - // the second block should process the vote, after which the balances should be unchanged - for _i in 0..2 { - 
next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); - } - - let mut stack_stx_found = false; - let mut stack_stx_burn_op_tx_count = 0; - let blocks = test_observer::get_blocks(); - info!("stack event observer num blocks: {:?}", blocks.len()); - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - info!( - "stack event observer num transactions: {:?}", - transactions.len() - ); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); - let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); - if !burnchain_op.contains_key("stack_stx") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); - panic!("unexpected btc transaction type"); - } - let stack_stx_obj = burnchain_op.get("stack_stx").unwrap(); - let signer_key_found = stack_stx_obj - .get("signer_key") - .expect("Expected signer_key in burn op") - .as_str() - .unwrap(); - assert_eq!(signer_key_found, signer_key_arg_1.to_hex()); - - let max_amount_correct = stack_stx_obj - .get("max_amount") - .expect("Expected max_amount") - .as_number() - .expect("Expected max_amount to be a number") - .eq(&serde_json::Number::from(u128::MAX)); - assert!(max_amount_correct, "Expected max_amount to be u128::MAX"); - - let auth_id_correct = stack_stx_obj - .get("auth_id") - .expect("Expected auth_id in burn op") - .as_number() - .expect("Expected auth id") - .eq(&serde_json::Number::from(auth_id)); - assert!(auth_id_correct, "Expected auth_id to be 1"); - - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = - clarity::vm::Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - info!("Clarity result of stack-stx op: {parsed}"); - parsed - .expect_result_ok() - .expect("Expected OK result for stack-stx op"); - - stack_stx_found = true; - stack_stx_burn_op_tx_count += 1; - } - } - } - assert!(stack_stx_found, "Expected stack STX op"); - assert_eq!( - stack_stx_burn_op_tx_count, 1, - "Stack-stx tx without a signer_key shouldn't have been submitted" - ); - - let sortdb = btc_regtest_controller.sortdb_mut(); - let sortdb_conn = sortdb.conn(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb_conn).unwrap(); - - let ancestor_burnchain_header_hashes = - SortitionDB::get_ancestor_burnchain_header_hashes(sortdb.conn(), &tip.burn_header_hash, 6) - .unwrap(); - - let mut all_stacking_burn_ops = vec![]; - let mut found_none = false; - let mut found_some = false; - // go from oldest burn header hash to newest - for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { - let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); - for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); - match stacking_op.signer_key { - Some(_) => found_some = true, - None => found_none = true, - } - all_stacking_burn_ops.push(stacking_op); - } - } - assert_eq!( - all_stacking_burn_ops.len(), - 2, - "Both stack-stx ops with and without a signer_key should be considered valid." 
- ); - assert!( - found_none, - "Expected one stacking_op to have a signer_key of None" - ); - assert!( - found_some, - "Expected one stacking_op to have a signer_key of Some" - ); - - coord_channel - .lock() - .expect("Mutex poisoned") - .stop_chains_coordinator(); - run_loop_stopper.store(false, Ordering::SeqCst); - - run_loop_thread.join().unwrap(); -} diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index ccb079e65ca..5ea49c1cc91 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -3,7 +3,7 @@ use std::path::Path; use std::sync::atomic::{AtomicU64, Ordering}; use std::sync::{mpsc, Arc}; use std::time::{Duration, Instant}; -use std::{cmp, env, fs, io, thread}; +use std::{cmp, env, fs, thread}; use clarity::vm::ast::stack_depth_checker::AST_CALL_STACK_DEPTH_BUFFER; use clarity::vm::ast::ASTRules; @@ -19,13 +19,10 @@ use stacks::burnchains::db::BurnchainDB; use stacks::burnchains::{Address, Burnchain, PoxConstants, Txid}; use stacks::chainstate::burn::db::sortdb::SortitionDB; use stacks::chainstate::burn::operations::{ - BlockstackOperationType, DelegateStxOp, PreStxOp, StackStxOp, TransferStxOp, - VoteForAggregateKeyOp, + BlockstackOperationType, DelegateStxOp, PreStxOp, TransferStxOp, }; use stacks::chainstate::burn::ConsensusHash; use stacks::chainstate::coordinator::comm::CoordinatorChannels; -use stacks::chainstate::stacks::address::PoxAddress; -use stacks::chainstate::stacks::boot::POX_4_NAME; use stacks::chainstate::stacks::db::StacksChainState; use stacks::chainstate::stacks::miner::{ signal_mining_blocked, signal_mining_ready, TransactionErrorEvent, TransactionEvent, @@ -36,18 +33,17 @@ use stacks::chainstate::stacks::{ StacksPublicKey, StacksTransaction, TransactionContractCall, TransactionPayload, }; use stacks::clarity_cli::vm_execute as execute; +use stacks::core; use stacks::core::mempool::MemPoolWalkTxTypes; use stacks::core::{ - self, StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, + StacksEpoch, StacksEpochId, BLOCK_LIMIT_MAINNET_20, BLOCK_LIMIT_MAINNET_205, BLOCK_LIMIT_MAINNET_21, CHAIN_ID_TESTNET, HELIUM_BLOCK_LIMIT_20, PEER_VERSION_EPOCH_1_0, PEER_VERSION_EPOCH_2_0, PEER_VERSION_EPOCH_2_05, PEER_VERSION_EPOCH_2_1, - PEER_VERSION_EPOCH_2_2, PEER_VERSION_EPOCH_2_3, PEER_VERSION_EPOCH_2_4, PEER_VERSION_EPOCH_2_5, }; use stacks::net::api::getaccount::AccountEntryResponse; use stacks::net::api::getcontractsrc::ContractSrcResponse; use stacks::net::api::getinfo::RPCPeerInfoData; use stacks::net::api::getpoxinfo::RPCPoxInfoData; -use stacks::net::api::getstackers::GetStackersResponse; use stacks::net::api::gettransaction_unconfirmed::UnconfirmedTransactionResponse; use stacks::net::api::postblock::StacksBlockAcceptedData; use stacks::net::api::postfeerate::RPCFeeEstimateResponse; @@ -56,17 +52,12 @@ use stacks::net::atlas::{ AtlasConfig, AtlasDB, GetAttachmentResponse, GetAttachmentsInvResponse, MAX_ATTACHMENT_INV_PAGES_PER_REQUEST, }; -use stacks::util_lib::boot::{boot_code_addr, boot_code_id}; +use stacks::util_lib::boot::boot_code_id; use stacks::util_lib::db::{query_row_columns, query_rows, u64_to_sql}; -use stacks::util_lib::signed_structured_data::pox4::{ - make_pox_4_signer_key_signature, Pox4SignatureTopic, -}; -use stacks_common::address::AddressHashMode; use stacks_common::codec::StacksMessageCodec; use stacks_common::types::chainstate::{ BlockHeaderHash, BurnchainHeaderHash, StacksAddress, 
StacksBlockId, }; -use stacks_common::types::StacksPublicKeyBuffer; use stacks_common::util::hash::{bytes_to_hex, hex_bytes, to_hex, Hash160}; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; use stacks_common::util::{get_epoch_time_ms, get_epoch_time_secs, sleep_ms}; @@ -83,7 +74,6 @@ use crate::neon_node::RelayerThread; use crate::operations::BurnchainOpSigner; use crate::stacks_common::types::PrivateKey; use crate::syncctl::PoxSyncWatchdogComms; -use crate::tests::nakamoto_integrations::get_key_for_cycle; use crate::util::hash::{MerkleTree, Sha512Trunc256Sum}; use crate::util::secp256k1::MessageSignature; use crate::{neon, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain}; @@ -157,8 +147,8 @@ fn inner_neon_integration_test_conf(seed: Option>) -> (Config, StacksAdd conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 0; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; // if there's just one node, then this must be true for tests to pass conf.miner.wait_for_block_download = false; @@ -1297,16 +1287,6 @@ pub fn get_contract_src( } } -pub fn get_stacker_set(http_origin: &str, reward_cycle: u64) -> GetStackersResponse { - let client = reqwest::blocking::Client::new(); - let path = format!("{}/v2/stacker_set/{}", http_origin, reward_cycle); - let res = client.get(&path).send().unwrap(); - - info!("Got stacker_set response {:?}", &res); - let res = res.json::().unwrap(); - res -} - #[test] #[ignore] fn deep_contract() { @@ -1999,674 +1979,9 @@ fn stx_delegate_btc_integration_test() { address: spender_addr.clone(), amount: 100300, }); - conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), - amount: 300, - }); - - // update epoch info so that Epoch 2.1 takes effect - conf.burnchain.epochs = Some(vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: 2, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 2, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - ]); - conf.burnchain.pox_2_activation = Some(3); - - test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); - - // reward cycle length = 5, so 3 reward cycle slots + 2 prepare-phase burns - let reward_cycle_len = 5; - let prepare_phase_len = 2; - let pox_constants = PoxConstants::new( - reward_cycle_len, - prepare_phase_len, - 2, - 5, - 15, - (16 * reward_cycle_len - 1).into(), - (17 * reward_cycle_len).into(), - u32::MAX, - u32::MAX, - u32::MAX, - u32::MAX, - ); - burnchain_config.pox_constants = pox_constants.clone(); - - let mut btc_regtest_controller = 
BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - let http_origin = format!("http://{}", &conf.node.rpc_bind); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - test_observer::clear(); - - // Mine a few more blocks so that Epoch 2.1 (and thus pox-2) can take effect. - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // okay, let's send a pre-stx op. - let pre_stx_op = PreStxOp { - output: spender_stx_addr.clone(), - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - }; - - let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); - - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch21, - BlockstackOperationType::PreStx(pre_stx_op), - &mut miner_signer, - 1 - ) - .is_some(), - "Pre-stx operation should submit successfully" - ); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // let's fire off our delegate op. - let del_stx_op = DelegateStxOp { - sender: spender_stx_addr.clone(), - delegate_to: recipient_addr.clone(), - reward_addr: None, - delegated_ustx: 100_000, - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - until_burn_height: None, - }; - - let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false); - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch21, - BlockstackOperationType::DelegateStx(del_stx_op), - &mut spender_signer, - 1 - ) - .is_some(), - "Delegate operation should submit successfully" - ); - - // the second block should process the delegation, after which the balaces should be unchanged - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - assert_eq!(get_balance(&http_origin, &spender_addr), 100300); - assert_eq!(get_balance(&http_origin, &recipient_addr), 300); - - // send a delegate-stack-stx transaction - let sort_height = channel.get_sortitions_processed(); - let tx = make_contract_call( - &recipient_sk, - 0, - 293, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox-2", - "delegate-stack-stx", - &[ - Value::Principal(spender_addr.clone()), - Value::UInt(100_000), - execute( - &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash), - ClarityVersion::Clarity2, - ) - .unwrap() - .unwrap(), - Value::UInt(sort_height as u128), - Value::UInt(6), - ], - ); - - // push the stacking transaction - submit_tx(&http_origin, &tx); - - // let's mine until the next reward cycle starts ... 
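// (Editorial sketch, not part of this diff.) "Mine until the next reward cycle
// starts" is simple modular arithmetic: with this test's PoxConstants
// (reward_cycle_len = 5, prepare_phase_len = 2), at most five more burn blocks
// are needed to cross a cycle boundary. The helper name below is hypothetical:
fn blocks_until_next_cycle(burn_height: u64, first_block_height: u64) -> u64 {
    let reward_cycle_len = 5; // matches PoxConstants::new(5, 2, ...) above
    let offset = (burn_height - first_block_height) % reward_cycle_len;
    reward_cycle_len - offset // a full cycle's worth when exactly on a boundary
}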
- next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // check the locked amount for the spender account - let account = get_account(&http_origin, &spender_stx_addr); - assert_eq!(account.locked, 100_000); - - let mut delegate_stack_stx_found = false; - let mut delegate_stx_found = false; - let blocks = test_observer::get_blocks(); - for block in blocks.iter() { - let events = block.get("events").unwrap().as_array().unwrap(); - for event in events.iter() { - let event_type = event.get("type").unwrap().as_str().unwrap(); - if event_type == "contract_event" { - let contract_event = event.get("contract_event").unwrap().as_object().unwrap(); - - // Check that it is a print event - let sub_type = contract_event.get("topic").unwrap().as_str().unwrap(); - assert_eq!(sub_type, "print"); - - // Ensure that the function name is as expected - // This verifies that there were print events for delegate-stack-stx and delegate-stx - let name_field = - &contract_event["value"]["Response"]["data"]["Tuple"]["data_map"]["name"]; - let name_data = name_field["Sequence"]["String"]["ASCII"]["data"] - .as_array() - .unwrap(); - let ascii_vec = name_data - .iter() - .map(|num| num.as_u64().unwrap() as u8) - .collect(); - let name = String::from_utf8(ascii_vec).unwrap(); - if name == "delegate-stack-stx" { - delegate_stack_stx_found = true; - } else if name == "delegate-stx" { - delegate_stx_found = true; - } - } - } - } - assert!(delegate_stx_found); - assert!(delegate_stack_stx_found); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn stack_stx_burn_op_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sk_1 = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_stx_addr_1: StacksAddress = to_addr(&spender_sk_1); - let spender_addr_1: PrincipalData = spender_stx_addr_1.clone().into(); - - let spender_sk_2 = StacksPrivateKey::from_hex(SK_2).unwrap(); - let spender_stx_addr_2: StacksAddress = to_addr(&spender_sk_2); - - let recipient_sk = StacksPrivateKey::new(); - let recipient_addr = to_addr(&recipient_sk); - - let (mut conf, _miner_account) = neon_integration_test_conf(); - - let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let second_bal = 2_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - - conf.initial_balances.push(InitialBalance { - address: spender_addr_1.clone(), - amount: first_bal, - }); - conf.initial_balances.push(InitialBalance { - address: recipient_addr.clone().into(), - amount: second_bal, - }); - - // update epoch info so that Epoch 2.1 takes effect - conf.burnchain.epochs = Some(vec![ - StacksEpoch { - epoch_id: StacksEpochId::Epoch20, - start_height: 0, - end_height: 1, - block_limit: BLOCK_LIMIT_MAINNET_20.clone(), - network_epoch: PEER_VERSION_EPOCH_2_0, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch2_05, - start_height: 1, - end_height: 2, - block_limit: BLOCK_LIMIT_MAINNET_205.clone(), - network_epoch: PEER_VERSION_EPOCH_2_05, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch21, - start_height: 2, - end_height: 3, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - 
start_height: 3, - end_height: 4, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch23, - start_height: 4, - end_height: 5, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_3, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch24, - start_height: 5, - end_height: 6, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_4, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch25, - start_height: 6, - end_height: 9223372036854775807, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_5, - }, - ]); - conf.burnchain.pox_2_activation = Some(3); - - test_observer::spawn(); - conf.events_observers.insert(EventObserverConfig { - endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), - events_keys: vec![EventKeyType::AnyEvent], - }); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .map_err(|_e| ()) - .expect("Failed starting bitcoind"); - - let mut burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); - - // reward cycle length = 5, so 3 reward cycle slots + 2 prepare-phase burns - let reward_cycle_len = 5; - let prepare_phase_len = 2; - let pox_constants = PoxConstants::new( - reward_cycle_len, - prepare_phase_len, - 2, - 5, - 15, - (16 * reward_cycle_len - 1).into(), - (17 * reward_cycle_len).into(), - u32::MAX, - u32::MAX, - u32::MAX, - u32::MAX, - ); - burnchain_config.pox_constants = pox_constants.clone(); - - let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( - conf.clone(), - None, - Some(burnchain_config.clone()), - None, - ); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! 
- wait_for_runloop(&blocks_processed); - - test_observer::clear(); - - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - info!("Bootstrapped to 2.5, submitting stack-stx and pre-stx op..."); - - let signer_sk_1 = spender_sk_1.clone(); - let signer_sk_2 = spender_sk_2.clone(); - let signer_pk_1 = StacksPublicKey::from_private(&signer_sk_1); - - let pox_addr = PoxAddress::Standard(spender_stx_addr_1, Some(AddressHashMode::SerializeP2PKH)); - - let mut block_height = channel.get_sortitions_processed(); - - let signer_pk_bytes = signer_pk_1.to_bytes_compressed(); - - // Submitting 2 pre-stx operations - let mut miner_signer_1 = Keychain::default(conf.node.seed.clone()).generate_op_signer(); - let pre_stx_op_1 = PreStxOp { - output: spender_stx_addr_1, - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - }; - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch25, - BlockstackOperationType::PreStx(pre_stx_op_1), - &mut miner_signer_1, - 1 - ) - .is_some(), - "Pre-stx operation should submit successfully" - ); - - let mut miner_signer_2 = Keychain::default(conf.node.seed.clone()).generate_op_signer(); - let pre_stx_op_2 = PreStxOp { - output: spender_stx_addr_2.clone(), - // to be filled in - txid: Txid([0u8; 32]), - vtxindex: 0, - block_height: 0, - burn_header_hash: BurnchainHeaderHash([0u8; 32]), - }; - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch25, - BlockstackOperationType::PreStx(pre_stx_op_2), - &mut miner_signer_2, - 1 - ) - .is_some(), - "Pre-stx operation should submit successfully" - ); - info!("Submitted 2 pre-stx ops at block {block_height}, mining a few blocks..."); - - let reward_cycle = burnchain_config - .block_height_to_reward_cycle(block_height) - .unwrap() - + 1; - - let lock_period = 6; - let topic = Pox4SignatureTopic::StackStx; - let auth_id: u32 = 1; - - info!( - "Submitting set-signer-key-authorization"; - "block_height" => block_height, - "reward_cycle" => reward_cycle, - ); - - let set_signer_key_auth_tx = make_contract_call( - &signer_sk_1, - 0, - 500, - &boot_code_addr(false), - POX_4_NAME, - "set-signer-key-authorization", - &[ - Value::Tuple(pox_addr.as_clarity_tuple().unwrap()), - Value::UInt(lock_period), - Value::UInt(reward_cycle.into()), - Value::string_ascii_from_bytes(topic.get_name_str().into()).unwrap(), - Value::buff_from(signer_pk_bytes.clone()).unwrap(), - Value::Bool(true), - Value::UInt(u128::MAX), - Value::UInt(auth_id.into()), - ], - ); - - // push the stacking transaction - let http_origin = format!("http://{}", &conf.node.rpc_bind); - submit_tx(&http_origin, &set_signer_key_auth_tx); - - // Wait a few blocks to be registered - for _i in 0..3 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - block_height = channel.get_sortitions_processed(); - } - - let reward_cycle = burnchain_config - .block_height_to_reward_cycle(block_height) - .unwrap(); - - let signer_key: StacksPublicKeyBuffer = signer_pk_bytes.clone().as_slice().into(); - - info!( - "Submitting stack stx op"; - "block_height" => block_height, - "reward_cycle" => reward_cycle, - ); - - // `stacked_ustx` should be large enough to avoid ERR_STACKING_THRESHOLD_NOT_MET from Clarity - let stack_stx_op_with_some_signer_key = BlockstackOperationType::StackStx(StackStxOp { - 
sender: spender_stx_addr_1.clone(), - reward_addr: pox_addr.clone(), - stacked_ustx: 10000000000000, - num_cycles: 6, - signer_key: Some(signer_key), - max_amount: Some(u128::MAX), - auth_id: Some(auth_id.into()), - // to be filled in - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }); - - let mut spender_signer_1 = BurnchainOpSigner::new(signer_sk_1.clone(), false); - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch25, - stack_stx_op_with_some_signer_key, - &mut spender_signer_1, - 1 - ) - .is_some(), - "Stack STX operation with some signer key should submit successfully" - ); - - let stack_stx_op_with_no_signer_key = BlockstackOperationType::StackStx(StackStxOp { - sender: spender_stx_addr_2.clone(), - reward_addr: pox_addr.clone(), - stacked_ustx: 10000000000000, - num_cycles: 6, - signer_key: None, - max_amount: None, - auth_id: None, - // to be filled in - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }); - - let mut spender_signer_2 = BurnchainOpSigner::new(signer_sk_2.clone(), false); - assert!( - btc_regtest_controller - .submit_operation( - StacksEpochId::Epoch25, - stack_stx_op_with_no_signer_key, - &mut spender_signer_2, - 1 - ) - .is_some(), - "Stack STX operation with no signer key should submit successfully" - ); - - info!("Submitted 2 stack STX ops at height {block_height}, mining a few blocks..."); - - // the second block should process the vote, after which the balaces should be unchanged - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut stack_stx_found = false; - let mut stack_stx_burn_op_tx_count = 0; - let blocks = test_observer::get_blocks(); - info!("stack event observer num blocks: {:?}", blocks.len()); - for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - info!( - "stack event observer num transactions: {:?}", - transactions.len() - ); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - info!("Found a burn op: {:?}", tx); - let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); - if !burnchain_op.contains_key("stack_stx") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); - panic!("unexpected btc transaction type"); - } - let stack_stx_obj = burnchain_op.get("stack_stx").unwrap(); - let signer_key_found = stack_stx_obj - .get("signer_key") - .expect("Expected signer_key in burn op") - .as_str() - .unwrap(); - assert_eq!(signer_key_found, signer_key.to_hex()); - - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = Value::try_deserialize_hex_untyped(&raw_result[2..]).unwrap(); - info!("Clarity result of stack-stx op: {parsed}"); - parsed - .expect_result_ok() - .expect("Expected OK result for stack-stx op"); - - stack_stx_found = true; - stack_stx_burn_op_tx_count += 1; - } - } - } - assert!(stack_stx_found, "Expected stack STX op"); - assert_eq!( - stack_stx_burn_op_tx_count, 1, - "Stack-stx tx without a signer_key shouldn't have been submitted" - ); - - let sortdb = btc_regtest_controller.sortdb_mut(); - let sortdb_conn = sortdb.conn(); - let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb_conn).unwrap(); - - let ancestor_burnchain_header_hashes = - SortitionDB::get_ancestor_burnchain_header_hashes(sortdb.conn(), 
&tip.burn_header_hash, 6) - .unwrap(); - - let mut all_stacking_burn_ops = vec![]; - let mut found_none = false; - let mut found_some = false; - // go from oldest burn header hash to newest - for ancestor_bhh in ancestor_burnchain_header_hashes.iter().rev() { - let stacking_ops = SortitionDB::get_stack_stx_ops(sortdb_conn, ancestor_bhh).unwrap(); - for stacking_op in stacking_ops.into_iter() { - debug!("Stacking op queried from sortdb: {:?}", stacking_op); - match stacking_op.signer_key { - Some(_) => found_some = true, - None => found_none = true, - } - all_stacking_burn_ops.push(stacking_op); - } - } - assert_eq!( - all_stacking_burn_ops.len(), - 2, - "Both stack-stx ops with and without a signer_key should be considered valid." - ); - assert!( - found_none, - "Expected one stacking_op to have a signer_key of None" - ); - assert!( - found_some, - "Expected one stacking_op to have a signer_key of Some" - ); - - test_observer::clear(); - channel.stop_chains_coordinator(); -} - -#[test] -#[ignore] -fn vote_for_aggregate_key_burn_op_test() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let spender_sk = StacksPrivateKey::from_hex(SK_1).unwrap(); - let spender_stx_addr: StacksAddress = to_addr(&spender_sk); - let spender_addr: PrincipalData = spender_stx_addr.clone().into(); - - let pox_pubkey = Secp256k1PublicKey::from_hex( - "02f006a09b59979e2cb8449f58076152af6b124aa29b948a3714b8d5f15aa94ede", - ) - .unwrap(); - let _pox_pubkey_hash = bytes_to_hex( - &Hash160::from_node_public_key(&pox_pubkey) - .to_bytes() - .to_vec(), - ); - - let (mut conf, _miner_account) = neon_integration_test_conf(); - - let first_bal = 6_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); - let stacked_bal = 1_000_000_000 * (core::MICROSTACKS_PER_STACKS as u128); - - conf.initial_balances.push(InitialBalance { - address: spender_addr.clone(), - amount: first_bal, + conf.initial_balances.push(InitialBalance { + address: recipient_addr.clone().into(), + amount: 300, }); // update epoch info so that Epoch 2.1 takes effect @@ -2688,37 +2003,9 @@ fn vote_for_aggregate_key_burn_op_test() { StacksEpoch { epoch_id: StacksEpochId::Epoch21, start_height: 2, - end_height: 3, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_1, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch22, - start_height: 3, - end_height: 4, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_2, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch23, - start_height: 4, - end_height: 5, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_3, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch24, - start_height: 5, - end_height: 6, - block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_4, - }, - StacksEpoch { - epoch_id: StacksEpochId::Epoch25, - start_height: 6, end_height: 9223372036854775807, block_limit: BLOCK_LIMIT_MAINNET_21.clone(), - network_epoch: PEER_VERSION_EPOCH_2_5, + network_epoch: PEER_VERSION_EPOCH_2_1, }, ]); conf.burnchain.pox_2_activation = Some(3); @@ -2777,61 +2064,18 @@ fn vote_for_aggregate_key_burn_op_test() { // give the run loop some time to start up! 
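// (Editorial note, sketch only.) In the `conf.burnchain.epochs` ladders used by
// these tests, each epoch's `end_height` equals the next epoch's `start_height`,
// and the final epoch's `end_height` of 9223372036854775807 is simply i64::MAX
// used as an open-ended "active until further notice" sentinel:
fn epoch_end_sentinel() {
    assert_eq!(9_223_372_036_854_775_807_u64, i64::MAX as u64);
}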
wait_for_runloop(&blocks_processed); - test_observer::clear(); - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!("Bootstrapped to 2.5, submitting stack-stx and pre-stx op..."); - - // setup stack-stx tx - - let signer_sk = spender_sk.clone(); - let signer_pk = StacksPublicKey::from_private(&signer_sk); - - let pox_addr = PoxAddress::Standard(spender_stx_addr, Some(AddressHashMode::SerializeP2PKH)); - - let mut block_height = btc_regtest_controller.get_headers_height(); - - let reward_cycle = burnchain_config - .block_height_to_reward_cycle(block_height) - .unwrap(); - - let signature = make_pox_4_signer_key_signature( - &pox_addr, - &signer_sk, - reward_cycle.into(), - &Pox4SignatureTopic::StackStx, - CHAIN_ID_TESTNET, - 12, - u128::MAX, - 1, - ) - .unwrap(); - - let signer_pk_bytes = signer_pk.to_bytes_compressed(); + test_observer::clear(); - let stacking_tx = make_contract_call( - &spender_sk, - 0, - 500, - &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(), - "pox-4", - "stack-stx", - &[ - Value::UInt(stacked_bal), - Value::Tuple(pox_addr.as_clarity_tuple().unwrap()), - Value::UInt(block_height.into()), - Value::UInt(12), - Value::some(Value::buff_from(signature.to_rsv()).unwrap()).unwrap(), - Value::buff_from(signer_pk_bytes.clone()).unwrap(), - Value::UInt(u128::MAX), - Value::UInt(1), - ], - ); + // Mine a few more blocks so that Epoch 2.1 (and thus pox-2) can take effect. + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); + // okay, let's send a pre-stx op. 
let pre_stx_op = PreStxOp { output: spender_stx_addr.clone(), // to be filled in @@ -2841,10 +2085,12 @@ fn vote_for_aggregate_key_burn_op_test() { burn_header_hash: BurnchainHeaderHash([0u8; 32]), }; + let mut miner_signer = Keychain::default(conf.node.seed.clone()).generate_op_signer(); + assert!( btc_regtest_controller .submit_operation( - StacksEpochId::Epoch25, + StacksEpochId::Epoch21, BlockstackOperationType::PreStx(pre_stx_op), &mut miner_signer, 1 @@ -2853,107 +2099,115 @@ fn vote_for_aggregate_key_burn_op_test() { "Pre-stx operation should submit successfully" ); - // push the stacking transaction - submit_tx(&http_origin, &stacking_tx); - - info!("Submitted stack-stx and pre-stx op at block {block_height}, mining a few blocks..."); - - // Wait a few blocks to be registered - for _i in 0..5 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - block_height = btc_regtest_controller.get_headers_height(); - } - - let reward_cycle = burnchain_config - .block_height_to_reward_cycle(block_height) - .unwrap(); - - let signer_key: StacksPublicKeyBuffer = signer_pk_bytes.clone().as_slice().into(); - - let aggregate_pk = Secp256k1PublicKey::new(); - let aggregate_key: StacksPublicKeyBuffer = aggregate_pk.to_bytes_compressed().as_slice().into(); - - let signer_index = 0; + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - info!( - "Submitting vote for aggregate key op"; - "block_height" => block_height, - "reward_cycle" => reward_cycle, - "signer_index" => %signer_index, - ); - - let vote_for_aggregate_key_op = - BlockstackOperationType::VoteForAggregateKey(VoteForAggregateKeyOp { - signer_key, - signer_index, - sender: spender_stx_addr.clone(), - round: 0, - reward_cycle, - aggregate_key, - // to be filled in - vtxindex: 0, - txid: Txid([0u8; 32]), - block_height: 0, - burn_header_hash: BurnchainHeaderHash::zero(), - }); + // let's fire off our delegate op. 
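// (Editorial note, not part of this diff.) Burnchain ops follow a two-step
// pattern: the PreStxOp above escrows a UTXO for the sender, and the follow-up
// op (here, a DelegateStxOp) spends that UTXO in a later burn block. The zeroed
// `txid`, `vtxindex`, `block_height`, and `burn_header_hash` fields are the
// "to be filled in" placeholders: they are populated when the op is parsed back
// out of the actual Bitcoin transaction, so the test never supplies them up front.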
+    let del_stx_op = DelegateStxOp {
+        sender: spender_stx_addr.clone(),
+        delegate_to: recipient_addr.clone(),
+        reward_addr: None,
+        delegated_ustx: 100_000,
+        // to be filled in
+        txid: Txid([0u8; 32]),
+        vtxindex: 0,
+        block_height: 0,
+        burn_header_hash: BurnchainHeaderHash([0u8; 32]),
+        until_burn_height: None,
+    };
-    let mut spender_signer = BurnchainOpSigner::new(signer_sk.clone(), false);
+    let mut spender_signer = BurnchainOpSigner::new(spender_sk.clone(), false);
     assert!(
         btc_regtest_controller
             .submit_operation(
-                StacksEpochId::Epoch25,
-                vote_for_aggregate_key_op,
+                StacksEpochId::Epoch21,
+                BlockstackOperationType::DelegateStx(del_stx_op),
                 &mut spender_signer,
                 1
             )
             .is_some(),
-        "Vote for aggregate key operation should submit successfully"
+        "Delegate operation should submit successfully"
+    );
+
+    // the second block should process the delegation, after which the balances should be unchanged
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+    next_block_and_wait(&mut btc_regtest_controller, &blocks_processed);
+
+    assert_eq!(get_balance(&http_origin, &spender_addr), 100300);
+    assert_eq!(get_balance(&http_origin, &recipient_addr), 300);
+
+    // send a delegate-stack-stx transaction
+    let sort_height = channel.get_sortitions_processed();
+    let tx = make_contract_call(
+        &recipient_sk,
+        0,
+        293,
+        &StacksAddress::from_string("ST000000000000000000002AMW42H").unwrap(),
+        "pox-2",
+        "delegate-stack-stx",
+        &[
+            Value::Principal(spender_addr.clone()),
+            Value::UInt(100_000),
+            execute(
+                &format!("{{ hashbytes: 0x{}, version: 0x00 }}", pox_pubkey_hash),
+                ClarityVersion::Clarity2,
+            )
+            .unwrap()
+            .unwrap(),
+            Value::UInt(sort_height as u128),
+            Value::UInt(6),
+        ],
     );
-    info!("Submitted vote for aggregate key op at height {block_height}, mining a few blocks...");
+    // push the stacking transaction
+    submit_tx(&http_origin, &tx);
-    // the second block should process the vote, after which the vote should be processed
+    // let's mine until the next reward cycle starts ...
+ next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // check the locked amount for the spender account + let account = get_account(&http_origin, &spender_stx_addr); + assert_eq!(account.locked, 100_000); - let mut vote_for_aggregate_key_found = false; + let mut delegate_stack_stx_found = false; + let mut delegate_stx_found = false; let blocks = test_observer::get_blocks(); for block in blocks.iter() { - let transactions = block.get("transactions").unwrap().as_array().unwrap(); - for tx in transactions.iter() { - let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); - if raw_tx == "0x00" { - debug!("Found a burn op: {:?}", tx); - let burnchain_op = tx.get("burnchain_op").unwrap().as_object().unwrap(); - if !burnchain_op.contains_key("vote_for_aggregate_key") { - warn!("Got unexpected burnchain op: {:?}", burnchain_op); - panic!("unexpected btc transaction type"); - } - let vote_obj = burnchain_op.get("vote_for_aggregate_key").unwrap(); - let agg_key = vote_obj - .get("aggregate_key") - .expect("Expected aggregate_key key in burn op") - .as_str() - .unwrap(); - assert_eq!(agg_key, aggregate_key.to_hex()); - let signer_key = vote_obj.get("signer_key").unwrap().as_str().unwrap(); - assert_eq!(to_hex(&signer_pk_bytes), signer_key); + let events = block.get("events").unwrap().as_array().unwrap(); + for event in events.iter() { + let event_type = event.get("type").unwrap().as_str().unwrap(); + if event_type == "contract_event" { + let contract_event = event.get("contract_event").unwrap().as_object().unwrap(); + + // Check that it is a print event + let sub_type = contract_event.get("topic").unwrap().as_str().unwrap(); + assert_eq!(sub_type, "print"); - vote_for_aggregate_key_found = true; + // Ensure that the function name is as expected + // This verifies that there were print events for delegate-stack-stx and delegate-stx + let name_field = + &contract_event["value"]["Response"]["data"]["Tuple"]["data_map"]["name"]; + let name_data = name_field["Sequence"]["String"]["ASCII"]["data"] + .as_array() + .unwrap(); + let ascii_vec = name_data + .iter() + .map(|num| num.as_u64().unwrap() as u8) + .collect(); + let name = String::from_utf8(ascii_vec).unwrap(); + if name == "delegate-stack-stx" { + delegate_stack_stx_found = true; + } else if name == "delegate-stx" { + delegate_stx_found = true; + } } } } - assert!( - vote_for_aggregate_key_found, - "Expected vote for aggregate key op" - ); - - // Check that the correct key was set - let saved_key = get_key_for_cycle(reward_cycle, false, &conf.node.rpc_bind) - .expect("Expected to be able to check key is set after voting") - .expect("Expected aggregate key to be set"); - - assert_eq!(saved_key, aggregate_key.as_bytes().to_vec()); + assert!(delegate_stx_found); + assert!(delegate_stack_stx_found); test_observer::clear(); channel.stop_chains_coordinator(); @@ -3321,8 +2575,8 @@ fn microblock_fork_poison_integration_test() { conf.miner.subsequent_attempt_time_ms = 5_000; conf.node.wait_time_for_blocks = 1_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; 
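// (Editorial sketch, not part of this diff.) The first_attempt_time_ms /
// subsequent_attempt_time_ms hunks here and below all swap the `MAX` associated
// constants for the older `max_value()` calls. Both spellings name the same
// constant, so the "effectively never re-attempt" sentinel is unchanged:
fn max_value_equivalence() {
    assert_eq!(i64::MAX, i64::max_value()); // max_value() is the pre-Rust-1.43 spelling
    assert_eq!(i64::MAX as u64, 0x7FFF_FFFF_FFFF_FFFF_u64); // positive, so the cast is lossless
}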
test_observer::spawn(); @@ -4352,8 +3606,8 @@ fn size_check_integration_test() { conf.node.microblock_frequency = 5000; conf.miner.microblock_attempt_time_ms = 120_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -4528,8 +3782,8 @@ fn size_overflow_unconfirmed_microblocks_integration_test() { conf.node.microblock_frequency = 5_000; conf.miner.microblock_attempt_time_ms = 120_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); conf.events_observers.insert(EventObserverConfig { @@ -4724,8 +3978,8 @@ fn size_overflow_unconfirmed_stream_microblocks_integration_test() { conf.node.max_microblocks = 65536; conf.burnchain.max_rbf = 1000000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); conf.events_observers.insert(EventObserverConfig { @@ -4918,8 +4172,8 @@ fn size_overflow_unconfirmed_invalid_stream_microblocks_integration_test() { epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; conf.burnchain.epochs = Some(epochs); - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); conf.events_observers.insert(EventObserverConfig { @@ -5180,8 +4434,8 @@ fn runtime_overflow_unconfirmed_microblocks_integration_test() { conf.node.microblock_frequency = 15000; conf.miner.microblock_attempt_time_ms = 120_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; let mut epochs = core::STACKS_EPOCHS_REGTEST.to_vec(); epochs[1].block_limit = core::BLOCK_LIMIT_MAINNET_20; @@ -5355,8 +4609,8 @@ fn block_replay_integration_test() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 5_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -5805,8 +5059,8 @@ fn mining_events_integration_test() { conf.node.wait_time_for_microblocks = 1000; conf.node.microblock_frequency = 1000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -6076,8 +5330,8 @@ fn block_limit_hit_integration_test() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 1000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - 
conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; test_observer::spawn(); @@ -6289,12 +5543,12 @@ fn microblock_limit_hit_integration_test() { conf.node.wait_time_for_microblocks = 1000; conf.node.microblock_frequency = 1000; - conf.miner.microblock_attempt_time_ms = i64::MAX as u64; + conf.miner.microblock_attempt_time_ms = i64::max_value() as u64; conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; conf.burnchain.epochs = Some(vec![ StacksEpoch { @@ -6500,12 +5754,12 @@ fn block_large_tx_integration_test() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 1000; - conf.miner.microblock_attempt_time_ms = i64::MAX as u64; + conf.miner.microblock_attempt_time_ms = i64::max_value() as u64; conf.burnchain.max_rbf = 10_000_000; conf.node.wait_time_for_blocks = 1_000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; let mut btcd_controller = BitcoinCoreController::new(conf.clone()); btcd_controller @@ -6638,8 +5892,8 @@ fn microblock_large_tx_integration_test_FLAKY() { conf.node.wait_time_for_microblocks = 30000; conf.node.microblock_frequency = 1000; - conf.miner.first_attempt_time_ms = i64::MAX as u64; - conf.miner.subsequent_attempt_time_ms = i64::MAX as u64; + conf.miner.first_attempt_time_ms = i64::max_value() as u64; + conf.miner.subsequent_attempt_time_ms = i64::max_value() as u64; conf.miner.microblock_attempt_time_ms = 1_000; conf.node.wait_time_for_microblocks = 0; @@ -8093,8 +7347,8 @@ fn atlas_stress_integration_test() { .initial_balances .append(&mut initial_balances.clone()); - conf_bootstrap_node.miner.first_attempt_time_ms = u64::MAX; - conf_bootstrap_node.miner.subsequent_attempt_time_ms = u64::MAX; + conf_bootstrap_node.miner.first_attempt_time_ms = u64::max_value(); + conf_bootstrap_node.miner.subsequent_attempt_time_ms = u64::max_value(); conf_bootstrap_node.node.mine_microblocks = true; conf_bootstrap_node.miner.microblock_attempt_time_ms = 2_000; @@ -10256,11 +9510,8 @@ fn test_problematic_blocks_are_not_relayed_or_stored() { // at least one block was mined (hard to say how many due to the raciness between the burnchain // downloader and this thread). - info!( - "tip_info.stacks_tip_height = {}, old_tip_info.stacks_tip_height = {}", - tip_info.stacks_tip_height, old_tip_info.stacks_tip_height - ); assert!(tip_info.stacks_tip_height > old_tip_info.stacks_tip_height); + // one was problematic -- i.e. the one that included tx_high assert_eq!(all_new_files.len(), 1); @@ -12133,129 +11384,3 @@ fn filter_txs_by_origin() { test_observer::clear(); } - -// https://stackoverflow.com/questions/26958489/how-to-copy-a-folder-recursively-in-rust -fn copy_dir_all(src: impl AsRef, dst: impl AsRef) -> io::Result<()> { - fs::create_dir_all(&dst)?; - for entry in fs::read_dir(src)? 
{ - let entry = entry?; - let ty = entry.file_type()?; - if ty.is_dir() { - copy_dir_all(entry.path(), dst.as_ref().join(entry.file_name()))?; - } else { - fs::copy(entry.path(), dst.as_ref().join(entry.file_name()))?; - } - } - Ok(()) -} - -#[test] -#[ignore] -fn bitcoin_reorg_flap() { - if env::var("BITCOIND_TEST") != Ok("1".into()) { - return; - } - - let (conf, _miner_account) = neon_integration_test_conf(); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let mut btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - - btc_regtest_controller.bootstrap_chain(201); - - eprintln!("Chain bootstrapped..."); - - let mut run_loop = neon::RunLoop::new(conf.clone()); - let blocks_processed = run_loop.get_blocks_processed_arc(); - - let channel = run_loop.get_coordinator_channel().unwrap(); - - thread::spawn(move || run_loop.start(None, 0)); - - // give the run loop some time to start up! - wait_for_runloop(&blocks_processed); - - // first block wakes up the run loop - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - // first block will hold our VRF registration - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - - let mut sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); - - while sort_height < 210 { - next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); - sort_height = channel.get_sortitions_processed(); - eprintln!("Sort height: {}", sort_height); - } - - // stop bitcoind and copy its DB to simulate a chain flap - btcd_controller.stop_bitcoind().unwrap(); - thread::sleep(Duration::from_secs(5)); - - let btcd_dir = conf.get_burnchain_path_str(); - let mut new_conf = conf.clone(); - new_conf.node.working_dir = format!("{}.new", &conf.node.working_dir); - fs::create_dir_all(&new_conf.node.working_dir).unwrap(); - - copy_dir_all(&btcd_dir, &new_conf.get_burnchain_path_str()).unwrap(); - - // resume - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - let btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - thread::sleep(Duration::from_secs(5)); - - info!("\n\nBegin fork A\n\n"); - - // make fork A - for _i in 0..3 { - btc_regtest_controller.build_next_block(1); - thread::sleep(Duration::from_secs(5)); - } - - btcd_controller.stop_bitcoind().unwrap(); - - info!("\n\nBegin reorg flap from A to B\n\n"); - - // carry out the flap to fork B -- new_conf's state was the same as before the reorg - let mut btcd_controller = BitcoinCoreController::new(new_conf.clone()); - let btc_regtest_controller = BitcoinRegtestController::new(new_conf.clone(), None); - - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - for _i in 0..5 { - btc_regtest_controller.build_next_block(1); - thread::sleep(Duration::from_secs(5)); - } - - btcd_controller.stop_bitcoind().unwrap(); - - info!("\n\nBegin reorg flap from B to A\n\n"); - - let mut btcd_controller = BitcoinCoreController::new(conf.clone()); - let btc_regtest_controller = BitcoinRegtestController::new(conf.clone(), None); - btcd_controller - .start_bitcoind() - .expect("Failed starting bitcoind"); - - // carry out the flap back to fork A - for _i in 0..7 { - btc_regtest_controller.build_next_block(1); - thread::sleep(Duration::from_secs(5)); - } - - 
assert_eq!(channel.get_sortitions_processed(), 225); - btcd_controller.stop_bitcoind().unwrap(); - channel.stop_chains_coordinator(); -} diff --git a/testnet/stacks-node/src/tests/signer.rs b/testnet/stacks-node/src/tests/signer.rs index 867421b5c02..54e851be9f4 100644 --- a/testnet/stacks-node/src/tests/signer.rs +++ b/testnet/stacks-node/src/tests/signer.rs @@ -9,19 +9,18 @@ use std::{env, thread}; use clarity::boot_util::boot_code_id; use clarity::vm::Value; use libsigner::{ - BlockResponse, MessageSlotID, RejectCode, RunningSigner, Signer, SignerEventReceiver, - SignerMessage, + BlockResponse, RejectCode, RunningSigner, Signer, SignerEventReceiver, SignerMessage, + BLOCK_MSG_ID, }; use rand::thread_rng; use rand_core::RngCore; use stacks::burnchains::Txid; use stacks::chainstate::coordinator::comm::CoordinatorChannels; use stacks::chainstate::nakamoto::signer_set::NakamotoSigners; -use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader}; +use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoBlockVote}; use stacks::chainstate::stacks::boot::{ SIGNERS_NAME, SIGNERS_VOTING_FUNCTION_NAME, SIGNERS_VOTING_NAME, }; -use stacks::chainstate::stacks::events::StackerDBChunksEvent; use stacks::chainstate::stacks::miner::TransactionEvent; use stacks::chainstate::stacks::{ StacksPrivateKey, StacksTransaction, ThresholdSignature, TransactionAnchorMode, @@ -32,13 +31,13 @@ use stacks::core::StacksEpoch; use stacks::net::api::postblock_proposal::BlockValidateResponse; use stacks::util_lib::strings::StacksString; use stacks_common::bitvec::BitVec; -use stacks_common::codec::StacksMessageCodec; +use stacks_common::codec::{read_next, StacksMessageCodec}; use stacks_common::consts::{CHAIN_ID_TESTNET, SIGNER_SLOTS_PER_USER}; use stacks_common::types::chainstate::{ ConsensusHash, StacksAddress, StacksBlockId, StacksPublicKey, TrieHash, }; use stacks_common::types::StacksEpochId; -use stacks_common::util::hash::{hex_bytes, MerkleTree, Sha512Trunc256Sum}; +use stacks_common::util::hash::{MerkleTree, Sha512Trunc256Sum}; use stacks_common::util::secp256k1::MessageSignature; use stacks_signer::client::{StackerDB, StacksClient}; use stacks_signer::config::{build_signer_config_tomls, GlobalConfig as SignerConfig, Network}; @@ -46,9 +45,12 @@ use stacks_signer::runloop::RunLoopCommand; use stacks_signer::signer::{Command as SignerCommand, SignerSlotID}; use tracing_subscriber::prelude::*; use tracing_subscriber::{fmt, EnvFilter}; +use wsts::common::Signature; +use wsts::compute::tweaked_public_key; use wsts::curve::point::Point; use wsts::curve::scalar::Scalar; use wsts::state_machine::OperationResult; +use wsts::taproot::SchnorrProof; use crate::config::{Config as NeonConfig, EventKeyType, EventObserverConfig, InitialBalance}; use crate::event_dispatcher::MinedNakamotoBlockEvent; @@ -92,8 +94,6 @@ struct SignerTest { pub signer_stacks_private_keys: Vec, // link to the stacks node pub stacks_client: StacksClient, - // Unique number used to isolate files created during the test - pub run_stamp: u16, } impl SignerTest { @@ -109,8 +109,6 @@ impl SignerTest { let password = "12345"; naka_conf.connection_options.block_proposal_token = Some(password.to_string()); - let run_stamp = rand::random(); - // Setup the signer and coordinator configurations let signer_configs = build_signer_config_tomls( &signer_stacks_private_keys, @@ -118,10 +116,6 @@ impl SignerTest { Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds. 
             &Network::Testnet,
             password,
-            run_stamp,
-            3000,
-            Some(100_000),
-            None,
         );
 
         let mut running_signers = Vec::new();
@@ -152,7 +146,6 @@ impl SignerTest {
             running_signers,
             signer_stacks_private_keys,
             stacks_client,
-            run_stamp,
         }
     }
 
@@ -209,8 +202,7 @@ impl SignerTest {
         let current_block_height = self
             .running_nodes
             .btc_regtest_controller
-            .get_headers_height()
-            .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1
+            .get_headers_height();
         let curr_reward_cycle = self.get_current_reward_cycle();
         let next_reward_cycle = curr_reward_cycle.saturating_add(1);
         let next_reward_cycle_height = self
@@ -229,14 +221,15 @@ impl SignerTest {
         let current_block_height = self
             .running_nodes
             .btc_regtest_controller
-            .get_headers_height()
-            .saturating_sub(1); // Must subtract 1 since get_headers_height returns current block height + 1
+            .get_headers_height();
         let reward_cycle_height = self
             .running_nodes
             .btc_regtest_controller
             .get_burnchain()
             .reward_cycle_to_block_height(reward_cycle);
-        reward_cycle_height.saturating_sub(current_block_height)
+        reward_cycle_height
+            .saturating_sub(current_block_height)
+            .saturating_sub(1)
     }
 
     // Only call after already past the epoch 3.0 boundary
@@ -252,12 +245,17 @@ impl SignerTest {
             .running_nodes
             .btc_regtest_controller
             .get_headers_height()
-            .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1
             .saturating_add(nmb_blocks_to_mine_to_dkg);
-        info!("Mining {nmb_blocks_to_mine_to_dkg} bitcoin block(s) to reach DKG calculation at bitcoin height {end_block_height}");
+        info!("Mining {nmb_blocks_to_mine_to_dkg} Nakamoto block(s) to reach DKG calculation at block height {end_block_height}");
         for i in 1..=nmb_blocks_to_mine_to_dkg {
-            info!("Mining bitcoin block #{i} and nakamoto tenure of {nmb_blocks_to_mine_to_dkg}");
-            self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout);
+            info!("Mining Nakamoto block #{i} of {nmb_blocks_to_mine_to_dkg}");
+            self.mine_nakamoto_block(timeout);
+            let hash = self.wait_for_validate_ok_response(timeout);
+            let signatures = self.wait_for_frost_signatures(timeout);
+            // Verify the signers accepted the proposed block and are using the new DKG to sign it
+            for signature in &signatures {
+                assert!(signature.verify(&set_dkg, hash.0.as_slice()));
+            }
        }
        if nmb_blocks_to_mine_to_dkg == 0 {
            None
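The mine-then-verify sequence introduced here is repeated verbatim at two more call sites below (and again in `stackerdb_filter_bad_transactions`). A hypothetical helper — not part of this diff — inside `impl SignerTest` would name the four steps once, using only methods and types already shown in this file:

// Hypothetical helper capturing the inlined
// mine -> validate -> collect signatures -> verify sequence used below.
fn mine_and_verify(&mut self, set_dkg: &Point, timeout: Duration) {
    self.mine_nakamoto_block(timeout);
    let hash = self.wait_for_validate_ok_response(timeout);
    let signatures = self.wait_for_frost_signatures(timeout);
    // Each signer's FROST signature must verify against the DKG aggregate key.
    for signature in &signatures {
        assert!(signature.verify(set_dkg, hash.0.as_slice()));
    }
}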
@@ -294,13 +292,7 @@ impl SignerTest {
                 )
             }
             if total_nmb_blocks_to_mine >= nmb_blocks_to_reward_cycle {
-                let end_block_height = self
-                    .running_nodes
-                    .btc_regtest_controller
-                    .get_headers_height()
-                    .saturating_sub(1) // Must subtract 1 since get_headers_height returns current block height + 1
-                    .saturating_add(nmb_blocks_to_reward_cycle);
-                debug!("Mining {nmb_blocks_to_reward_cycle} Nakamoto block(s) to reach the next reward cycle boundary at {end_block_height}.");
+                debug!("Mining {nmb_blocks_to_reward_cycle} Nakamoto block(s) to reach the next reward cycle boundary.");
                 for i in 1..=nmb_blocks_to_reward_cycle {
                     debug!("Mining Nakamoto block #{i} of {nmb_blocks_to_reward_cycle}");
                     let curr_reward_cycle = self.get_current_reward_cycle();
@@ -309,38 +301,37 @@ impl SignerTest {
                         .get_approved_aggregate_key(curr_reward_cycle)
                         .expect("Failed to get approved aggregate key")
                         .expect("No approved aggregate key found");
-                    self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout);
+                    self.mine_nakamoto_block(timeout);
+                    let hash = self.wait_for_validate_ok_response(timeout);
+                    let signatures = self.wait_for_frost_signatures(timeout);
+                    // Verify the signers accepted the proposed block and are using the new DKG to sign it
+                    for signature in &signatures {
+                        assert!(signature.verify(&set_dkg, hash.0.as_slice()));
+                    }
                 }
                 total_nmb_blocks_to_mine -= nmb_blocks_to_reward_cycle;
                 nmb_blocks_to_reward_cycle = 0;
                 blocks_to_dkg = self.nmb_blocks_to_reward_set_calculation();
             }
         }
-        for i in 1..=total_nmb_blocks_to_mine {
-            info!("Mining Nakamoto block #{i} of {total_nmb_blocks_to_mine} to reach {burnchain_height}");
+        for _ in 1..=total_nmb_blocks_to_mine {
             let curr_reward_cycle = self.get_current_reward_cycle();
             let set_dkg = self
                 .stacks_client
                 .get_approved_aggregate_key(curr_reward_cycle)
                 .expect("Failed to get approved aggregate key")
                 .expect("No approved aggregate key found");
-            self.mine_and_verify_confirmed_naka_block(&set_dkg, timeout);
+            self.mine_nakamoto_block(timeout);
+            let hash = self.wait_for_validate_ok_response(timeout);
+            let signatures = self.wait_for_frost_signatures(timeout);
+            // Verify the signers accepted the proposed block and are using the new DKG to sign it
+            for signature in &signatures {
+                assert!(signature.verify(&set_dkg, hash.0.as_slice()));
+            }
         }
         points
     }
 
-    fn mine_and_verify_confirmed_naka_block(
-        &mut self,
-        agg_key: &Point,
-        timeout: Duration,
-    ) -> MinedNakamotoBlockEvent {
-        let new_block = self.mine_nakamoto_block(timeout);
-        let signer_sighash = new_block.signer_signature_hash.clone();
-        let signature = self.wait_for_confirmed_block(&signer_sighash, timeout);
-        assert!(signature.0.verify(&agg_key, signer_sighash.as_bytes()));
-        new_block
-    }
-
     fn mine_nakamoto_block(&mut self, timeout: Duration) -> MinedNakamotoBlockEvent {
         let commits_submitted = self.running_nodes.commits_submitted.clone();
         let mined_block_time = Instant::now();
@@ -368,41 +359,6 @@ impl SignerTest {
         test_observer::get_mined_nakamoto_blocks().pop().unwrap()
     }
 
-    fn wait_for_confirmed_block(
-        &mut self,
-        block_signer_sighash: &Sha512Trunc256Sum,
-        timeout: Duration,
-    ) -> ThresholdSignature {
-        let t_start = Instant::now();
-        while t_start.elapsed() <= timeout {
-            let blocks = test_observer::get_blocks();
-            if let Some(signature) = blocks.iter().find_map(|block_json| {
-                let block_obj = block_json.as_object().unwrap();
-                let sighash = block_obj
-                    // use the try operator because non-nakamoto blocks
-                    // do not supply this field
-                    .get("signer_signature_hash")?
-                    .as_str()
-                    .unwrap();
-                if sighash != &format!("0x{block_signer_sighash}") {
-                    return None;
-                }
-                let signer_signature_hex =
-                    block_obj.get("signer_signature").unwrap().as_str().unwrap();
-                let signer_signature_bytes = hex_bytes(&signer_signature_hex[2..]).unwrap();
-                let signer_signature = ThresholdSignature::consensus_deserialize(
-                    &mut signer_signature_bytes.as_slice(),
-                )
-                .unwrap();
-                Some(signer_signature)
-            }) {
-                return signature;
-            }
-            thread::sleep(Duration::from_millis(500));
-        }
-        panic!("Timed out while waiting for confirmation of block with signer sighash = {block_signer_sighash}")
-    }
-
     fn wait_for_validate_ok_response(&mut self, timeout: Duration) -> Sha512Trunc256Sum {
         // Wait for the block to show up in the test observer (Don't have to wait long as if we have received a mined block already,
         // we know that the signers have already received their block proposal events via their event observers)
@@ -435,11 +391,22 @@ impl SignerTest {
                 .expect("failed to recv dkg results");
             for result in results {
                 match result {
+                    OperationResult::Sign(sig) => {
+                        panic!("Received Signature ({},{})", &sig.R, &sig.z);
+                    }
+                    OperationResult::SignTaproot(proof) => {
+                        panic!("Received SchnorrProof ({},{})", &proof.r, &proof.s);
+                    }
+                    OperationResult::DkgError(dkg_error) => {
+                        panic!("Received DkgError {:?}", dkg_error);
+                    }
+                    OperationResult::SignError(sign_error) => {
+                        panic!("Received SignError {}", sign_error);
+                    }
                     OperationResult::Dkg(point) => {
                         info!("Received aggregate_group_key {point}");
                         aggregate_public_key = Some(point);
                     }
-                    other => panic!("{}", operation_panic_message(&other)),
                 }
             }
             if aggregate_public_key.is_some() || dkg_now.elapsed() > timeout {
@@ -454,6 +421,92 @@ impl SignerTest {
         key
     }
 
+    fn wait_for_frost_signatures(&mut self, timeout: Duration) -> Vec<Signature> {
+        debug!("Waiting for frost signatures...");
+        let mut results = Vec::new();
+        let sign_now = Instant::now();
+        for recv in self.result_receivers.iter() {
+            let mut frost_signature = None;
+            loop {
+                let results = recv
+                    .recv_timeout(timeout)
+                    .expect("failed to recv signature results");
+                for result in results {
+                    match result {
+                        OperationResult::Sign(sig) => {
+                            info!("Received Signature ({},{})", &sig.R, &sig.z);
+                            frost_signature = Some(sig);
+                        }
+                        OperationResult::SignTaproot(proof) => {
+                            panic!("Received SchnorrProof ({},{})", &proof.r, &proof.s);
+                        }
+                        OperationResult::DkgError(dkg_error) => {
+                            panic!("Received DkgError {:?}", dkg_error);
+                        }
+                        OperationResult::SignError(sign_error) => {
+                            panic!("Received SignError {}", sign_error);
+                        }
+                        OperationResult::Dkg(point) => {
+                            panic!("Received aggregate_group_key {point}");
+                        }
+                    }
+                }
+                if frost_signature.is_some() || sign_now.elapsed() > timeout {
+                    break;
+                }
+            }
+
+            let frost_signature = frost_signature
+                .expect(&format!("Failed to get frost signature within {timeout:?}"));
+            results.push(frost_signature);
+        }
+        debug!("Finished waiting for frost signatures!");
+        results
+    }
+
+    fn wait_for_taproot_signatures(&mut self, timeout: Duration) -> Vec<SchnorrProof> {
+        debug!("Waiting for taproot signatures...");
+        let mut results = vec![];
+        let sign_now = Instant::now();
+        for recv in self.result_receivers.iter() {
+            let mut schnorr_proof = None;
+            loop {
+                let results = recv
+                    .recv_timeout(timeout)
+                    .expect("failed to recv signature results");
+                for result in results {
+                    match result {
+                        OperationResult::Sign(sig) => {
+                            panic!("Received Signature ({},{})", &sig.R, &sig.z);
+                        }
+                        OperationResult::SignTaproot(proof) => {
+                            info!("Received SchnorrProof ({},{})", &proof.r, &proof.s);
+                            schnorr_proof = Some(proof);
+                        }
+                        OperationResult::DkgError(dkg_error) => {
+                            panic!("Received DkgError {:?}", dkg_error);
+                        }
+                        OperationResult::SignError(sign_error) => {
+                            panic!("Received SignError {}", sign_error);
+                        }
+                        OperationResult::Dkg(point) => {
+                            panic!("Received aggregate_group_key {point}");
+                        }
+                    }
+                }
+                if schnorr_proof.is_some() || sign_now.elapsed() > timeout {
+                    break;
+                }
+            }
+            let schnorr_proof = schnorr_proof.expect(&format!(
+                "Failed to get schnorr proof signature within {timeout:?}"
+            ));
+            results.push(schnorr_proof);
+        }
+        debug!("Finished waiting for taproot signatures!");
+        results
+    }
+
     fn run_until_epoch_3_boundary(&mut self) {
         let epochs = self.running_nodes.conf.burnchain.epochs.clone().unwrap();
         let epoch_3 =
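The two new wait functions differ only in which `OperationResult` variant they keep. A hypothetical generalization — not part of this diff, and assuming the `std::sync::mpsc::Receiver` channels and `wsts::state_machine::OperationResult` already used above — makes the shared polling shape explicit:

use std::sync::mpsc::Receiver;
use std::time::{Duration, Instant};

use wsts::state_machine::OperationResult;

// Hypothetical refactor: poll every result receiver until `extract` keeps a
// value from a batch of operation results, or `timeout` elapses.
fn wait_for_results<T>(
    receivers: &[Receiver<Vec<OperationResult>>],
    timeout: Duration,
    mut extract: impl FnMut(OperationResult) -> Option<T>,
) -> Vec<T> {
    let start = Instant::now();
    receivers
        .iter()
        .map(|recv| loop {
            let batch = recv
                .recv_timeout(timeout)
                .expect("failed to recv operation results");
            if let Some(found) = batch.into_iter().find_map(&mut extract) {
                break found;
            }
            assert!(start.elapsed() <= timeout, "timed out waiting for results");
        })
        .collect()
}

Under that sketch, the frost variant would pass `|r| match r { OperationResult::Sign(sig) => Some(sig), _ => None }` and the taproot variant the `SignTaproot` equivalent.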
@@ -551,7 +604,7 @@ impl SignerTest {
                 None,
             ),
         };
-        let invalid_contract_address = StacksClient::build_unsigned_contract_call_transaction(
+        let invalid_contract_address = StacksClient::build_signed_contract_call_transaction(
             &StacksAddress::p2pkh(false, &StacksPublicKey::from_private(&signer_private_key)),
             contract_name.clone(),
             SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -560,10 +613,11 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
+            10,
         )
         .unwrap();
 
-        let invalid_contract_name = StacksClient::build_unsigned_contract_call_transaction(
+        let invalid_contract_name = StacksClient::build_signed_contract_call_transaction(
             &contract_addr,
             "bad-signers-contract-name".into(),
             SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -572,10 +626,11 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
+            10,
         )
         .unwrap();
 
-        let invalid_signers_vote_function = StacksClient::build_unsigned_contract_call_transaction(
+        let invalid_signers_vote_function = StacksClient::build_signed_contract_call_transaction(
             &contract_addr,
             contract_name.clone(),
             "some-other-function".into(),
@@ -584,11 +639,12 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
+            10,
         )
         .unwrap();
 
         let invalid_function_arg_signer_index =
-            StacksClient::build_unsigned_contract_call_transaction(
+            StacksClient::build_signed_contract_call_transaction(
                 &contract_addr,
                 contract_name.clone(),
                 SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -602,10 +658,11 @@ impl SignerTest {
                 TransactionVersion::Testnet,
                 CHAIN_ID_TESTNET,
                 1,
+                10,
             )
             .unwrap();
 
-        let invalid_function_arg_key = StacksClient::build_unsigned_contract_call_transaction(
+        let invalid_function_arg_key = StacksClient::build_signed_contract_call_transaction(
             &contract_addr,
             contract_name.clone(),
             SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -619,10 +676,11 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
+            10,
         )
         .unwrap();
 
-        let invalid_function_arg_round = StacksClient::build_unsigned_contract_call_transaction(
+        let invalid_function_arg_round = StacksClient::build_signed_contract_call_transaction(
             &contract_addr,
             contract_name.clone(),
             SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -636,11 +694,12 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             1,
+            10,
         )
         .unwrap();
 
         let invalid_function_arg_reward_cycle =
-            StacksClient::build_unsigned_contract_call_transaction(
+            StacksClient::build_signed_contract_call_transaction(
                 &contract_addr,
                 contract_name.clone(),
                 SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -654,10 +713,11 @@ impl SignerTest {
                 TransactionVersion::Testnet,
                 CHAIN_ID_TESTNET,
                 1,
+                10,
             )
             .unwrap();
 
-        let invalid_nonce = StacksClient::build_unsigned_contract_call_transaction(
+        let invalid_nonce = StacksClient::build_signed_contract_call_transaction(
             &contract_addr,
             contract_name.clone(),
             SIGNERS_VOTING_FUNCTION_NAME.into(),
@@ -666,6 +726,7 @@ impl SignerTest {
             TransactionVersion::Testnet,
             CHAIN_ID_TESTNET,
             0, // Old nonce
+            10,
         )
         .unwrap();
 
@@ -676,10 +737,10 @@ impl SignerTest {
             false,
         );
         let invalid_signer_tx = invalid_stacks_client
-            .build_unsigned_vote_for_aggregate_public_key(0, round, point, reward_cycle, 0)
+            .build_vote_for_aggregate_public_key(0, round, point, reward_cycle, None, 0)
             .expect("FATAL: failed to build vote for aggregate public key");
 
-        let unsigned_txs = vec![
+        vec![
             invalid_nonce,
             invalid_not_contract_call,
             invalid_contract_name,
@@ -690,57 +751,7 @@ impl SignerTest {
             invalid_function_arg_round,
             invalid_function_arg_signer_index,
             invalid_signer_tx,
-        ];
-        unsigned_txs
-            .into_iter()
-            .map(|unsigned| {
-                invalid_stacks_client
-                    .sign_transaction(unsigned)
-                    .expect("Failed to sign transaction")
-            })
-            .collect()
-    }
-
-    /// Kills the signer runloop at index `signer_idx`
-    /// and returns the private key of the killed signer.
-    ///
-    /// # Panics
-    /// Panics if `signer_idx` is out of bounds
-    fn stop_signer(&mut self, signer_idx: usize) -> StacksPrivateKey {
-        let running_signer = self.running_signers.remove(signer_idx);
-        self.signer_cmd_senders.remove(signer_idx);
-        self.result_receivers.remove(signer_idx);
-        let signer_key = self.signer_stacks_private_keys.remove(signer_idx);
-
-        running_signer.stop();
-        signer_key
-    }
-
-    /// (Re)starts a new signer runloop with the given private key
-    fn restart_signer(&mut self, signer_idx: usize, signer_private_key: StacksPrivateKey) {
-        let signer_config = build_signer_config_tomls(
-            &[signer_private_key],
-            &self.running_nodes.conf.node.rpc_bind,
-            Some(Duration::from_millis(128)), // Timeout defaults to 5 seconds. Let's override it to 128 milliseconds.
-            &Network::Testnet,
-            "12345", // It worked sir, we have the combination! -Great, what's the combination?
-            self.run_stamp,
-            3000 + signer_idx,
-            Some(100_000),
-            None,
-        )
-        .pop()
-        .unwrap();
-
-        let (cmd_send, cmd_recv) = channel();
-        let (res_send, res_recv) = channel();
-
-        info!("Restarting signer");
-        let signer = spawn_signer(&signer_config, cmd_recv, res_send);
-
-        self.result_receivers.insert(signer_idx, res_recv);
-        self.signer_cmd_senders.insert(signer_idx, cmd_send);
-        self.running_signers.insert(signer_idx, signer);
+        ]
     }
 
     fn shutdown(self) {
@@ -791,11 +802,7 @@ fn setup_stx_btc_node(
 
         naka_conf.events_observers.insert(EventObserverConfig {
             endpoint: format!("{}", signer_config.endpoint),
-            events_keys: vec![
-                EventKeyType::StackerDBChunks,
-                EventKeyType::BlockProposal,
-                EventKeyType::BurnchainBlocks,
-            ],
+            events_keys: vec![EventKeyType::StackerDBChunks, EventKeyType::BlockProposal],
         });
     }
 
@@ -891,26 +898,6 @@ fn setup_stx_btc_node(
     }
 }
 
-fn operation_panic_message(result: &OperationResult) -> String {
-    match result {
-        OperationResult::Sign(sig) => {
-            format!("Received Signature ({},{})", sig.R, sig.z)
-        }
-        OperationResult::SignTaproot(proof) => {
-            format!("Received SchnorrProof ({},{})", proof.r, proof.s)
-        }
-        OperationResult::DkgError(dkg_error) => {
-            format!("Received DkgError {:?}", dkg_error)
-        }
-        OperationResult::SignError(sign_error) => {
-            format!("Received SignError {}", sign_error)
-        }
-        OperationResult::Dkg(point) => {
-            format!("Received aggregate_group_key {point}")
-        }
-    }
-}
-
 #[test]
 #[ignore]
 /// Test the signer can respond to external commands to perform DKG
@@ -976,8 +963,8 @@ fn stackerdb_sign() {
 
     info!("------------------------- Test Setup -------------------------");
 
-    info!("Creating invalid blocks to sign...");
-    let header1 = NakamotoBlockHeader {
+    info!("Creating an invalid block to sign...");
+    let header = NakamotoBlockHeader {
         version: 1,
         chain_length: 2,
         burn_spent: 3,
@@ -989,12 +976,12 @@
         signer_signature: ThresholdSignature::empty(),
         signer_bitvec: BitVec::zeros(1).unwrap(),
     };
-    let mut block1 = NakamotoBlock {
-        header: header1,
+    let mut block = NakamotoBlock {
+        header,
         txs: vec![],
     };
-    let tx_merkle_root1 = {
-        let txid_vecs = block1
+    let tx_merkle_root = {
+        let txid_vecs = block
            .txs
            .iter()
            .map(|tx| tx.txid().as_bytes().to_vec())
@@ -1002,38 +989,18 @@
            .collect();
 
        MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root()
    };
-    block1.header.tx_merkle_root = tx_merkle_root1;
+    block.header.tx_merkle_root = tx_merkle_root;
 
-    let header2 = NakamotoBlockHeader {
-        version: 1,
-        chain_length: 3,
-        burn_spent: 4,
-        consensus_hash: ConsensusHash([0x05; 20]),
-        parent_block_id: StacksBlockId([0x06; 32]),
-        tx_merkle_root: Sha512Trunc256Sum([0x07; 32]),
-        state_index_root: TrieHash([0x08; 32]),
-        miner_signature: MessageSignature::empty(),
-        signer_signature: ThresholdSignature::empty(),
-        signer_bitvec: BitVec::zeros(1).unwrap(),
-    };
-    let mut block2 = NakamotoBlock {
-        header: header2,
-        txs: vec![],
-    };
-    let tx_merkle_root2 = {
-        let txid_vecs = block2
-            .txs
-            .iter()
-            .map(|tx| tx.txid().as_bytes().to_vec())
-            .collect();
-
-        MerkleTree::<Sha512Trunc256Sum>::new(&txid_vecs).root()
+    // The block is invalid so the signers should return a signature across a rejection
+    let block_vote = NakamotoBlockVote {
+        signer_signature_hash: block.header.signer_signature_hash(),
+        rejected: true,
     };
-    block2.header.tx_merkle_root = tx_merkle_root2;
+    let msg = block_vote.serialize_to_vec();
 
     let timeout = Duration::from_secs(200);
     let mut signer_test = SignerTest::new(10);
-    let _key = signer_test.boot_to_epoch_3(timeout);
+    let key = signer_test.boot_to_epoch_3(timeout);
 
     info!("------------------------- Test Sign -------------------------");
 
     let reward_cycle = signer_test.get_current_reward_cycle();
@@ -1043,7 +1010,7 @@ fn stackerdb_sign() {
     let sign_command = RunLoopCommand {
         reward_cycle,
         command: SignerCommand::Sign {
-            block: block1,
+            block: block.clone(),
             is_taproot: false,
             merkle_root: None,
         },
@@ -1051,7 +1018,7 @@ fn stackerdb_sign() {
     let sign_taproot_command = RunLoopCommand {
         reward_cycle,
         command: SignerCommand::Sign {
-            block: block2,
+            block: block.clone(),
             is_taproot: true,
             merkle_root: None,
         },
@@ -1064,29 +1031,51 @@ fn stackerdb_sign() {
             .send(sign_taproot_command.clone())
            .expect("failed to send sign taproot command");
     }
+    let frost_signatures = signer_test.wait_for_frost_signatures(timeout);
+    let schnorr_proofs = signer_test.wait_for_taproot_signatures(timeout);
 
-    // Don't wait for signatures. Because the block miner is acting as
-    // the coordinator, signers won't directly sign commands issued by someone
-    // other than the miner. Rather, they'll just broadcast their rejections.
-
+    for frost_signature in frost_signatures {
+        assert!(frost_signature.verify(&key, &msg));
+    }
+    for schnorr_proof in schnorr_proofs {
+        let tweaked_key = tweaked_public_key(&key, None);
+        assert!(
+            schnorr_proof.verify(&tweaked_key.x(), &msg),
+            "Schnorr proof verification failed"
+        );
+    }
     let sign_elapsed = sign_now.elapsed();
 
-    info!("------------------------- Test Block Rejected -------------------------");
+    info!("------------------------- Test Block Accepted -------------------------");
 
     // Verify the signers rejected the proposed block
     let t_start = Instant::now();
-    let signer_message = loop {
+    let mut chunk = None;
+    while chunk.is_none() {
         assert!(
             t_start.elapsed() < Duration::from_secs(30),
             "Timed out while waiting for signers block response stacker db event"
         );
         let nakamoto_blocks = test_observer::get_stackerdb_chunks();
-        if let Some(message) = find_block_response(nakamoto_blocks) {
-            break message;
+        for event in nakamoto_blocks {
+            // Only care about the miners block slot
+            if event.contract_id.name == format!("signers-1-{}", BLOCK_MSG_ID).as_str().into()
+                || event.contract_id.name == format!("signers-0-{}", BLOCK_MSG_ID).as_str().into()
+            {
+                for slot in event.modified_slots {
+                    chunk = Some(slot.data);
+                    break;
+                }
+                if chunk.is_some() {
+                    break;
+                }
+            }
         }
         thread::sleep(Duration::from_secs(1));
-    };
+    }
+    let chunk = chunk.unwrap();
+    let signer_message = read_next::<SignerMessage, _>(&mut &chunk[..]).unwrap();
     if let SignerMessage::BlockResponse(BlockResponse::Rejected(rejection)) = signer_message {
         assert!(matches!(
             rejection.reason_code,
@@ -1098,23 +1087,6 @@ fn stackerdb_sign() {
     info!("Sign Time Elapsed: {:.2?}", sign_elapsed);
 }
 
-pub fn find_block_response(chunk_events: Vec<StackerDBChunksEvent>) -> Option<SignerMessage> {
-    for event in chunk_events.into_iter() {
-        if event.contract_id.name.as_str()
-            == &format!("signers-1-{}", MessageSlotID::BlockResponse.to_u8())
-            || event.contract_id.name.as_str()
-                == &format!("signers-0-{}", MessageSlotID::BlockResponse.to_u8())
-        {
-            let Some(data) = event.modified_slots.first() else {
-                continue;
-            };
-            let msg = SignerMessage::consensus_deserialize(&mut data.data.as_slice()).unwrap();
-            return Some(msg);
-        }
-    }
-    None
-}
-
 #[test]
 #[ignore]
 /// Test that a signer can respond to a miners request for a signature on a block proposal
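With `find_block_response` deleted, both `stackerdb_sign` above and `stackerdb_block_proposal` below inline the same StackerDB scan. A hypothetical helper in the new style (it would need `StackerDBChunksEvent`, whose import this diff removes) shows the slot-name convention in one place: block responses land in contracts named `signers-0-{BLOCK_MSG_ID}` or `signers-1-{BLOCK_MSG_ID}`, one per reward-cycle parity:

use libsigner::BLOCK_MSG_ID;
use stacks::chainstate::stacks::events::StackerDBChunksEvent;

// Hypothetical helper mirroring the two inlined scans: return the first
// chunk written to either reward cycle's block message slot, if any.
fn find_block_chunk(events: Vec<StackerDBChunksEvent>) -> Option<Vec<u8>> {
    for event in events {
        let name = event.contract_id.name.as_str();
        if name == format!("signers-0-{}", BLOCK_MSG_ID)
            || name == format!("signers-1-{}", BLOCK_MSG_ID)
        {
            // Take the first modified slot, as the inlined loops do.
            if let Some(slot) = event.modified_slots.into_iter().next() {
                return Some(slot.data);
            }
        }
    }
    None
}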
@@ -1158,11 +1130,57 @@ fn stackerdb_block_proposal() {
 
     info!("------------------------- Test Block Signed -------------------------");
 
     // Verify that the signers signed the proposed block
-    let signature = signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, timeout);
-    assert!(signature
-        .0
-        .verify(&key, proposed_signer_signature_hash.as_bytes()));
+    let frost_signatures = signer_test.wait_for_frost_signatures(short_timeout);
+    for signature in &frost_signatures {
+        assert!(
+            signature.verify(&key, proposed_signer_signature_hash.0.as_slice()),
+            "Signature verification failed"
+        );
+    }
 
+    info!("------------------------- Test Signers Broadcast Block -------------------------");
+    // Verify that the signers broadcasted a signed NakamotoBlock back to the .signers contract
+    let t_start = Instant::now();
+    let mut chunk = None;
+    while chunk.is_none() {
+        assert!(
+            t_start.elapsed() < short_timeout,
+            "Timed out while waiting for signers block response stacker db event"
+        );
+        let nakamoto_blocks = test_observer::get_stackerdb_chunks();
+        for event in nakamoto_blocks {
+            if event.contract_id.name == format!("signers-1-{}", BLOCK_MSG_ID).as_str().into()
+                || event.contract_id.name == format!("signers-0-{}", BLOCK_MSG_ID).as_str().into()
+            {
+                for slot in event.modified_slots {
+                    chunk = Some(slot.data);
+                    break;
+                }
+                if chunk.is_some() {
+                    break;
+                }
+            }
+            if chunk.is_some() {
+                break;
+            }
+        }
+        thread::sleep(Duration::from_secs(1));
+    }
+    let chunk = chunk.unwrap();
+    let signer_message = read_next::<SignerMessage, _>(&mut &chunk[..]).unwrap();
+    if let SignerMessage::BlockResponse(BlockResponse::Accepted((
+        block_signer_signature_hash,
+        block_signature,
+    ))) = signer_message
+    {
+        assert_eq!(block_signer_signature_hash, proposed_signer_signature_hash);
+        assert_eq!(
+            block_signature,
+            ThresholdSignature(frost_signatures.first().expect("No signature").clone())
+        );
+    } else {
+        panic!("Received unexpected message");
+    }
 
     signer_test.shutdown();
 }
@@ -1311,8 +1329,13 @@ fn stackerdb_filter_bad_transactions() {
         .expect("Failed to write expected transactions to stackerdb");
 
     info!("------------------------- Verify Nakamoto Block Mined -------------------------");
-    let mined_block_event =
-        signer_test.mine_and_verify_confirmed_naka_block(&current_signers_dkg, timeout);
+    let mined_block_event = signer_test.mine_nakamoto_block(timeout);
+    let hash = signer_test.wait_for_validate_ok_response(timeout);
+    let signatures = signer_test.wait_for_frost_signatures(timeout);
+    // Verify the signers accepted the proposed block and are using the previously determined dkg to sign it
+    for signature in &signatures {
+        assert!(signature.verify(&current_signers_dkg, hash.0.as_slice()));
+    }
     for tx_event in &mined_block_event.tx_events {
         let TransactionEvent::Success(tx_success) = tx_event else {
             panic!("Received unexpected transaction event");
         };
@@ -1326,72 +1349,3 @@ fn stackerdb_filter_bad_transactions() {
     }
     signer_test.shutdown();
 }
-
-#[test]
-#[ignore]
-/// Test that signers will be able to continue their operations even if one signer is restarted.
-///
-/// Test Setup:
-/// The test spins up three stacks signers, one miner Nakamoto node, and a corresponding bitcoind.
-/// The stacks node is advanced to epoch 2.5, triggering a DKG round. The stacks node is then advanced
-/// to Epoch 3.0 boundary to allow block signing.
-///
-/// Test Execution:
-/// The signers sign one block as usual.
-/// Then, one of the signers is restarted.
-/// Finally, the signers sign another block with the restarted signer.
-///
-/// Test Assertion:
-/// The signers are able to produce a valid signature after one of them is restarted.
-fn stackerdb_sign_after_signer_reboot() {
-    if env::var("BITCOIND_TEST") != Ok("1".into()) {
-        return;
-    }
-
-    tracing_subscriber::registry()
-        .with(fmt::layer())
-        .with(EnvFilter::from_default_env())
-        .init();
-
-    info!("------------------------- Test Setup -------------------------");
-    let mut signer_test = SignerTest::new(3);
-    let timeout = Duration::from_secs(200);
-    let short_timeout = Duration::from_secs(30);
-
-    let key = signer_test.boot_to_epoch_3(timeout);
-
-    info!("------------------------- Test Mine Block -------------------------");
-
-    signer_test.mine_nakamoto_block(timeout);
-    let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout);
-    let signature =
-        signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout);
-
-    assert!(
-        signature.verify(&key, proposed_signer_signature_hash.0.as_slice()),
-        "Signature verification failed"
-    );
-
-    info!("------------------------- Restart one Signer -------------------------");
-    let signer_key = signer_test.stop_signer(2);
-    debug!(
-        "Removed signer 2 with key: {:?}, {}",
-        signer_key,
-        signer_key.to_hex()
-    );
-    signer_test.restart_signer(2, signer_key);
-
-    info!("------------------------- Test Mine Block after restart -------------------------");
-
-    signer_test.mine_nakamoto_block(timeout);
-    let proposed_signer_signature_hash = signer_test.wait_for_validate_ok_response(short_timeout);
-    let frost_signature =
-        signer_test.wait_for_confirmed_block(&proposed_signer_signature_hash, short_timeout);
-
-    assert!(
-        frost_signature.verify(&key, proposed_signer_signature_hash.0.as_slice()),
-        "Signature verification failed"
-    );
-
-    signer_test.shutdown();
-}
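Every signing assertion in this file reduces to the same wsts check: a FROST `Signature` (the `R`, `z` pair logged above) verifies against the DKG aggregate `Point` over the block's signer-sighash bytes. As a minimal restatement — the wrapper itself is hypothetical, while `Signature::verify` and `Point` are the wsts items this diff actually uses:

use wsts::common::Signature;
use wsts::curve::point::Point;

// Hypothetical wrapper over the verification pattern repeated above: every
// signer's FROST signature must verify against the aggregate DKG key over
// the block's signer sighash bytes.
fn all_signatures_valid(signatures: &[Signature], aggregate_key: &Point, msg: &[u8]) -> bool {
    signatures.iter().all(|sig| sig.verify(aggregate_key, msg))
}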