2 changes: 1 addition & 1 deletion go.mod
@@ -51,7 +51,7 @@ require (
	github.com/prometheus/tsdb v0.7.1
	github.com/rjeczalik/notify v0.9.1
	github.com/rs/cors v1.7.0
-	github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1
+	github.com/scroll-tech/da-codec v0.1.3-0.20250623141222-9e6ea1d439fa
	github.com/scroll-tech/zktrie v0.8.4
	github.com/shirou/gopsutil v3.21.11+incompatible
	github.com/sourcegraph/conc v0.3.0
4 changes: 2 additions & 2 deletions go.sum
@@ -396,8 +396,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1 h1:6aKqJSal+QVdB5HMWMs0JTbAIZ6/iAHJx9qizz0w9dU=
-github.com/scroll-tech/da-codec v0.1.3-0.20250519114140-bfa7133d4ad1/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
+github.com/scroll-tech/da-codec v0.1.3-0.20250623141222-9e6ea1d439fa h1:IwobY81o8LeI1kV4WkbaYFh6oDtAfheFjUO/dapkNlE=
+github.com/scroll-tech/da-codec v0.1.3-0.20250623141222-9e6ea1d439fa/go.mod h1:T+370kLxwACnmWaq4rgtKGyLCd7QSE6iy0JLfCp1qj4=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
34 changes: 32 additions & 2 deletions rollup/fees/rollup_fee.go
@@ -6,10 +6,12 @@ import (
"math/big"

"github.com/holiman/uint256"
"github.com/scroll-tech/da-codec/encoding/zstd"

"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rollup/rcfg"
)
@@ -168,8 +170,36 @@ func readGPOStorageSlots(addr common.Address, state StateDB) gpoState {
// calculateCompressionRatio computes the compression ratio of the data using zstd
// compression_ratio(tx) = size(tx) * PRECISION / size(zstd(tx))
func calculateCompressionRatio(data []byte) *big.Int {
-	// FIXME: This is a placeholder for the actual compression ratio calculation in another PR.
-	return rcfg.Precision
+	if len(data) == 0 {
+		return rcfg.Precision
+	}
+
+	// Compress the data using zstd
+	compressed, err := zstd.CompressScrollBatchBytesStandard(data)
+	if err != nil {
+		log.Error("Batch compression failed, using 1.0 compression ratio", "error", err, "data size", len(data), "data", common.Bytes2Hex(data))
+		return rcfg.Precision
+	}
+
+	if len(compressed) == 0 {
+		log.Error("Compressed data is empty, using 1.0 compression ratio", "data size", len(data), "data", common.Bytes2Hex(data))
+		return rcfg.Precision
+	}
+
+	// compression_ratio = size(tx) * PRECISION / size(zstd(tx))
+	originalSize := new(big.Int).SetUint64(uint64(len(data)))
+	compressedSize := new(big.Int).SetUint64(uint64(len(compressed)))
+
+	// Ensure the compression ratio is >= 1 by checking whether the compressed data
+	// is at least as large as the original data. This behavior is consistent with
+	// DA batch compression in codecv7 and later versions.
+	if len(compressed) >= len(data) {
+		log.Debug("Compressed data is at least as large as the original data, using 1.0 compression ratio", "original size", len(data), "compressed size", len(compressed))
+		return rcfg.Precision
+	}
+
+	ratio := new(big.Int).Mul(originalSize, rcfg.Precision)
+	ratio.Div(ratio, compressedSize)
+
+	return ratio
}

// calculatePenalty computes the penalty multiplier based on compression ratio
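As a sanity check on the new function, the fixed-point math is easy to reproduce in isolation. Below is a minimal sketch, assuming PRECISION = 1_000_000_000 (as the test file's comments indicate) and substituting precomputed byte counts for the actual zstd call; the name compressionRatio is illustrative and not part of this PR.

```go
package main

import (
	"fmt"
	"math/big"
)

// precision mirrors rcfg.Precision; the test file treats 1_000_000_000 as 1 * PRECISION.
var precision = big.NewInt(1_000_000_000)

// compressionRatio is an illustrative stand-in for calculateCompressionRatio:
// ratio = originalSize * PRECISION / compressedSize, floored at 1.0 (= PRECISION).
func compressionRatio(originalSize, compressedSize int) *big.Int {
	if originalSize == 0 || compressedSize == 0 || compressedSize >= originalSize {
		return new(big.Int).Set(precision) // 1.0 in fixed-point
	}
	ratio := new(big.Int).Mul(big.NewInt(int64(originalSize)), precision)
	return ratio.Div(ratio, big.NewInt(int64(compressedSize)))
}

func main() {
	// 1000 bytes compressing to 250 bytes gives a 4.0x ratio in fixed-point.
	fmt.Println(compressionRatio(1000, 250)) // 4000000000
	// Incompressible data (compressed >= original) is clamped to 1.0.
	fmt.Println(compressionRatio(1000, 1200)) // 1000000000
}
```

Clamping at 1.0 mirrors the codecv7 DA batch behavior noted in the diff: incompressible data never yields a ratio below the uncompressed baseline.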
8 changes: 4 additions & 4 deletions rollup/fees/rollup_fee_test.go
@@ -37,19 +37,19 @@ func TestL1DataFeeFeynman(t *testing.T) {
	l1BlobBaseFee := new(big.Int).SetInt64(1_000_000_000)
	execScalar := new(big.Int).SetInt64(10)
	blobScalar := new(big.Int).SetInt64(20)
-	penaltyThreshold := new(big.Int).SetInt64(1_000_000_000) // 1 * PRECISION
+	penaltyThreshold := new(big.Int).SetInt64(6_000_000_000) // 6 * PRECISION
	penaltyFactor := new(big.Int).SetInt64(2_000_000_000) // 2 * PRECISION (200% penalty)

	// Test case 1: No penalty (compression ratio >= threshold)
	t.Run("no penalty case", func(t *testing.T) {
-		data := make([]byte, 10) // txSize = 10
+		data := make([]byte, 100) // txSize = 100

		// Since compression ratio will be >= penaltyThreshold, penalty = 1 * PRECISION
		// feePerByte = execScalar * l1BaseFee + blobScalar * l1BlobBaseFee = 10 * 1_000_000_000 + 20 * 1_000_000_000 = 30_000_000_000
		// l1DataFee = feePerByte * txSize * penalty / PRECISION / PRECISION
-		// = 30_000_000_000 * 10 * 1_000_000_000 / 1_000_000_000 / 1_000_000_000 = 300
+		// = 30_000_000_000 * 100 * 1_000_000_000 / 1_000_000_000 / 1_000_000_000 = 3000

-		expected := new(big.Int).SetInt64(300)
+		expected := new(big.Int).SetInt64(3000)

		actual := calculateEncodedL1DataFeeFeynman(
			data,
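The updated expectation of 3000 can be verified by reproducing the arithmetic from the test's comments. Below is a minimal sketch, assuming PRECISION = 1_000_000_000 and the no-penalty path (penalty = 1 * PRECISION); the helper l1DataFeeNoPenalty is illustrative and not the PR's calculateEncodedL1DataFeeFeynman.

```go
package main

import (
	"fmt"
	"math/big"
)

// l1DataFeeNoPenalty reproduces the arithmetic from the test comments:
//   feePerByte = execScalar*l1BaseFee + blobScalar*l1BlobBaseFee
//   l1DataFee  = feePerByte * txSize * penalty / PRECISION / PRECISION
func l1DataFeeNoPenalty(l1BaseFee, l1BlobBaseFee, execScalar, blobScalar *big.Int, txSize int) *big.Int {
	precision := big.NewInt(1_000_000_000)
	penalty := new(big.Int).Set(precision) // no penalty: 1 * PRECISION

	feePerByte := new(big.Int).Mul(execScalar, l1BaseFee)
	feePerByte.Add(feePerByte, new(big.Int).Mul(blobScalar, l1BlobBaseFee))

	fee := new(big.Int).Mul(feePerByte, big.NewInt(int64(txSize)))
	fee.Mul(fee, penalty)
	fee.Div(fee, precision)
	fee.Div(fee, precision)
	return fee
}

func main() {
	// Values from the test: feePerByte = 10*1e9 + 20*1e9 = 30_000_000_000;
	// fee = 30e9 * 100 * 1e9 / 1e9 / 1e9 = 3000.
	fee := l1DataFeeNoPenalty(
		big.NewInt(1_000_000_000), // l1BaseFee (per the test comments)
		big.NewInt(1_000_000_000), // l1BlobBaseFee
		big.NewInt(10),            // execScalar
		big.NewInt(20),            // blobScalar
		100,                       // txSize
	)
	fmt.Println(fee) // 3000
}
```

With txSize raised from 10 to 100 bytes, the fee scales linearly from 300 to 3000, matching the updated expected value in the test.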