
Commit 7a92e85

georgehao, Thegaram, noel2004, and Copilot authored

add galileo CodecV9 (#66)

* add galileo CodecV9
* add missing switch cases and tests
* feat: consider RLE blocks in compatibility check starting from V9 (#67)
* fix: consider RLE blocks in zstd compatibility check (#64)
* fix compatibility
* fmt
* override behavior
* Apply suggestions from code review

Co-authored-by: Péter Garamvölgyi <[email protected]>
Co-authored-by: Ho <[email protected]>
Co-authored-by: georgehao <[email protected]>
Co-authored-by: Copilot <[email protected]>
1 parent b167b3f commit 7a92e85

File tree

6 files changed: +366 −46 lines changed

encoding/codecv9.go

Lines changed: 221 additions & 0 deletions
package encoding

import (
	"crypto/sha256"
	"encoding/hex"
	"errors"
	"fmt"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
	"github.com/scroll-tech/go-ethereum/log"
)

// DACodecV9 updates the implementation of the base function checkCompressedDataCompatibility
// to use the V9 compatibility check (checkCompressedDataCompatibilityV9) instead of the previous V7 version.
//
// As per Go's rules for shadowing methods with struct embedding, we need to override
// all methods that (transitively) call checkCompressedDataCompatibility:
// - checkCompressedDataCompatibility (updated to use V9)
// - constructBlob (calls checkCompressedDataCompatibility)
// - NewDABatch (calls constructBlob)
// - CheckChunkCompressedDataCompatibility (calls CheckBatchCompressedDataCompatibility)
// - CheckBatchCompressedDataCompatibility (calls checkCompressedDataCompatibility)
// - estimateL1CommitBatchSizeAndBlobSize (calls checkCompressedDataCompatibility)
// - EstimateChunkL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
// - EstimateBatchL1CommitBatchSizeAndBlobSize (calls estimateL1CommitBatchSizeAndBlobSize)
type DACodecV9 struct {
	DACodecV8
}

func NewDACodecV9() *DACodecV9 {
	v := CodecV9
	return &DACodecV9{
		DACodecV8: DACodecV8{
			DACodecV7: DACodecV7{forcedVersion: &v},
		},
	}
}

// checkCompressedDataCompatibility checks the compressed data compatibility for a batch.
// It constructs a blob payload, compresses the data, and checks the compressed data compatibility.
// The flag checkLength indicates whether to check the length of the compressed data against the original data.
// If checkLength is true, this function returns whether compression is needed based on the compressed data's length, which is used when doing batch bytes encoding.
// If checkLength is false, this function returns the result of the compatibility check, which is used when determining the chunk and batch contents.
func (d *DACodecV9) checkCompressedDataCompatibility(payloadBytes []byte, checkLength bool) ([]byte, bool, error) {
	compressedPayloadBytes, err := d.CompressScrollBatchBytes(payloadBytes)
	if err != nil {
		return nil, false, fmt.Errorf("failed to compress blob payload: %w", err)
	}

	if err = checkCompressedDataCompatibilityV9(compressedPayloadBytes); err != nil {
		log.Warn("Compressed data compatibility check failed", "err", err, "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
		return nil, false, nil
	}

	// if the compressed data is bigger than or equal to the original data -> no need to compress
	if checkLength && len(compressedPayloadBytes) >= len(payloadBytes) {
		log.Warn("Compressed data is bigger or equal to the original data", "payloadBytes", hex.EncodeToString(payloadBytes), "compressedPayloadBytes", hex.EncodeToString(compressedPayloadBytes))
		return nil, false, nil
	}

	return compressedPayloadBytes, true, nil
}

// NewDABatch creates a DABatch including blob from the provided Batch.
func (d *DACodecV9) NewDABatch(batch *Batch) (DABatch, error) {
	if len(batch.Blocks) == 0 {
		return nil, errors.New("batch must contain at least one block")
	}

	if err := checkBlocksBatchVSChunksConsistency(batch); err != nil {
		return nil, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
	}

	blob, blobVersionedHash, blobBytes, challengeDigest, err := d.constructBlob(batch)
	if err != nil {
		return nil, fmt.Errorf("failed to construct blob: %w", err)
	}

	daBatch, err := newDABatchV7(d.Version(), batch.Index, blobVersionedHash, batch.ParentBatchHash, blob, blobBytes, challengeDigest)
	if err != nil {
		return nil, fmt.Errorf("failed to construct DABatch: %w", err)
	}

	return daBatch, nil
}

func (d *DACodecV9) constructBlob(batch *Batch) (*kzg4844.Blob, common.Hash, []byte, common.Hash, error) {
	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)

	payloadBytes, err := d.constructBlobPayload(batch)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	isCompressedFlag := uint8(0x0)
	if enableCompression {
		isCompressedFlag = 0x1
		payloadBytes = compressedPayloadBytes
	}

	sizeSlice := encodeSize3Bytes(uint32(len(payloadBytes)))

	blobBytes[blobEnvelopeV7OffsetVersion] = uint8(d.Version())
	copy(blobBytes[blobEnvelopeV7OffsetByteSize:blobEnvelopeV7OffsetCompressedFlag], sizeSlice)
	blobBytes[blobEnvelopeV7OffsetCompressedFlag] = isCompressedFlag
	blobBytes = append(blobBytes, payloadBytes...)

	if len(blobBytes) > maxEffectiveBlobBytes {
		log.Error("ConstructBlob: Blob payload exceeds maximum size", "size", len(blobBytes), "blobBytes", hex.EncodeToString(blobBytes))
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("blob exceeds maximum size: got %d, allowed %d", len(blobBytes), maxEffectiveBlobBytes)
	}

	// convert raw data to BLSFieldElements
	blob, err := makeBlobCanonical(blobBytes)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to convert blobBytes to canonical form: %w", err)
	}

	// compute blob versioned hash
	c, err := kzg4844.BlobToCommitment(blob)
	if err != nil {
		return nil, common.Hash{}, nil, common.Hash{}, fmt.Errorf("failed to create blob commitment: %w", err)
	}
	blobVersionedHash := kzg4844.CalcBlobHashV1(sha256.New(), &c)

	// compute challenge digest. For codecv7 and later, unlike previous versions,
	// the blob bytes are padded to the max effective blob size, which is 131072 / 32 * 31 due to the blob encoding
	paddedBlobBytes := make([]byte, maxEffectiveBlobBytes)
	copy(paddedBlobBytes, blobBytes)

	challengeDigest := crypto.Keccak256Hash(crypto.Keccak256(paddedBlobBytes), blobVersionedHash[:])

	return blob, blobVersionedHash, blobBytes, challengeDigest, nil
}

// CheckChunkCompressedDataCompatibility checks the compressed data compatibility for a batch built from a single chunk.
func (d *DACodecV9) CheckChunkCompressedDataCompatibility(c *Chunk) (bool, error) {
	// fill the fields needed for the batch used in the check
	b := &Batch{
		Chunks:                 []*Chunk{c},
		PrevL1MessageQueueHash: c.PrevL1MessageQueueHash,
		PostL1MessageQueueHash: c.PostL1MessageQueueHash,
		Blocks:                 c.Blocks,
	}

	return d.CheckBatchCompressedDataCompatibility(b)
}

// CheckBatchCompressedDataCompatibility checks the compressed data compatibility for a batch.
func (d *DACodecV9) CheckBatchCompressedDataCompatibility(b *Batch) (bool, error) {
	if len(b.Blocks) == 0 {
		return false, errors.New("batch must contain at least one block")
	}

	if err := checkBlocksBatchVSChunksConsistency(b); err != nil {
		return false, fmt.Errorf("failed to check blocks batch vs chunks consistency: %w", err)
	}

	payloadBytes, err := d.constructBlobPayload(b)
	if err != nil {
		return false, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	// This check is only used for sanity checks. If the check fails, it means that the compression did not work as expected.
	// rollup-relayer will try popping the last chunk of the batch (or the last block of the chunk when proposing chunks) and try again to see if it works as expected.
	// Since the length check is used for DA and proving efficiency, it does not need to be checked here.
	_, compatible, err := d.checkCompressedDataCompatibility(payloadBytes, false /* checkLength */)
	if err != nil {
		return false, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	return compatible, nil
}

func (d *DACodecV9) estimateL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
	if len(batch.Blocks) == 0 {
		return 0, 0, errors.New("batch must contain at least one block")
	}

	blobBytes := make([]byte, blobEnvelopeV7OffsetPayload)

	payloadBytes, err := d.constructBlobPayload(batch)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to construct blob payload: %w", err)
	}

	compressedPayloadBytes, enableCompression, err := d.checkCompressedDataCompatibility(payloadBytes, true /* checkLength */)
	if err != nil {
		return 0, 0, fmt.Errorf("failed to check batch compressed data compatibility: %w", err)
	}

	if enableCompression {
		blobBytes = append(blobBytes, compressedPayloadBytes...)
	} else {
		blobBytes = append(blobBytes, payloadBytes...)
	}

	return blobEnvelopeV7OffsetPayload + uint64(len(payloadBytes)), calculatePaddedBlobSize(uint64(len(blobBytes))), nil
}

// EstimateChunkL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a single chunk.
func (d *DACodecV9) EstimateChunkL1CommitBatchSizeAndBlobSize(chunk *Chunk) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(&Batch{
		Blocks:                 chunk.Blocks,
		PrevL1MessageQueueHash: chunk.PrevL1MessageQueueHash,
		PostL1MessageQueueHash: chunk.PostL1MessageQueueHash,
	})
}

// EstimateBatchL1CommitBatchSizeAndBlobSize estimates the L1 commit batch size and blob size for a batch.
func (d *DACodecV9) EstimateBatchL1CommitBatchSizeAndBlobSize(batch *Batch) (uint64, uint64, error) {
	return d.estimateL1CommitBatchSizeAndBlobSize(batch)
}
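
The override list in the file's doc comment follows from how Go resolves methods through embedding: a promoted method's body is compiled against the embedded type, so any call it makes to a sibling method binds to the embedded implementation, never to a shadow defined on the outer struct. A minimal, self-contained sketch of the pitfall (hypothetical names, not from this codebase):

package main

import "fmt"

type base struct{}

func (b *base) check() string { return "v8 check" }

// construct is defined on base, so its call to check always resolves
// to base.check, even when base is embedded in another struct.
func (b *base) construct() string { return b.check() }

type derived struct{ base }

// This shadows base.check for direct callers of derived.check, but it
// does not change what the promoted construct calls.
func (d *derived) check() string { return "v9 check" }

func main() {
	d := &derived{}
	fmt.Println(d.check())     // "v9 check"
	fmt.Println(d.construct()) // "v8 check": the shadow is bypassed
}

This is why DACodecV9 re-implements NewDABatch, constructBlob, and the estimators even though their bodies match V8's: each copy re-binds the internal checkCompressedDataCompatibility call to the V9 receiver.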

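For orientation, constructBlob assembles a fixed-size envelope header ahead of the payload. The sketch below makes the layout concrete; packEnvelope is a hypothetical helper, and the exact offsets and the big-endian size encoding are assumptions inferred from the blobEnvelopeV7Offset* constants and encodeSize3Bytes, neither of which appears in this diff.

package main

import "fmt"

// packEnvelope mimics the envelope layout constructBlob appears to use:
// byte 0 codec version, bytes 1..3 payload length, byte 4 isCompressed
// flag, payload from byte 5 onward. Offsets and endianness are assumed.
func packEnvelope(version byte, payload []byte, compressed bool) []byte {
	env := make([]byte, 5, 5+len(payload))
	env[0] = version
	n := len(payload)
	env[1], env[2], env[3] = byte(n>>16), byte(n>>8), byte(n) // assumed big-endian
	if compressed {
		env[4] = 0x1
	}
	return append(env, payload...)
}

func main() {
	fmt.Printf("% x\n", packEnvelope(9, []byte("hello"), false))
	// 09 00 00 05 00 68 65 6c 6c 6f
}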
encoding/da.go

Lines changed: 63 additions & 4 deletions
@@ -509,6 +509,61 @@ func checkCompressedDataCompatibilityV7(data []byte) error {
 	return nil
 }
 
+// Sanity check if the compressed data (v9) is compatible with our circuit.
+// If we conclude that the data could not be decompressed, then we will
+// commit it uncompressed instead.
+func checkCompressedDataCompatibilityV9(data []byte) error {
+	if len(data) < 16 {
+		return fmt.Errorf("too small size (0x%x), what is it?", data)
+	}
+
+	fheader := data[0]
+	// it is not the encoding type we expected in our zstd header
+	if fheader&63 != 32 {
+		return fmt.Errorf("unexpected header type (%x)", fheader)
+	}
+
+	// skip content size
+	switch fheader >> 6 {
+	case 0:
+		data = data[2:]
+	case 1:
+		data = data[3:]
+	case 2:
+		data = data[5:]
+	case 3:
+		data = data[9:]
+	default:
+		panic("impossible")
+	}
+
+	isLast := false
+	// scan each block until done
+	for len(data) > 3 && !isLast {
+		isLast = (data[0] & 1) == 1
+		blkType := (data[0] >> 1) & 3
+		var blkSize uint
+		if blkType == 1 { // RLE Block
+			blkSize = 1
+		} else {
+			if blkType == 3 {
+				return fmt.Errorf("encounter reserved block type at %v", data)
+			}
+			blkSize = (uint(data[2])*65536 + uint(data[1])*256 + uint(data[0])) >> 3
+		}
+		if len(data) < 3+int(blkSize) {
+			return fmt.Errorf("wrong data len {%d}, expect min {%d}", len(data), 3+blkSize)
+		}
+		data = data[3+blkSize:]
+	}
+
+	if !isLast {
+		return fmt.Errorf("unexpected end before last block")
+	}
+
+	return nil
+}
+
 // makeBlobCanonical converts the raw blob data into the canonical blob representation of 4096 BLSFieldElements.
 // The canonical blob representation is a 32-byte array where every 31 bytes are prepended with 1 zero byte.
 // The kzg4844.Blob is a 4096-byte array, thus 0s are padded to the end of the array.
@@ -768,8 +823,10 @@ func GetHardforkName(config *params.ChainConfig, blockHeight, blockTimestamp uin
 		return "euclid"
 	} else if !config.IsFeynman(blockTimestamp) {
 		return "euclidV2"
-	} else {
+	} else if !config.IsGalileo(blockTimestamp) {
 		return "feynman"
+	} else {
+		return "galileo"
 	}
 }
 
@@ -791,8 +848,10 @@ func GetCodecVersion(config *params.ChainConfig, blockHeight, blockTimestamp uin
 		return CodecV6
 	} else if !config.IsFeynman(blockTimestamp) {
 		return CodecV7
-	} else {
+	} else if !config.IsGalileo(blockTimestamp) {
 		return CodecV8
+	} else {
+		return CodecV9
 	}
 }
 
@@ -821,7 +880,7 @@ func GetChunkEnableCompression(codecVersion CodecVersion, chunk *Chunk) (bool, e
 		return false, nil
 	case CodecV2, CodecV3:
 		return true, nil
-	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8:
+	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8, CodecV9:
 		return CheckChunkCompressedDataCompatibility(chunk, codecVersion)
 	default:
 		return false, fmt.Errorf("unsupported codec version: %v", codecVersion)
@@ -835,7 +894,7 @@ func GetBatchEnableCompression(codecVersion CodecVersion, batch *Batch) (bool, e
 		return false, nil
 	case CodecV2, CodecV3:
 		return true, nil
-	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8:
+	case CodecV4, CodecV5, CodecV6, CodecV7, CodecV8, CodecV9:
 		return CheckBatchCompressedDataCompatibility(batch, codecVersion)
 	default:
 		return false, fmt.Errorf("unsupported codec version: %v", codecVersion)
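
The new checkCompressedDataCompatibilityV9 walks the zstd frame by hand. Per the zstd format (RFC 8878), the first byte is the frame header descriptor: requiring fheader&63 == 32 pins down a single-segment frame with no checksum and no dictionary ID, and fheader>>6 selects a 1-, 2-, 4-, or 8-byte frame content size field, hence the 2/3/5/9-byte skips. Each block then starts with a 3-byte little-endian header: bit 0 is the last-block flag, bits 1-2 the block type, bits 3-23 the block size. The V9 change (per #67) is the RLE case: an RLE block declares its regenerated size in the header but stores only a single byte, so the scanner must advance by 1 rather than by the declared size. A small sketch of that arithmetic (decodeBlockHeader is illustrative, not part of the diff):

package main

import "fmt"

// decodeBlockHeader mirrors the block-header arithmetic in
// checkCompressedDataCompatibilityV9. The 3 bytes form a little-endian
// value: bit 0 is the Last_Block flag, bits 1-2 the Block_Type, and
// bits 3-23 the Block_Size.
func decodeBlockHeader(h [3]byte) (isLast bool, blkType uint, advance uint) {
	v := uint(h[2])*65536 + uint(h[1])*256 + uint(h[0])
	isLast = v&1 == 1
	blkType = (v >> 1) & 3
	if blkType == 1 {
		// RLE block: the header declares the regenerated size, but the
		// block body is a single byte, so advance by 1, not by the size.
		advance = 1
	} else {
		advance = v >> 3
	}
	return isLast, blkType, advance
}

func main() {
	// 0x0B = 0b0000_1011: last=1, type=1 (RLE), declared size 1
	fmt.Println(decodeBlockHeader([3]byte{0x0B, 0x00, 0x00})) // true 1 1
	// 0x24 = 0b0010_0100: last=0, type=2 (compressed), size 4
	fmt.Println(decodeBlockHeader([3]byte{0x24, 0x00, 0x00})) // false 2 4
}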

encoding/interfaces.go

Lines changed: 6 additions & 1 deletion
@@ -94,6 +94,7 @@ const (
 	CodecV6
 	CodecV7
 	CodecV8
+	CodecV9
 )
 
 // CodecFromVersion returns the appropriate codec for the given version.
@@ -117,14 +118,18 @@ func CodecFromVersion(version CodecVersion) (Codec, error) {
 		return &DACodecV7{}, nil
 	case CodecV8:
 		return NewDACodecV8(), nil
+	case CodecV9:
+		return NewDACodecV9(), nil
 	default:
 		return nil, fmt.Errorf("unsupported codec version: %v", version)
 	}
 }
 
 // CodecFromConfig determines and returns the appropriate codec based on chain configuration, block number, and timestamp.
 func CodecFromConfig(chainCfg *params.ChainConfig, startBlockNumber *big.Int, startBlockTimestamp uint64) Codec {
-	if chainCfg.IsFeynman(startBlockTimestamp) {
+	if chainCfg.IsGalileo(startBlockTimestamp) {
+		return NewDACodecV9()
+	} else if chainCfg.IsFeynman(startBlockTimestamp) {
 		return NewDACodecV8()
 	} else if chainCfg.IsEuclidV2(startBlockTimestamp) {
 		return &DACodecV7{}