
Commit c866dfd

core: remove outdated tests (#27662)
Before #27178, we spun up a number of ethash verifiers to verify headers, so we also had tests ensuring that we could abort verification even with multiple workers running. PR #27178 removed that parallelism: results are now fired off sequentially, as fast as possible, on a single goroutine, so these tests fail. This change removes the (sometimes failing) tests.
1 parent cbf2579 commit c866dfd
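For context, here is a minimal sketch of the post-#27178 shape the commit message describes: a single goroutine fires results back-to-back, and the abort channel is only consulted between headers. The names (verifySeq, verifyHeader, header) are illustrative stand-ins, not go-ethereum's actual implementation.

package main

import "fmt"

type header struct{ number uint64 }

// verifyHeader stands in for the real consensus check.
func verifyHeader(h *header) error { return nil }

// verifySeq mimics the sequential shape: one goroutine fires results
// back-to-back; abort is only observed between headers, so results can
// arrive faster than a caller can react.
func verifySeq(headers []*header) (chan<- struct{}, <-chan error) {
	abort := make(chan struct{})
	results := make(chan error, len(headers))
	go func() {
		for _, h := range headers {
			select {
			case <-abort:
				return
			case results <- verifyHeader(h):
			}
		}
	}()
	return abort, results
}

func main() {
	headers := []*header{{1}, {2}, {3}}
	abort, results := verifySeq(headers)
	defer close(abort)
	for range headers {
		fmt.Println(<-results) // each prints <nil>: all headers valid
	}
}

With no worker pool to tear down, any assertion of the form "aborting limits how many headers get verified" stops being meaningful, which is what made the deleted tests flaky.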

File tree

2 files changed: +9 −122 lines


core/block_validator_test.go

Lines changed: 0 additions & 113 deletions
@@ -18,7 +18,6 @@ package core
 
 import (
 	"math/big"
-	"runtime"
 	"testing"
 	"time"
 

@@ -235,118 +234,6 @@ func testHeaderVerificationForMerging(t *testing.T, isClique bool) {
 	}
 }
 
-// Tests that concurrent header verification works, for both good and bad blocks.
-func TestHeaderConcurrentVerification2(t *testing.T)  { testHeaderConcurrentVerification(t, 2) }
-func TestHeaderConcurrentVerification8(t *testing.T)  { testHeaderConcurrentVerification(t, 8) }
-func TestHeaderConcurrentVerification32(t *testing.T) { testHeaderConcurrentVerification(t, 32) }
-
-func testHeaderConcurrentVerification(t *testing.T, threads int) {
-	// Create a simple chain to verify
-	var (
-		gspec        = &Genesis{Config: params.TestChainConfig}
-		_, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 8, nil)
-	)
-	headers := make([]*types.Header, len(blocks))
-	for i, block := range blocks {
-		headers[i] = block.Header()
-	}
-	// Set the number of threads to verify on
-	old := runtime.GOMAXPROCS(threads)
-	defer runtime.GOMAXPROCS(old)
-
-	// Run the header checker for the entire block chain at once both for a valid and
-	// also an invalid chain (enough if one arbitrary block is invalid).
-	for i, valid := range []bool{true, false} {
-		var results <-chan error
-
-		if valid {
-			chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFaker(), vm.Config{}, nil, nil)
-			_, results = chain.engine.VerifyHeaders(chain, headers)
-			chain.Stop()
-		} else {
-			chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeFailer(uint64(len(headers)-1)), vm.Config{}, nil, nil)
-			_, results = chain.engine.VerifyHeaders(chain, headers)
-			chain.Stop()
-		}
-		// Wait for all the verification results
-		checks := make(map[int]error)
-		for j := 0; j < len(blocks); j++ {
-			select {
-			case result := <-results:
-				checks[j] = result
-
-			case <-time.After(time.Second):
-				t.Fatalf("test %d.%d: verification timeout", i, j)
-			}
-		}
-		// Check nonce check validity
-		for j := 0; j < len(blocks); j++ {
-			want := valid || (j < len(blocks)-2) // We chose the last-but-one nonce in the chain to fail
-			if (checks[j] == nil) != want {
-				t.Errorf("test %d.%d: validity mismatch: have %v, want %v", i, j, checks[j], want)
-			}
-			if !want {
-				// A few blocks after the first error may pass verification due to concurrent
-				// workers. We don't care about those in this test, just that the correct block
-				// errors out.
-				break
-			}
-		}
-		// Make sure no more data is returned
-		select {
-		case result := <-results:
-			t.Fatalf("test %d: unexpected result returned: %v", i, result)
-		case <-time.After(25 * time.Millisecond):
-		}
-	}
-}
-
-// Tests that aborting a header validation indeed prevents further checks from being
-// run, as well as checks that no left-over goroutines are leaked.
-func TestHeaderConcurrentAbortion2(t *testing.T)  { testHeaderConcurrentAbortion(t, 2) }
-func TestHeaderConcurrentAbortion8(t *testing.T)  { testHeaderConcurrentAbortion(t, 8) }
-func TestHeaderConcurrentAbortion32(t *testing.T) { testHeaderConcurrentAbortion(t, 32) }
-
-func testHeaderConcurrentAbortion(t *testing.T, threads int) {
-	// Create a simple chain to verify
-	var (
-		gspec        = &Genesis{Config: params.TestChainConfig}
-		_, blocks, _ = GenerateChainWithGenesis(gspec, ethash.NewFaker(), 1024, nil)
-	)
-	headers := make([]*types.Header, len(blocks))
-	for i, block := range blocks {
-		headers[i] = block.Header()
-	}
-	// Set the number of threads to verify on
-	old := runtime.GOMAXPROCS(threads)
-	defer runtime.GOMAXPROCS(old)
-
-	// Start the verifications and immediately abort
-	chain, _ := NewBlockChain(rawdb.NewMemoryDatabase(), nil, gspec, nil, ethash.NewFakeDelayer(time.Millisecond), vm.Config{}, nil, nil)
-	defer chain.Stop()
-
-	abort, results := chain.engine.VerifyHeaders(chain, headers)
-	close(abort)
-
-	// Deplete the results channel
-	verified := 0
-	for depleted := false; !depleted; {
-		select {
-		case result := <-results:
-			if result != nil {
-				t.Errorf("header %d: validation failed: %v", verified, result)
-			}
-			verified++
-		case <-time.After(50 * time.Millisecond):
-			depleted = true
-		}
-	}
-	// Check that abortion was honored by not processing too many POWs
-	if verified > 2*threads {
-		t.Errorf("verification count too large: have %d, want below %d", verified, 2*threads)
-	}
-}
-
 func TestCalcGasLimit(t *testing.T) {
 	for i, tc := range []struct {
 		pGasLimit uint64
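For readers without the pre-#27178 context: the deleted tests assumed VerifyHeaders fanned headers out to a pool of workers and asserted that closing abort stopped them quickly (verified <= 2*threads). A rough fragment of that fan-out shape, reusing the header/verifyHeader stand-ins from the sketch above; the real code lived in consensus/ethash, preserved result ordering, and differed in detail.

// Rough sketch only: fan-out verification across a fixed worker count, with
// abort consulted while feeding work. Not the actual ethash implementation.
func verifyParallel(headers []*header, workers int) (chan<- struct{}, <-chan error) {
	var (
		abort   = make(chan struct{})
		inputs  = make(chan int)
		results = make(chan error, len(headers))
	)
	for i := 0; i < workers; i++ {
		go func() {
			for idx := range inputs {
				results <- verifyHeader(headers[idx])
			}
		}()
	}
	go func() {
		defer close(inputs) // lets the workers drain and exit
		for i := range headers {
			select {
			case inputs <- i:
			case <-abort:
				return // stop feeding; each worker finishes at most its current header
			}
		}
	}()
	return abort, results
}

With this shape, close(abort) bounds in-flight work at roughly one header per worker, which is exactly what the verified > 2*threads assertion checked; the sequential replacement has no such bound, hence the flakiness.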

core/txpool/legacypool/legacypool_test.go

Lines changed: 9 additions & 9 deletions
@@ -1090,7 +1090,7 @@ func testQueueTimeLimiting(t *testing.T, nolocals bool) {
 		t.Fatalf("pending transactions mismatched: have %d, want %d", pending, 2)
 	}
 	if queued != 2 {
-		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 3)
+		t.Fatalf("queued transactions mismatched: have %d, want %d", queued, 2)
 	}
 	if err := validatePoolInternals(pool); err != nil {
 		t.Fatalf("pool internal state corrupted: %v", err)
@@ -1455,7 +1455,7 @@ func TestRepricing(t *testing.T) {
 	if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(2), keys[1])); err != nil {
 		t.Fatalf("failed to add pending transaction: %v", err)
 	}
-	if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
+	if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(2), keys[2])); err != nil {
 		t.Fatalf("failed to add queued transaction: %v", err)
 	}
 	if err := validateEvents(events, 5); err != nil {
@@ -1582,7 +1582,7 @@ func TestRepricingDynamicFee(t *testing.T) {
 		t.Fatalf("failed to add pending transaction: %v", err)
 	}
 	tx = dynamicFeeTx(2, 100000, big.NewInt(2), big.NewInt(2), keys[2])
-	if err := pool.addRemote(tx); err != nil {
+	if err := pool.addRemoteSync(tx); err != nil {
 		t.Fatalf("failed to add queued transaction: %v", err)
 	}
 	if err := validateEvents(events, 5); err != nil {
@@ -1723,18 +1723,18 @@ func TestUnderpricing(t *testing.T) {
 		t.Fatalf("pool internal state corrupted: %v", err)
 	}
 	// Ensure that adding an underpriced transaction on block limit fails
-	if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
+	if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) {
 		t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced)
 	}
 	// Replace a future transaction with a future transaction
-	if err := pool.addRemote(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
+	if err := pool.addRemoteSync(pricedTransaction(1, 100000, big.NewInt(2), keys[1])); err != nil { // +K1:1 => -K1:1 => Pend K0:0, K0:1, K2:0; Que K1:1
 		t.Fatalf("failed to add well priced transaction: %v", err)
 	}
 	// Ensure that adding high priced transactions drops cheap ones, but not own
-	if err := pool.addRemote(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
+	if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(3), keys[1])); err != nil { // +K1:0 => -K1:1 => Pend K0:0, K0:1, K1:0, K2:0; Que -
 		t.Fatalf("failed to add well priced transaction: %v", err)
 	}
-	if err := pool.addRemote(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
+	if err := pool.addRemoteSync(pricedTransaction(2, 100000, big.NewInt(4), keys[1])); err != nil { // +K1:2 => -K0:0 => Pend K1:0, K2:0; Que K0:1 K1:2
 		t.Fatalf("failed to add well priced transaction: %v", err)
 	}
 	if err := pool.addRemote(pricedTransaction(3, 100000, big.NewInt(5), keys[1])); err != nil { // +K1:3 => -K0:1 => Pend K1:0, K2:0; Que K1:2 K1:3
@@ -1915,11 +1915,11 @@ func TestUnderpricingDynamicFee(t *testing.T) {
 	}
 
 	tx = pricedTransaction(1, 100000, big.NewInt(3), keys[1])
-	if err := pool.addRemote(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2
+	if err := pool.addRemoteSync(tx); err != nil { // +K1:2, -K0:1 => Pend K0:0 K1:0, K2:0; Que K1:2
 		t.Fatalf("failed to add well priced transaction: %v", err)
 	}
 	tx = dynamicFeeTx(2, 100000, big.NewInt(4), big.NewInt(1), keys[1])
-	if err := pool.addRemote(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3
+	if err := pool.addRemoteSync(tx); err != nil { // +K1:3, -K1:0 => Pend K0:0 K2:0; Que K1:2 K1:3
 		t.Fatalf("failed to add well priced transaction: %v", err)
 	}
 	pending, queued = pool.Stats()
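The addRemote to addRemoteSync swaps in these hunks address a similar race: the assertions that follow (validateEvents, Stats) read pool state that an asynchronous add may not have settled yet. The fragment below sketches the sync-over-async idea under assumed names and types (demoPool, addReq); the real legacypool helpers are more involved.

// Illustrative only: an async add returns before the pool loop has processed
// the transaction; the Sync variant waits for an ack, so follow-up assertions
// observe a settled pool. Types and names here are stand-ins.
type addReq struct {
	tx   string     // stand-in for a real transaction
	done chan error // nil for fire-and-forget adds
}

type demoPool struct{ add chan addReq }

func newDemoPool() *demoPool {
	p := &demoPool{add: make(chan addReq)}
	go func() { // the pool's processing loop
		for req := range p.add {
			// ... validate, price-check, insert into pending/queued ...
			if req.done != nil {
				req.done <- nil
			}
		}
	}()
	return p
}

// addRemote returns before the pool loop has processed tx.
func (p *demoPool) addRemote(tx string) error {
	p.add <- addReq{tx: tx}
	return nil
}

// addRemoteSync blocks until the pool loop has processed tx, and so can
// also report the actual processing error (e.g. an underpriced rejection).
func (p *demoPool) addRemoteSync(tx string) error {
	done := make(chan error, 1)
	p.add <- addReq{tx: tx, done: done}
	return <-done
}

This also explains the underpriced-transaction hunk: only a synchronous add can return the pool's real verdict, which the test compares against txpool.ErrUnderpriced.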
