Merge pull request #1402 from maticnetwork/v1.5.4-candidate
V1.5.4 candidate
cffls authored Jan 10, 2025
2 parents 37e6f70 + 36954c3 commit ceb62bb
Showing 28 changed files with 225 additions and 173 deletions.
19 changes: 18 additions & 1 deletion .github/workflows/amoy_deb_profiles.yml
@@ -194,7 +194,6 @@ jobs:
ARCH: all
NODE: sentry
NETWORK: amoy

- name: Running package build for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
run: dpkg-deb --build --root-owner-group packaging/deb/bor-pbss-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
env:
@@ -233,6 +232,12 @@ jobs:
ARCH: all
NODE: validator
NETWORK: amoy
- name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
env:
ARCH: all
NODE: validator
NETWORK: amoy
- name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
env:
@@ -271,6 +276,12 @@ jobs:
ARCH: all
NODE: validator
NETWORK: amoy
- name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-pbss-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
env:
ARCH: all
NODE: validator
NETWORK: amoy
- name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
run: dpkg-deb --build --root-owner-group packaging/deb/bor-pbss-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
env:
@@ -315,6 +326,12 @@ jobs:
ARCH: all
NODE: archive
NETWORK: amoy
- name: Copying systemd file for ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }}
run: cp -rp packaging/templates/systemd/bor.service packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}/lib/systemd/system/
env:
ARCH: all
NODE: archive
NETWORK: amoy
- name: Building bor ${{ env.NODE }} on ${{ env.NETWORK }} on ${{ env.ARCH }} profile
run: dpkg-deb --build --root-owner-group packaging/deb/bor-${{ env.NETWORK }}-${{ env.NODE }}-config_${{ env.GIT_TAG }}-${{ env.ARCH }}
env:
6 changes: 3 additions & 3 deletions builder/files/config.toml
@@ -58,9 +58,9 @@ syncmode = "full"
nolocals = true
pricelimit = 25000000000
accountslots = 16
globalslots = 32768
accountqueue = 16
globalqueue = 32768
globalslots = 131072
accountqueue = 64
globalqueue = 131072
lifetime = "1h30m0s"
# locals = []
# journal = ""
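The pool limits above quadruple: `globalslots` and `globalqueue` go from 32768 to 131072 slots, and `accountqueue` from 16 to 64. A back-of-the-envelope sketch of what the new ceilings imply, assuming go-ethereum's legacy-pool slot accounting where one slot covers up to 32 KiB of encoded transaction (`txSlotSize` is an upstream geth constant, not part of this diff):

```go
package main

import "fmt"

const (
	txSlotSize   = 32 * 1024 // bytes per slot (assumed, mirrors upstream geth's legacypool)
	globalSlots  = 131072    // executable (pending) slots across all accounts
	globalQueue  = 131072    // non-executable (queued) slots across all accounts
	accountQueue = 64        // queued slots permitted per account
)

func main() {
	totalSlots := globalSlots + globalQueue
	fmt.Printf("total slots: %d\n", totalSlots) // 262144

	// Worst case: every slot holds a maximum-size transaction.
	fmt.Printf("worst-case pool size: %d GiB\n", totalSlots*txSlotSize>>30) // 8 GiB

	// Distinct accounts that can fill their per-account queue before the
	// global queue cap is reached.
	fmt.Printf("accounts at full queue: %d\n", globalQueue/accountQueue) // 2048
}
```

In practice most transactions occupy a single slot, so the realistic footprint sits far below the 8 GiB ceiling; the change mainly lets the pool absorb larger bursts before evicting.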
2 changes: 1 addition & 1 deletion cmd/clef/intapi_changelog.md
@@ -14,7 +14,7 @@ Additional labels for pre-release and build metadata are available as extensions

Added `clef_New` to the internal API callable from a UI.

> `New` creates a new password protected Account. The private key is protected with
> `New` creates a new password-protected Account. The private key is protected with
> the given password. Users are responsible to backup the private key that is stored
> in the keystore location that was specified when this API was created.
> This method is the same as New on the external API, the difference being that
5 changes: 3 additions & 2 deletions consensus/bor/bor.go
@@ -468,8 +468,9 @@ func (c *Bor) verifyCascadingFields(chain consensus.ChainHeaderReader, header *t

// Verify the validator list match the local contract
if IsSprintStart(number+1, c.config.CalculateSprint(number)) {
newValidators, err := c.spanner.GetCurrentValidatorsByBlockNrOrHash(context.Background(), rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber), number+1)

// Use parent block's hash to make the eth_call to fetch validators so that the state being
// used to make the call is of the same fork.
newValidators, err := c.spanner.GetCurrentValidatorsByBlockNrOrHash(context.Background(), rpc.BlockNumberOrHashWithHash(header.ParentHash, false), number+1)
if err != nil {
return err
}
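The bor.go fix above swaps the block selector used for the validator `eth_call`: instead of resolving against whatever the node currently considers `latest` (which, mid-reorg, can live on a different fork than the header under verification), it pins the call to the parent block's hash. A minimal sketch of the two selectors, assuming go-ethereum's `rpc` package; the hash is a stand-in for `header.ParentHash`:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	parentHash := common.HexToHash("0x01") // stand-in for header.ParentHash

	// Before: state resolved at "latest", which may sit on another fork.
	latest := rpc.BlockNumberOrHashWithNumber(rpc.LatestBlockNumber)

	// After: state pinned to the parent hash, so the validator set is read
	// from the same fork as the header being verified. The second argument
	// (requireCanonical=false) tolerates parents on a side chain.
	byParent := rpc.BlockNumberOrHashWithHash(parentHash, false)

	fmt.Printf("%+v\n%+v\n", latest, byParent)
}
```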
63 changes: 35 additions & 28 deletions core/blockchain.go
@@ -39,7 +39,6 @@ import (
"github.com/ethereum/go-ethereum/common/mclock"
"github.com/ethereum/go-ethereum/common/prque"
"github.com/ethereum/go-ethereum/consensus"
"github.com/ethereum/go-ethereum/core/blockstm"
"github.com/ethereum/go-ethereum/core/rawdb"
"github.com/ethereum/go-ethereum/core/state"
"github.com/ethereum/go-ethereum/core/state/snapshot"
@@ -86,12 +85,15 @@ var (
blockImportTimer = metrics.NewRegisteredMeter("chain/imports", nil)
triedbCommitTimer = metrics.NewRegisteredTimer("chain/triedb/commits", nil)

blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
blockExecutionParallelCounter = metrics.NewRegisteredCounter("chain/execution/parallel", nil)
blockExecutionSerialCounter = metrics.NewRegisteredCounter("chain/execution/serial", nil)
blockInsertTimer = metrics.NewRegisteredTimer("chain/inserts", nil)
blockValidationTimer = metrics.NewRegisteredTimer("chain/validation", nil)
blockExecutionTimer = metrics.NewRegisteredTimer("chain/execution", nil)
blockWriteTimer = metrics.NewRegisteredTimer("chain/write", nil)
blockExecutionParallelCounter = metrics.NewRegisteredCounter("chain/execution/parallel", nil)
blockExecutionSerialCounter = metrics.NewRegisteredCounter("chain/execution/serial", nil)
blockExecutionParallelErrorCounter = metrics.NewRegisteredCounter("chain/execution/parallel/error", nil)
blockExecutionParallelTimer = metrics.NewRegisteredTimer("chain/execution/parallel/timer", nil)
blockExecutionSerialTimer = metrics.NewRegisteredTimer("chain/execution/serial/timer", nil)

blockReorgMeter = metrics.NewRegisteredMeter("chain/reorg/executes", nil)
blockReorgAddMeter = metrics.NewRegisteredMeter("chain/reorg/add", nil)
@@ -569,7 +571,7 @@ func NewParallelBlockChain(db ethdb.Database, cacheConfig *CacheConfig, genesis
return bc, nil
}

func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, blockEndErr error) {
func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header) (_ types.Receipts, _ []*types.Log, _ uint64, _ *state.StateDB, vtime time.Duration, blockEndErr error) {
// Process the block using processor and parallelProcessor at the same time, take the one which finishes first, cancel the other, and return the result
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -597,6 +599,7 @@ func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header) (_
err error
statedb *state.StateDB
counter metrics.Counter
parallel bool
}

resultChan := make(chan Result, 2)
@@ -606,44 +609,58 @@
if bc.parallelProcessor != nil {
parallelStatedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
if err != nil {
return nil, nil, 0, nil, err
return nil, nil, 0, nil, 0, err
}
parallelStatedb.SetLogger(bc.logger)

processorCount++

go func() {
parallelStatedb.StartPrefetcher("chain", nil)
pstart := time.Now()
receipts, logs, usedGas, err := bc.parallelProcessor.Process(block, parallelStatedb, bc.vmConfig, ctx)
resultChan <- Result{receipts, logs, usedGas, err, parallelStatedb, blockExecutionParallelCounter}
blockExecutionParallelTimer.UpdateSince(pstart)
if err == nil {
vstart := time.Now()
err = bc.validator.ValidateState(block, parallelStatedb, receipts, usedGas, false)
vtime = time.Since(vstart)
}
resultChan <- Result{receipts, logs, usedGas, err, parallelStatedb, blockExecutionParallelCounter, true}
}()
}

if bc.processor != nil {
statedb, err := state.New(parent.Root, bc.stateCache, bc.snaps)
if err != nil {
return nil, nil, 0, nil, err
return nil, nil, 0, nil, 0, err
}
statedb.SetLogger(bc.logger)

processorCount++

go func() {
statedb.StartPrefetcher("chain", nil)
pstart := time.Now()
receipts, logs, usedGas, err := bc.processor.Process(block, statedb, bc.vmConfig, ctx)
resultChan <- Result{receipts, logs, usedGas, err, statedb, blockExecutionSerialCounter}
blockExecutionSerialTimer.UpdateSince(pstart)
if err == nil {
vstart := time.Now()
err = bc.validator.ValidateState(block, statedb, receipts, usedGas, false)
vtime = time.Since(vstart)
}
resultChan <- Result{receipts, logs, usedGas, err, statedb, blockExecutionSerialCounter, false}
}()
}

result := <-resultChan

if _, ok := result.err.(blockstm.ParallelExecFailedError); ok {
if result.parallel && result.err != nil {
log.Warn("Parallel state processor failed", "err", result.err)

blockExecutionParallelErrorCounter.Inc(1)
// If the parallel processor failed, we will fallback to the serial processor if enabled
if processorCount == 2 {
result.statedb.StopPrefetcher()
result = <-resultChan
result.statedb.StopPrefetcher()
processorCount--
}
}
@@ -658,7 +675,7 @@ func (bc *BlockChain) ProcessBlock(block *types.Block, parent *types.Header) (_
}()
}

return result.receipts, result.logs, result.usedGas, result.statedb, result.err
return result.receipts, result.logs, result.usedGas, result.statedb, vtime, result.err
}

// empty returns an indicator whether the blockchain is empty.
@@ -2323,7 +2340,7 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)

// Process block using the parent state as reference point
pstart := time.Now()
receipts, logs, usedGas, statedb, err := bc.ProcessBlock(block, parent)
receipts, logs, usedGas, statedb, vtime, err := bc.ProcessBlock(block, parent)
activeState = statedb

if err != nil {
@@ -2338,18 +2355,8 @@ func (bc *BlockChain) insertChain(chain types.Blocks, setHead bool) (int, error)
bc.stateSyncFeed.Send(StateSyncEvent{Data: data})
}
// BOR
ptime := time.Since(pstart)

vstart := time.Now()

if err := bc.validator.ValidateState(block, statedb, receipts, usedGas, false); err != nil {
bc.reportBlock(block, receipts, err)
followupInterrupt.Store(true)

return it.index, err
}
ptime := time.Since(pstart) - vtime

vtime := time.Since(vstart)
proctime := time.Since(start) // processing + validation

// Update the metrics touched during block processing and validation
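Two things change in `ProcessBlock`: state validation now runs inside each processor goroutine (so `vtime` is measured per attempt and the winning result arrives pre-validated), and the fallback trigger widens from the specific `blockstm.ParallelExecFailedError` to any error from the parallel processor. A stripped-down sketch of the resulting race-with-fallback shape, assuming both processors are enabled; the payloads and process functions are illustrative, not bor's real types:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type result struct {
	payload  string
	err      error
	parallel bool
}

// race runs both processors concurrently, takes whichever answers first,
// and falls back to the serial result on any parallel failure.
func race(ctx context.Context, parallel, serial func(context.Context) (string, error)) (string, error) {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel() // cancel the loser once a usable result is picked

	results := make(chan result, 2) // buffered so the loser never blocks
	go func() { p, err := parallel(ctx); results <- result{p, err, true} }()
	go func() { p, err := serial(ctx); results <- result{p, err, false} }()

	res := <-results
	if res.parallel && res.err != nil {
		// Any parallel error now triggers the fallback, not just
		// blockstm.ParallelExecFailedError as before this change.
		res = <-results
	}
	return res.payload, res.err
}

func main() {
	out, err := race(context.Background(),
		func(ctx context.Context) (string, error) { return "", errors.New("parallel exec failed") },
		func(ctx context.Context) (string, error) {
			time.Sleep(10 * time.Millisecond) // the serial path is slower but sound
			return "serial ok", nil
		},
	)
	fmt.Println(out, err)
}
```

The new `TestSuccessfulBlockImportParallelFailed` in the next file exercises exactly this path: a parallel processor that always errors paired with a deliberately slow serial one.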
72 changes: 71 additions & 1 deletion core/blockchain_test.go
@@ -17,6 +17,7 @@
package core

import (
"context"
"errors"
"fmt"
"math/big"
@@ -170,7 +171,7 @@ func testBlockChainImport(chain types.Blocks, blockchain *BlockChain) error {
return err
}

receipts, _, usedGas, statedb, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header())
receipts, _, usedGas, statedb, _, err := blockchain.ProcessBlock(block, blockchain.GetBlockByHash(block.ParentHash()).Header())

if err != nil {
blockchain.reportBlock(block, receipts, err)
@@ -216,6 +217,75 @@ func testParallelBlockChainImport(t *testing.T, scheme string) {
}
}

type AlwaysFailParallelStateProcessor struct {
}

func (p *AlwaysFailParallelStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config, interruptCtx context.Context) (types.Receipts, []*types.Log, uint64, error) {
return nil, nil, 0, errors.New("always fail")
}

type SlowSerialStateProcessor struct {
s Processor
}

func NewSlowSerialStateProcessor(s Processor) *SlowSerialStateProcessor {
return &SlowSerialStateProcessor{s: s}
}

func (p *SlowSerialStateProcessor) Process(block *types.Block, statedb *state.StateDB, cfg vm.Config, interruptCtx context.Context) (types.Receipts, []*types.Log, uint64, error) {
time.Sleep(100 * time.Millisecond)
return p.s.Process(block, statedb, cfg, interruptCtx)
}

func TestSuccessfulBlockImportParallelFailed(t *testing.T) {
t.Parallel()

testSuccessfulBlockImportParallelFailed(t, rawdb.HashScheme)
testSuccessfulBlockImportParallelFailed(t, rawdb.PathScheme)
}

func testSuccessfulBlockImportParallelFailed(t *testing.T, scheme string) {
// Create a new blockchain with 10 initial blocks
db, _, blockchain, err := newCanonical(ethash.NewFaker(), 10, true, scheme)
blockchain.parallelProcessor = &AlwaysFailParallelStateProcessor{}
blockchain.processor = NewSlowSerialStateProcessor(blockchain.processor)
if err != nil {
t.Fatalf("failed to create canonical chain: %v", err)
}
defer blockchain.Stop()

// Create valid blocks to import
block := blockchain.GetBlockByHash(blockchain.CurrentBlock().Hash())
blocks := makeBlockChain(blockchain.chainConfig, block, 5, ethash.NewFaker(), db, canonicalSeed)

// Import the blocks
n, err := blockchain.InsertChain(blocks)
if err != nil {
t.Fatalf("failed to import valid blocks: %v", err)
}

// Verify all blocks were imported
if n != len(blocks) {
t.Errorf("imported %d blocks, wanted %d", n, len(blocks))
}

// Verify the last block is properly linked
if blockchain.CurrentBlock().Hash() != blocks[len(blocks)-1].Hash() {
t.Errorf("current block hash mismatch: got %x, want %x",
blockchain.CurrentBlock().Hash(),
blocks[len(blocks)-1].Hash())
}

// Verify block numbers are sequential
for i, block := range blocks {
expectedNumber := uint64(11 + i) // 10 initial blocks + new blocks
if block.NumberU64() != expectedNumber {
t.Errorf("block %d has wrong number: got %d, want %d",
i, block.NumberU64(), expectedNumber)
}
}
}

// testHeaderChainImport tries to process a chain of header, writing them into
// the database if successful.
func testHeaderChainImport(chain []*types.Header, blockchain *BlockChain) error {
32 changes: 19 additions & 13 deletions core/state/statedb.go
@@ -854,22 +854,28 @@ func (s *StateDB) SetState(addr common.Address, key, value common.Hash) {
// storage. This function should only be used for debugging and the mutations
// must be discarded afterwards.
func (s *StateDB) SetStorage(addr common.Address, storage map[common.Hash]common.Hash) {
// SetStorage needs to wipe existing storage. We achieve this by pretending
// that the account self-destructed earlier in this block, by flagging
// it in stateObjectsDestruct. The effect of doing so is that storage lookups
// will not hit disk, since it is assumed that the disk-data is belonging
// SetStorage needs to wipe the existing storage. We achieve this by marking
// the account as self-destructed in this block. The effect is that storage
// lookups will not hit the disk, as it is assumed that the disk data belongs
// to a previous incarnation of the object.
//
// TODO(rjl493456442) this function should only be supported by 'unwritable'
// state and all mutations made should all be discarded afterwards.
if _, ok := s.stateObjectsDestruct[addr]; !ok {
s.stateObjectsDestruct[addr] = nil
// TODO (rjl493456442): This function should only be supported by 'unwritable'
// state, and all mutations made should be discarded afterward.
obj := s.getStateObject(addr)
if obj != nil {
if _, ok := s.stateObjectsDestruct[addr]; !ok {
s.stateObjectsDestruct[addr] = obj
}
}

stateObject := s.getOrNewStateObject(addr)

newObj := s.createObject(addr)
for k, v := range storage {
stateObject.SetState(k, v)
newObj.SetState(k, v)
}
// Inherit the metadata of original object if it was existent
if obj != nil {
newObj.SetCode(common.BytesToHash(obj.CodeHash()), obj.code)
newObj.SetNonce(obj.Nonce())
newObj.SetBalance(obj.Balance(), tracing.BalanceChangeUnspecified)
}
}

@@ -988,7 +994,7 @@ func (s *StateDB) getStateObject(addr common.Address) *stateObject {
var data *types.StateAccount
if s.snap != nil {
start := time.Now()
acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
acc, err := s.snap.Account(crypto.HashData(crypto.NewKeccakState(), addr.Bytes()))
s.SnapshotAccountReads += time.Since(start)
if err == nil {
if acc == nil {
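The `getStateObject` hunk above replaces the `StateDB`-cached `s.hasher` with a fresh keccak state per snapshot account read. `crypto.HashData` drives a stateful `KeccakState`, so a shared hasher is unsafe if two goroutines resolve accounts concurrently (as the parallel processor can); allocating per call trades one small allocation for goroutine safety. That motivation is my reading, not stated in the diff. A minimal sketch of the now-safe pattern, assuming go-ethereum's `crypto` package:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

// hashAddr derives the snapshot key for an account. Each call gets its own
// KeccakState, so concurrent lookups never share hasher state.
func hashAddr(addr common.Address) common.Hash {
	return crypto.HashData(crypto.NewKeccakState(), addr.Bytes())
}

func main() {
	addr := common.HexToAddress("0x000000000000000000000000000000000000dEaD")
	fmt.Println(hashAddr(addr))
}
```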
6 changes: 3 additions & 3 deletions docs/cli/server.md
@@ -290,13 +290,13 @@ The ```bor server``` command runs the Bor client.

### Transaction Pool Options

- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account (default: 16)
- ```txpool.accountqueue```: Maximum number of non-executable transaction slots permitted per account (default: 64)

- ```txpool.accountslots```: Minimum number of executable transaction slots guaranteed per account (default: 16)

- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts (default: 32768)
- ```txpool.globalqueue```: Maximum number of non-executable transaction slots for all accounts (default: 131072)

- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts (default: 32768)
- ```txpool.globalslots```: Maximum number of executable transaction slots for all accounts (default: 131072)

- ```txpool.journal```: Disk journal for local transaction to survive node restarts (default: transactions.rlp)
