Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
Loading...
// blockchainInterface is the contract the JSON-RPC layer requires from the
// blockchain backend (consumer-side interface).
type blockchainInterface interface {
	// Header returns the current header of the chain (genesis if empty)
	Header() *types.Header

	// GetReceiptsByHash returns the receipts for a hash
	GetReceiptsByHash(hash types.Hash) ([]*types.Receipt, error)

	// SubscribeEvents subscribes for chain head events
	SubscribeEvents() blockchain.Subscription

	// GetHeaderByNumber returns the header by number
	GetHeaderByNumber(block uint64) (*types.Header, bool)

	// GetAvgGasPrice returns the average gas price
	GetAvgGasPrice() *big.Int

	// AddTx adds a new transaction to the tx pool
	AddTx(tx *types.Transaction) error

	// State returns a reference to the state
	State() state.State

	// BeginTxn starts a transition object
	BeginTxn(parentRoot types.Hash, header *types.Header) (*state.Transition, error)

	// GetBlockByHash gets a block using the provided hash
	GetBlockByHash(hash types.Hash, full bool) (*types.Block, bool)

	// ApplyTxn applies a transaction object to the blockchain
	ApplyTxn(header *types.Header, txn *types.Transaction) ([]byte, bool, error)

	// embedded helper methods for state access (declared elsewhere)
	stateHelperInterface
}

// jsonrpc/eth_endpoint.go

// Filter is a single installed JSON-RPC filter. It is either a block filter
// or a log filter, and may stream results over a websocket connection.
type Filter struct {
	// id is the identifier returned to the client that installed the filter
	id string

	// block filter
	block *headElem

	// log cache
	logs []*Log

	// log filter
	logFilter *LogFilter

	// index of the filter in the timer array
	index int

	// next time to timeout
	timestamp time.Time

	// websocket connection (presumably nil for HTTP-polled filters — verify)
	ws wsConn
}
// FilterManager tracks installed filters, dispatches blockchain events to
// them, and uninstalls filters once their timeout expires.
type FilterManager struct {
	logger hclog.Logger

	// store gives access to the blockchain backend
	store blockchainInterface

	// closeCh stops the Run loop when closed
	closeCh chan struct{}

	// subscription is the chain-head event subscription being consumed
	subscription blockchain.Subscription

	// filters maps filter id -> installed filter; guarded by lock
	filters map[string]*Filter
	lock    sync.Mutex

	// updateCh signals that a new filter was installed (restarts the timer)
	updateCh chan struct{}

	// timer orders filters by their timeout; timeout is the per-filter TTL
	timer   timeHeapImpl
	timeout time.Duration

	blockStream *blockStream
}
// Run is the main loop of the filter manager. It forwards blockchain events
// to the installed filters, uninstalls filters whose timeout expired, and
// returns when closeCh is closed.
func (f *FilterManager) Run() {
	// watch for new events in the blockchain
	watchCh := make(chan *blockchain.Event)

	go func() {
		for {
			evnt := f.subscription.GetEvent()
			if evnt == nil {
				return
			}
			// Forward the event, but bail out if the manager is closed while
			// waiting — otherwise this goroutine leaks, blocked on the send.
			select {
			case watchCh <- evnt:
			case <-f.closeCh:
				return
			}
		}
	}()

	var timeoutCh <-chan time.Time

	for {
		// check for the next filter to be removed
		filter := f.nextTimeoutFilter()
		if filter == nil {
			timeoutCh = nil
		} else {
			// time.Until(t) is the idiomatic form of t.Sub(time.Now())
			timeoutCh = time.After(time.Until(filter.timestamp))
		}

		select {
		case evnt := <-watchCh:
			// new blockchain event
			if err := f.dispatchEvent(evnt); err != nil {
				f.logger.Error("failed to dispatch event", "err", err)
			}

		case <-timeoutCh:
			// timeout for filter
			if !f.Uninstall(filter.id) {
				f.logger.Error("failed to uninstall filter", "id", filter.id)
			}

		case <-f.updateCh:
			// there is a new filter, reset the loop to start the timeout timer

		case <-f.closeCh:
			// stop the filter manager
			return
		}
	}
}

// WriteBlocks writes a batch of blocks
// WriteBlocks validates and writes a batch of blocks to the chain. The batch
// is first verified as a whole (numbering, linkage, consensus, body roots)
// and only then persisted block by block, dispatching an event per block.
func (b *Blockchain) WriteBlocks(blocks []*types.Block) error {
	if len(blocks) == 0 {
		return fmt.Errorf("no headers found to insert")
	}

	// the parent of the first block must already be known locally
	parent, ok := b.readHeader(blocks[0].ParentHash())
	if !ok {
		return fmt.Errorf("parent of %s (%d) not found: %s", blocks[0].Hash().String(), blocks[0].Number(), blocks[0].ParentHash())
	}

	// validate chain
	for i := 0; i < len(blocks); i++ {
		block := blocks[i]

		// block numbers must be strictly consecutive
		if block.Number()-1 != parent.Number {
			return fmt.Errorf("number sequence not correct at %d, %d and %d", i, block.Number(), parent.Number)
		}
		// each block must link to the previous header
		if block.ParentHash() != parent.Hash {
			return fmt.Errorf("parent hash not correct")
		}
		// consensus-specific header verification
		if err := b.consensus.VerifyHeader(parent, block.Header, false, true); err != nil {
			return fmt.Errorf("failed to verify the header: %v", err)
		}

		// verify body data
		if hash := buildroot.CalculateUncleRoot(block.Uncles); hash != block.Header.Sha3Uncles {
			return fmt.Errorf("uncle root hash mismatch: have %s, want %s", hash, block.Header.Sha3Uncles)
		}
		if hash := buildroot.CalculateTransactionsRoot(block.Transactions); hash != block.Header.TxRoot {
			return fmt.Errorf("transaction root hash mismatch: have %s, want %s", hash, block.Header.TxRoot)
		}

		parent = block.Header
	}

	// Write chain
	for indx, block := range blocks {
		header := block.Header

		// persist and cache the body before uncle verification
		body := block.Body()
		if err := b.db.WriteBody(header.Hash, block.Body()); err != nil {
			return err
		}
		b.bodiesCache.Add(header.Hash, body)

		// Verify uncles. It requires to have the bodies on memory
		if err := b.VerifyUncles(block); err != nil {
			return err
		}
		// Process and validate the block
		if err := b.processBlock(blocks[indx]); err != nil {
			return err
		}

		// Write the header to the chain
		evnt := &Event{}
		if err := b.writeHeaderImpl(evnt, header); err != nil {
			return err
		}
		b.dispatchEvent(evnt)

		// Update the average gas price
		b.UpdateGasPriceAvg(new(big.Int).SetUint64(header.GasUsed))
	}

	return nil
}

type Subscription interface {
	// GetEventCh returns a Blockchain Event channel
	GetEventCh() chan *Event

	// GetEvent returns the latest event (blocking)
	GetEvent() *Event

	// Close closes the subscription
	Close()
}

type Event struct {
	// OldChain is the old chain removed if there was a reorg
	OldChain []*types.Header

	// NewChain is the new part of the chain (or a fork)
	NewChain []*types.Header

	// Difficulty is the new difficulty created with this event
	Difficulty *big.Int

	// Type is the type of event
	Type EventType

	// Source is the source that generated the blocks for the event
	// right now it can be either the Sealer or the Syncer. TODO
	Source string
}

// Consensus is the interface for consensus
type Consensus interface {
	// VerifyHeader verifies the header is correct
	// NOTE(review): WriteBlocks calls VerifyHeader with four arguments
	// (parent, header, uncle, seal) — confirm which signature is current.
	VerifyHeader(parent, header *types.Header) error

	// Start starts the consensus
	Start() error

	// Close closes the connection
	Close() error
}

// Config is the configuration for the consensus
type Config struct {
	// Logger to be used by the backend
	Logger *log.Logger

	// Params are the params of the chain and the consensus
	Params *chain.Params

	// Config contains specific configuration parameters for the backend
	Config map[string]interface{}

	// Path is the location for the consensus protocol to store information
	Path string
}

type IstanbulExtra struct {
	// Validators is the validator set encoded in the header extra-data
	Validators []types.Address

	// Seal is a single seal over the header (presumably the proposer's — verify)
	Seal []byte

	// CommittedSeal holds one commit signature per committing validator
	CommittedSeal [][]byte
}

func signHash(h *types.Header) ([]byte, error) {
	//hash := istambulHeaderHash(h)
	//return hash.Bytes(), nil

	h = h.Copy() // make a copy since we update the extra field

	arena := fastrlp.DefaultArenaPool.Get()
	defer fastrlp.DefaultArenaPool.Put(arena)

	// when hashing the block for signing we have to remove from
	// the extra field the seal and committed-seal items
	extra, err := getIbftExtra(h)
	if err != nil {
		return nil, err
	}
	putIbftExtraValidators(h, extra.Validators)

	// RLP-encode the header fields (minus seals) and keccak the result
	vv := arena.NewArray()
	vv.Set(arena.NewBytes(h.ParentHash.Bytes()))
	vv.Set(arena.NewBytes(h.Sha3Uncles.Bytes()))
	vv.Set(arena.NewBytes(h.Miner.Bytes()))
	vv.Set(arena.NewBytes(h.StateRoot.Bytes()))
	vv.Set(arena.NewBytes(h.TxRoot.Bytes()))
	vv.Set(arena.NewBytes(h.ReceiptsRoot.Bytes()))
	vv.Set(arena.NewBytes(h.LogsBloom[:]))
	vv.Set(arena.NewUint(h.Difficulty))
	vv.Set(arena.NewUint(h.Number))
	vv.Set(arena.NewUint(h.GasLimit))
	vv.Set(arena.NewUint(h.GasUsed))
	vv.Set(arena.NewUint(h.Timestamp))
	vv.Set(arena.NewCopyBytes(h.ExtraData))

	buf := keccak.Keccak256Rlp(nil, vv)

	return buf, nil
}

func verifyCommitedFields(snap *Snapshot, header *types.Header) error {
	extra, err := getIbftExtra(header)
	if err != nil {
		return err
	}
	if len(extra.CommittedSeal) == 0 {
		return fmt.Errorf("empty committed seals")
	}

	// get the message that needs to be signed
	signMsg, err := signHash(header)
	if err != nil {
		return err
	}
	signMsg = commitMsg(signMsg)

	// recover each committed seal; every signer must be a distinct validator
	visited := map[types.Address]struct{}{}
	for _, seal := range extra.CommittedSeal {
		addr, err := ecrecoverImpl(seal, signMsg)
		if err != nil {
			return err
		}

		if _, ok := visited[addr]; ok {
			return fmt.Errorf("repeated seal")
		} else {
			if !snap.Set.Includes(addr) {
				return fmt.Errorf("signed by non validator")
			}
			visited[addr] = struct{}{}
		}
	}

	// at least 2F+1 distinct valid seals are required (F = max faulty nodes)
	validSeals := len(visited)
	if validSeals <= 2*snap.Set.MinFaultyNodes() {
		return fmt.Errorf("not enough seals to seal block")
	}

	return nil
}

func (i *Ibft) processHeaders(headers []*types.Header) error {
if len(headers) == 0 {
return nil
}
parentSnap, err := i.getSnapshot(headers[0].Number - 1)
if err != nil {
return err
}
snap := parentSnap.Copy()
saveSnap := func(h *types.Header) error {
if snap.Equal(parentSnap) {
return nil
}
snap.Number = h.Number
snap.Hash = h.Hash.String()
i.store.add(snap)
parentSnap = snap
snap = parentSnap.Copy()
return nil
}
for _, h := range headers {
number := h.Number
validator, err := ecrecoverFromHeader(h)
if err != nil {
return err
}
if !snap.Set.Includes(validator) {
return fmt.Errorf("unauthroized validator")
}
if number%i.epochSize == 0 {
// during a checkpoint block, we reset the voles
// and there cannot be any proposals
snap.Votes = nil
if err := saveSnap(h); err != nil {
return err
}
// remove in-memory snaphots from two epochs before this one
epoch := int(number/i.epochSize) - 2
if epoch > 0 {
purgeBlock := uint64(epoch) * i.epochSize
i.store.deleteLower(purgeBlock)
}
continue
}
// if we have a miner address, this might be a vote
if h.Miner == types.ZeroAddress {
continue
}
// the nonce selects the action
var authorize bool
if h.Nonce == nonceAuthVote {
authorize = true
} else if h.Nonce == nonceDropVote {
authorize = false
} else {
return fmt.Errorf("incorrect vote nonce")
}
// validate the vote
if authorize {
// we can only authorize if they are not on the validators list
if snap.Set.Includes(h.Miner) {
continue
}
} else {
// we can only remove if they are part of the validators list
if !snap.Set.Includes(h.Miner) {
continue
}
}
count := snap.Count(func(v *Vote) bool {
return v.Validator == validator && v.Address == h.Miner
})
if count > 1 {
// there can only be one vote per validator per address
return fmt.Errorf("more than one proposal per validator per address found")
}
if count == 0 {
// cast the new vote since there is no one yet
snap.Votes = append(snap.Votes, &Vote{
Validator: validator,
Address: h.Miner,
Authorize: authorize,
})
}
// check the tally for the proposed validator
tally := snap.Count(func(v *Vote) bool {
return v.Address == h.Miner
})
if tally > snap.Set.Len()/2 {
if authorize {
// add the proposal to the validator list
snap.Set.Add(h.Miner)
} else {
// remove the proposal from the validators list
snap.Set.Del(h.Miner)
// remove any votes casted by the removed validator
snap.RemoveVotes(func(v *Vote) bool {
return v.Validator == h.Miner
})
}
// remove all the votes that promoted this validator
snap.RemoveVotes(func(v *Vote) bool {
return v.Address == h.Miner
})
}
if err := saveSnap(h); err != nil {
return nil
}
}
// update the metadata
i.store.updateLastBlock(headers[len(headers)-1].Number)
return nil
}type snapshotStore struct {
	// lastNumber is the highest block number processed into snapshots
	lastNumber uint64

	// lock guards list
	lock sync.Mutex

	// list holds the in-memory snapshots, kept sorted
	list snapshotSortedList
}

func (i *Ibft) setupTransport() error {
	// use a gossip protocol
	topic, err := i.network.NewTopic(ibftProto, &proto.MessageReq{})
	if err != nil {
		return err
	}

	err = topic.Subscribe(func(obj interface{}) {
		msg := obj.(*proto.MessageReq)

		if !i.isSealing() {
			// if we are not sealing we do not care about the messages
			// but we need to subscribe to propagate the messages
			return
		}

		// decode sender
		if err := validateMsg(msg); err != nil {
			i.logger.Error("failed to validate msg", "err", err)
			return
		}

		if msg.From == i.validatorKeyAddr.String() {
			// we are the sender, skip this message since we already
			// relay our own messages internally.
			return
		}
		i.pushMessage(msg)
	})
	if err != nil {
		return err
	}

	i.transport = &gossipTransport{topic: topic}

	return nil
}

message MessageReq {
  // type is the type of the message
  Type type = 1;

  // from is the address of the sender
  string from = 2;

  // seal is the committed seal if message is commit
  string seal = 3;

  // signature is the crypto signature of the message
  string signature = 4;

  // view is the view assigned to the message
  View view = 5;

  // digest is the hash of the locked block
  string digest = 6;

  // proposal is the rlp encoded block in preprepare messages
  google.protobuf.Any proposal = 7;

  // Type enumerates the IBFT message kinds
  enum Type {
    Preprepare = 0;
    Prepare = 1;
    Commit = 2;
    RoundChange = 3;
  }
}

// View identifies a (round, sequence) position in the consensus protocol
message View {
  uint64 round = 1;
  uint64 sequence = 2;
}

func (i *Ibft) start() {
	// consensus always starts in SyncState mode in case it needs
	// to sync with other nodes.
	i.setState(SyncState)

	header := i.blockchain.Header()
	i.logger.Debug("current sequence", "sequence", header.Number+1)

	// run consensus cycles until the close channel fires
	for {
		select {
		case <-i.closeCh:
			return
		default: // non-blocking: fall through to the next cycle
		}

		i.runCycle()
	}
}
func (i *Ibft) runCycle() {
if i.state.view != nil {
i.logger.Debug(
"cycle",
"state",
i.getState(),
"sequence",
i.state.view.Sequence,
"round",
i.state.view.Round,
)
}
switch i.getState() {
case AcceptState:
i.runAcceptState()
case ValidateState:
i.runValidateState()
case RoundChangeState:
i.runRoundChangeState()
case SyncState:
i.runSyncState()
}
}func (s *Sealer) run(ctx context.Context) {
	// listen for new chain-head events to restart sealing on
	sub := s.blockchain.SubscribeEvents()
	eventCh := sub.GetEventCh()

	for {
		if s.config.DevMode {
			// In dev-mode we wait for new transactions to seal blocks
			select {
			case <-s.wakeCh:
			case <-ctx.Done():
				return
			}
		}

		// start sealing
		subCtx, cancel := context.WithCancel(ctx)
		done := s.sealAsync(subCtx)

		// wait for the sealing to be done
		select {
		case <-done:
			// the sealing process has finished
		case <-ctx.Done():
			// the sealing routine has been canceled
		case <-eventCh:
			// there is a new head, reset sealer
		}

		// cancel the sealing process context
		cancel()

		// stop looping once the outer context has been canceled
		if ctx.Err() != nil {
			return
		}
	}
}

service V1 {
  // GetCurrent returns status information regarding the specific point in time
  rpc GetCurrent(google.protobuf.Empty) returns (V1Status);

  // GetObjectsByHash returns any type of object (Header, Body, Receipts...)
  rpc GetObjectsByHash(HashRequest) returns (Response);

  // GetHeaders returns a range of headers
  rpc GetHeaders(GetHeadersRequest) returns (Response);

  // Watch streams what new blocks get included
  rpc Watch(google.protobuf.Empty) returns (stream V1Status);
}

// V1Status describes the chain head at a point in time
message V1Status {
  string difficulty = 1;
  string hash = 2;
  int64 number = 3;
}

// TxnPoolOperator is the operator service for the transaction pool
service TxnPoolOperator {
  // Status returns the current status of the pool
  rpc Status(google.protobuf.Empty) returns (TxnPoolStatusResp);

  // AddTxn adds a local transaction to the pool
  rpc AddTxn(AddTxnReq) returns (google.protobuf.Empty);

  // Subscribe subscribes for new events in the txpool
  rpc Subscribe(google.protobuf.Empty) returns (stream TxPoolEvent);
}
// AddTx adds a new transaction to the pool, gossips it to peers when
// networking is enabled (and not in dev mode), and signals NotifyCh
// without ever blocking the caller.
func (t *TxPool) AddTx(tx *types.Transaction) error {
	if err := t.addImpl("addTxn", tx); err != nil {
		return err
	}

	// Gossip the transaction only when a network topic exists and the
	// pool is not running in dev mode
	shouldBroadcast := t.topic != nil && !t.dev
	if shouldBroadcast {
		msg := &proto.Txn{
			Raw: &any.Any{Value: tx.MarshalRLP()},
		}
		if publishErr := t.topic.Publish(msg); publishErr != nil {
			t.logger.Error("failed to topic txn", "err", publishErr)
		}
	}

	// Non-blocking notification of any listener
	if t.NotifyCh != nil {
		select {
		case t.NotifyCh <- struct{}{}:
		default:
		}
	}

	return nil
}
// addImpl validates and enqueues one or more transactions, then promotes
// any that became executable into the price-sorted heap.
// NOTE(review): all txns are enqueued under the sender of the *last*
// processed transaction (`from`) — looks like the batch is assumed to come
// from a single account; confirm with callers.
func (t *TxPool) addImpl(ctx string, txns ...*types.Transaction) error {
	if len(txns) == 0 {
		return nil
	}

	from := txns[0].From
	for _, txn := range txns {
		// Since this is a single point of inclusion for new transactions both
		// to the promoted queue and pending queue we use this point to calculate the hash
		txn.ComputeHash()

		err := t.validateTx(txn)
		if err != nil {
			return err
		}

		if txn.From == types.ZeroAddress {
			// recover the sender from the signature
			txn.From, err = t.signer.Sender(txn)
			if err != nil {
				return fmt.Errorf("invalid sender")
			}
			from = txn.From
		} else {
			// only if we are in dev mode we can accept
			// a transaction without validation
			if !t.dev {
				return fmt.Errorf("cannot accept non-encrypted txn")
			}
		}

		t.logger.Debug("add txn", "ctx", ctx, "hash", txn.Hash, "from", from)
	}

	// lazily create the per-account queue, seeding its nonce from state
	txnsQueue, ok := t.queue[from]
	if !ok {
		stateRoot := t.store.Header().StateRoot

		// initialize the txn queue for the account
		txnsQueue = newTxQueue()
		txnsQueue.nextNonce = t.store.GetNonce(stateRoot, from)
		t.queue[from] = txnsQueue
	}
	for _, txn := range txns {
		txnsQueue.Add(txn)
	}

	// promote executable transactions into the price-ordered heap
	for _, promoted := range txnsQueue.Promote() {
		t.sorted.Push(promoted)
	}

	return nil
}

// TxPool is a pool of transactions
type TxPool struct {
	logger hclog.Logger

	// signer recovers transaction senders; store gives chain/state access
	signer signer
	store  store

	idlePeriod time.Duration

	// queue holds per-account pending transactions; sorted is the
	// price-ordered heap of promoted (executable) transactions
	queue  map[types.Address]*txQueue
	sorted *txPriceHeap

	// network stack
	network *network.Server
	topic   *network.Topic

	// sealing/dev flags control validation and broadcast behavior
	sealing bool
	dev     bool

	// NotifyCh signals that a new transaction was added (non-blocking sends)
	NotifyCh chan struct{}

	proto.UnimplementedTxnPoolOperatorServer
}

2021-11-04T15:41:07.665+0100 [ERROR] polygon.consensus.dev: failed to write transaction: transaction's gas limit exceeds block gas limit

// Storage is a generic blockchain storage
type Storage interface {
	// canonical number -> hash index
	ReadCanonicalHash(n uint64) (types.Hash, bool)
	WriteCanonicalHash(n uint64, hash types.Hash) error

	// chain head pointers
	ReadHeadHash() (types.Hash, bool)
	ReadHeadNumber() (uint64, bool)
	WriteHeadHash(h types.Hash) error
	WriteHeadNumber(uint64) error

	// known fork hashes
	WriteForks(forks []types.Hash) error
	ReadForks() ([]types.Hash, error)

	// total difficulty per block hash
	WriteDiff(hash types.Hash, diff *big.Int) error
	ReadDiff(hash types.Hash) (*big.Int, bool)

	// headers, bodies, snapshots, receipts — all keyed by block hash
	WriteHeader(h *types.Header) error
	ReadHeader(hash types.Hash) (*types.Header, error)
	WriteCanonicalHeader(h *types.Header, diff *big.Int) error
	WriteBody(hash types.Hash, body *types.Body) error
	ReadBody(hash types.Hash) (*types.Body, error)
	WriteSnapshot(hash types.Hash, blob []byte) error
	ReadSnapshot(hash types.Hash) ([]byte, bool)
	WriteReceipts(hash types.Hash, receipts []*types.Receipt) error
	ReadReceipts(hash types.Hash) ([]*types.Receipt, error)

	// txn hash -> containing block hash lookup
	WriteTxLookup(hash types.Hash, blockHash types.Hash) error
	ReadTxLookup(hash types.Hash) (types.Hash, bool)

	Close() error
}

// Prefixes for the key-value store
var (
	// DIFFICULTY is the difficulty prefix
	DIFFICULTY = []byte("d")

	// HEADER is the header prefix
	HEADER = []byte("h")

	// HEAD is the chain head prefix
	HEAD = []byte("o")

	// FORK is the entry to store forks
	FORK = []byte("f")

	// CANONICAL is the prefix for the canonical chain numbers
	CANONICAL = []byte("c")

	// BODY is the prefix for bodies
	BODY = []byte("b")

	// RECEIPTS is the prefix for receipts
	RECEIPTS = []byte("r")

	// SNAPSHOTS is the prefix for snapshots
	SNAPSHOTS = []byte("s")

	// TX_LOOKUP_PREFIX is the prefix for transaction lookups
	TX_LOOKUP_PREFIX = []byte("l")
)

// Sub-prefixes
var (
	HASH   = []byte("hash")
	NUMBER = []byte("number")
	EMPTY  = []byte("empty")
)

service System {
  // GetStatus returns info about the client
  rpc GetStatus(google.protobuf.Empty) returns (ServerStatus);

  // PeersAdd adds a new peer
  rpc PeersAdd(PeersAddRequest) returns (google.protobuf.Empty);

  // PeersList returns the list of peers
  rpc PeersList(google.protobuf.Empty) returns (PeersListResponse);

  // PeersStatus returns the info of a peer
  rpc PeersStatus(PeersStatusRequest) returns (Peer);

  // Subscribe subscribes to blockchain events
  rpc Subscribe(google.protobuf.Empty) returns (stream BlockchainEvent);
}

type State interface {
	// NewSnapshotAt gets a snapshot for a specific state root hash
	NewSnapshotAt(types.Hash) (Snapshot, error)

	// NewSnapshot gets the latest snapshot
	NewSnapshot() Snapshot

	// GetCode returns the code for a code hash
	GetCode(hash types.Hash) ([]byte, bool)
}

type Snapshot interface {
	// Get gets a specific value for a leaf
	Get(k []byte) ([]byte, bool)

	// Commit commits new information and returns the new snapshot plus
	// its root (as raw bytes)
	Commit(objs []*Object) (Snapshot, []byte)
}

// Object is the serialization of the radix object
type Object struct {
	// account identity and code reference
	Address  types.Address
	CodeHash types.Hash

	// account state
	Balance *big.Int
	Root    types.Hash
	Nonce   uint64

	// Deleted marks the account for removal
	Deleted bool

	// DirtyCode indicates Code below needs to be persisted
	DirtyCode bool
	Code      []byte

	// Storage holds the modified storage slots
	Storage []*StorageObject
}

func (t *Transition) apply(msg *types.Transaction) ([]byte, uint64, bool, error) {
	// check if there is enough gas in the pool
	if err := t.subGasPool(msg.Gas); err != nil {
		return nil, 0, false, err
	}

	txn := t.state
	s := txn.Snapshot()

	// preCheck returns the gas remaining for execution after upfront costs
	gas, err := t.preCheck(msg)
	if err != nil {
		return nil, 0, false, err
	}
	if gas > msg.Gas {
		return nil, 0, false, errorVMOutOfGas
	}

	gasPrice := new(big.Int).SetBytes(msg.GetGasPrice())
	value := new(big.Int).SetBytes(msg.Value)

	// Set the specific transaction fields in the context
	t.ctx.GasPrice = types.BytesToHash(msg.GetGasPrice())
	t.ctx.Origin = msg.From

	var subErr error
	var gasLeft uint64
	var returnValue []byte

	if msg.IsContractCreation() {
		_, gasLeft, subErr = t.Create2(msg.From, msg.Input, value, gas)
	} else {
		// nonce is incremented only for calls; creation presumably handles
		// it inside Create2 — verify
		txn.IncrNonce(msg.From)
		returnValue, gasLeft, subErr = t.Call2(msg.From, *msg.To, msg.Input, value, gas)
	}

	if subErr != nil {
		// only insufficient funds rolls back here; other VM errors fall
		// through and are surfaced via the bool result below
		if subErr == runtime.ErrNotEnoughFunds {
			txn.RevertToSnapshot(s)
			return nil, 0, false, subErr
		}
	}

	// gas refund, capped at half of the gas actually used
	gasUsed := msg.Gas - gasLeft
	refund := gasUsed / 2
	if refund > txn.GetRefund() {
		refund = txn.GetRefund()
	}

	gasLeft += refund
	gasUsed -= refund

	// refund the sender
	remaining := new(big.Int).Mul(new(big.Int).SetUint64(gasLeft), gasPrice)
	txn.AddBalance(msg.From, remaining)

	// pay the coinbase
	coinbaseFee := new(big.Int).Mul(new(big.Int).SetUint64(gasUsed), gasPrice)
	txn.AddBalance(t.ctx.Coinbase, coinbaseFee)

	// return gas to the pool
	t.addGasPool(gasLeft)

	return returnValue, gasUsed, subErr != nil, nil
}

func init() {
	// unsigned arithmetic operations
	// handler fields: (implementation, min stack items, gas cost)
	register(STOP, handler{opStop, 0, 0})
	register(ADD, handler{opAdd, 2, 3})
	register(SUB, handler{opSub, 2, 3})
	register(MUL, handler{opMul, 2, 5})
	register(DIV, handler{opDiv, 2, 5})
	register(SDIV, handler{opSDiv, 2, 5})
	register(MOD, handler{opMod, 2, 5})
	register(SMOD, handler{opSMod, 2, 5})
	register(EXP, handler{opExp, 2, 10})

	// ... (remaining opcode registrations elided in this excerpt) ...

	// jumps
	register(JUMP, handler{opJump, 1, 8})
	register(JUMPI, handler{opJumpi, 2, 10})
	register(JUMPDEST, handler{opJumpDest, 0, 1})
}
// Run executes the virtual machine bytecode until the program halts,
// errors, or runs off the end of the code. It returns the return buffer
// and any VM error recorded during execution.
func (c *state) Run() ([]byte, error) {
	var vmerr error

	codeSize := len(c.code)

	for !c.stop {
		// running past the end of the code is a normal halt
		if c.ip >= codeSize {
			c.halt()
			break
		}

		op := OpCode(c.code[c.ip])

		inst := dispatchTable[op]
		if inst.inst == nil {
			c.exit(errOpCodeNotFound)
			break
		}

		// check if the depth of the stack is enough for the instruction
		if c.sp < inst.stack {
			c.exit(errStackUnderflow)
			break
		}

		// consume the gas of the instruction
		if !c.consumeGas(inst.gas) {
			c.exit(errOutOfGas)
			break
		}

		// execute the instruction
		inst.inst(c)

		// check if stack size exceeds the max size
		if c.sp > stackSize {
			c.exit(errStackOverflow)
			break
		}
		c.ip++
	}

	if err := c.err; err != nil {
		vmerr = err
	}

	return c.ret, vmerr
}

func NewServer(logger hclog.Logger, config *Config) (*Server, error) {
	m := &Server{
		logger:     logger,
		config:     config,
		chain:      config.Chain,
		grpcServer: grpc.NewServer(),
	}

	m.logger.Info("Data dir", "path", config.DataDir)

	// Generate all the paths in the dataDir
	if err := setupDataDir(config.DataDir, dirPaths); err != nil {
		return nil, fmt.Errorf("failed to create data directories: %v", err)
	}

	// Get the private key for the node
	keystore := keystore.NewLocalKeystore(filepath.Join(config.DataDir, "keystore"))
	key, err := keystore.Get()
	if err != nil {
		return nil, fmt.Errorf("failed to read private key: %v", err)
	}
	m.key = key

	// Open the on-disk blockchain storage
	storage, err := leveldb.NewLevelDBStorage(filepath.Join(config.DataDir, "blockchain"), logger)
	if err != nil {
		return nil, err
	}
	m.storage = storage

	// Setup consensus
	if err := m.setupConsensus(); err != nil {
		return nil, err
	}

	// Open the state trie storage and build the executor with both
	// the precompiled contracts and the EVM runtime
	stateStorage, err := itrie.NewLevelDBStorage(filepath.Join(m.config.DataDir, "trie"), logger)
	if err != nil {
		return nil, err
	}
	st := itrie.NewState(stateStorage)
	m.state = st

	executor := state.NewExecutor(config.Chain.Params, st)
	executor.SetRuntime(precompiled.NewPrecompiled())
	executor.SetRuntime(evm.NewEVM())

	// Blockchain object
	m.blockchain, err = blockchain.NewBlockchain(logger, storage, config.Chain, m.consensus, executor)
	if err != nil {
		return nil, err
	}
	// the executor resolves block hashes through the blockchain
	executor.GetHash = m.blockchain.GetHashHelper

	// Setup sealer
	sealerConfig := &sealer.Config{
		Coinbase: crypto.PubKeyToAddress(&m.key.PublicKey),
	}
	m.Sealer = sealer.NewSealer(sealerConfig, logger, m.blockchain, m.consensus, executor)
	m.Sealer.SetEnabled(m.config.Seal)

	// Setup the libp2p server
	if err := m.setupLibP2P(); err != nil {
		return nil, err
	}

	// Setup the GRPC server
	if err := m.setupGRPC(); err != nil {
		return nil, err
	}

	// Setup jsonrpc
	if err := m.setupJSONRPC(); err != nil {
		return nil, err
	}

	// Setup the syncer protocol
	m.syncer = protocol.NewSyncer(logger, m.blockchain)
	m.syncer.Register(m.libp2pServer.GetGRPCServer())
	m.syncer.Start()

	// Register the libp2p GRPC endpoints
	proto.RegisterHandshakeServer(m.libp2pServer.GetGRPCServer(), &handshakeService{s: m})

	m.libp2pServer.Serve()

	return m, nil
}