/*
Copyright (C) BABEC. All rights reserved.
Copyright (C) THL A29 Limited, a Tencent company. All rights reserved.

SPDX-License-Identifier: Apache-2.0
*/

package scheduler

import (
	"crypto/sha256"
	"errors"
	"fmt"
	"regexp"
	"strconv"
	"sync"
	"time"

	ac "chainmaker.org/chainmaker-go/module/accesscontrol"
	"chainmaker.org/chainmaker-go/module/core/common/coinbasemgr"
	"chainmaker.org/chainmaker-go/module/core/provider/conf"
	"chainmaker.org/chainmaker/common/v3/crypto"
	"chainmaker.org/chainmaker/common/v3/json"
	"chainmaker.org/chainmaker/localconf/v3"
	"chainmaker.org/chainmaker/pb-go/v3/accesscontrol"
	commonPb "chainmaker.org/chainmaker/pb-go/v3/common"
	"chainmaker.org/chainmaker/pb-go/v3/config"
	"chainmaker.org/chainmaker/pb-go/v3/consensus"
	"chainmaker.org/chainmaker/pb-go/v3/syscontract"
	"chainmaker.org/chainmaker/protocol/v3"
	"chainmaker.org/chainmaker/utils/v3"
	"chainmaker.org/chainmaker/vm-native/v3/accountmgr"
	"chainmaker.org/chainmaker/vm/v3"
	"github.com/gogo/protobuf/proto"
	"github.com/hokaccha/go-prettyjson"
	"github.com/panjf2000/ants/v2"
	"github.com/prometheus/client_golang/prometheus"
)

const (
	// ScheduleTimeout schedule timeout, in seconds (used as ScheduleTimeout * time.Second)
	ScheduleTimeout = 10
	// ScheduleWithDagTimeout  schedule with dag timeout, in seconds
	ScheduleWithDagTimeout = 20
	// blockVersion2300 block version 2.3.0
	blockVersion2300 = uint32(2300)
	// blockVersion2310 block version 2.3.1
	// NOTE(review): numbering scheme differs from blockVersion2300 (2300 vs 2030100) —
	// presumably the newer zero-padded major/minor/patch encoding; confirm against
	// the chainmaker version constants.
	blockVersion2310 = uint32(2030100)
	// blockVersion2312 block version 2.3.1.2
	blockVersion2312 = uint32(2030102)

	// blockVersion3000000 block version v3.0.0_alpha
	blockVersion3000000 = uint32(3000000)

	// chargeGasVmForMultiAccountParameterKey parameter key for charging gas with
	// multiple account senders (usage not visible in this chunk)
	chargeGasVmForMultiAccountParameterKey = "charge_gas_vm_for_multi_account_senders"
)

const (
	// ErrMsgOfGasLimitNotSet error message used when a tx payload carries no GasLimit
	ErrMsgOfGasLimitNotSet = "field `GasLimit` must be set in payload."
)

// TxScheduler transaction scheduler structure
type TxScheduler struct {
	// lock serializes Schedule / SimulateWithDag calls on this scheduler
	lock sync.Mutex
	// vm manager, used to execute transactions and bracket scheduling
	// with BeforeSchedule/AfterSchedule
	VmManager protocol.VmManager
	// schedule finish channel, signalled when a scheduling round completes or times out
	scheduleFinishC chan bool
	// logger
	log protocol.Logger
	// chain config
	chainConf protocol.ChainConf
	// metric vm runtime, observed per chain id when monitoring is enabled
	metricVMRunTime *prometheus.HistogramVec
	// store helper, provides the goroutine pool capacity
	StoreHelper conf.StoreHelper
	// regexp
	keyReg *regexp.Regexp
	// signing member
	signer protocol.SigningMember
	// ledgerCache provides the current committed height (see checkTBFTLastBLock)
	ledgerCache protocol.LedgerCache
	// contractCache is released after every scheduling round
	contractCache *sync.Map
}

// dagNeighbors is a transaction dependency set in adjacency-table
// representation: keys are the tx indexes this tx depends on.
type dagNeighbors map[int]struct{}

//// TxIdAndExecOrderType txid and ExecOrderTxType
//type TxIdAndExecOrderType struct {
//	string
//	protocol.ExecOrderTxType
//}

// Schedule according to a batch of transactions,
// and generating DAG according to the conflict relationship.
//
// Flow: execute txs in parallel on a goroutine pool, seal the snapshot,
// build the DAG from the collected read/write sets, execute "special"
// txs serially, then append the charge-gas / coinbase tx when required.
// nolint: gocyclo, revive
func (ts *TxScheduler) Schedule(block *commonPb.Block, txBatch []*commonPb.Transaction,
	snapshot protocol.Snapshot) (map[string]*commonPb.TxRWSet, map[string][]*commonPb.ContractEvent, error) {

	ts.lock.Lock()
	defer ts.lock.Unlock()

	defer ts.releaseContractCache()

	var err error

	// reject blocks at or below the committed ledger height under TBFT
	if err = ts.checkTBFTLastBLock(block); err != nil {
		return nil, nil, err
	}

	txBatchSize := len(txBatch)
	ts.log.Infof("schedule tx batch start, block %d, size = %d", block.Header.BlockHeight, txBatchSize)

	var goRoutinePool *ants.Pool
	poolCapacity := ts.StoreHelper.GetPoolCapacity()
	ts.log.Debugf("GetPoolCapacity() => %v", poolCapacity)
	if goRoutinePool, err = ants.NewPool(poolCapacity, ants.WithPreAlloc(false)); err != nil {
		return nil, nil, err
	}
	defer goRoutinePool.Release()

	timeoutC := time.After(ScheduleTimeout * time.Second)
	startTime := time.Now()

	runningTxC := make(chan *commonPb.Transaction, txBatchSize)
	finishC := make(chan bool)

	enableOptimizeChargeGas := coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf)
	enableSenderGroup := ts.chainConf.ChainConfig().Core.EnableSenderGroup
	enableConflictsBitWindow, conflictsBitWindow := ts.initOptimizeTools(txBatch)
	//txAddressCache := make(map[string]string, len(txBatch))
	var senderGroup *SenderGroup
	var senderCollection *SenderCollection
	// SenderCollection (charge-gas optimization) takes precedence over SenderGroup
	if enableOptimizeChargeGas {
		ts.log.Debugf("before prepare `SenderCollection` ")
		senderCollection = ts.NewSenderCollection(txBatch, snapshot)
		ts.log.Infof("SenderCollection has %d special txs.", len(senderCollection.specialTxTable))
		ts.log.Debugf("end prepare `SenderCollection` ")
	} else if enableSenderGroup {
		ts.log.Debugf("before prepare `SenderGroup` ")
		senderGroup = NewSenderGroup(txBatch)
		ts.log.Debugf("end prepare `SenderGroup` ")
	}

	blockFingerPrint := string(utils.CalcBlockFingerPrintWithoutTx(block))
	ts.VmManager.BeforeSchedule(blockFingerPrint, block.Header.BlockHeight)
	defer ts.VmManager.AfterSchedule(blockFingerPrint, block.Header.BlockHeight)

	blockVersion := block.GetHeader().BlockVersion

	timeCostA0 := time.Since(startTime)
	ts.log.Infof("calculate SenderCollection end, time cost = %s", timeCostA0)
	// launch the go routine to dispatch tx to runningTxC
	go func() {
		ts.log.Infof("before Schedule(...) dispatch txs of block(%v)", block.Header.BlockHeight)
		if len(txBatch) == 0 {
			finishC <- true
		} else {
			ts.dispatchTxs(
				block,
				txBatch,
				runningTxC,
				goRoutinePool,
				enableOptimizeChargeGas,
				senderCollection,
				enableSenderGroup,
				senderGroup,
				enableConflictsBitWindow,
				conflictsBitWindow,
				snapshot,
				blockVersion)
		}
		ts.log.Infof("end Schedule(...) dispatch txs of block(%v)", block.Header.BlockHeight)
	}()

	// Put the pending transaction into the running queue
	parallelTxsNum := len(txBatch)
	if enableOptimizeChargeGas {
		// special (no-balance) txs run serially later, so they are excluded here
		parallelTxsNum = senderCollection.getParallelTxsNum()
		senderCollection.resetTotalGasUsed()
	}
	if parallelTxsNum > 0 {
		go func() {
			counter := 0
			for {
				select {
				case tx := <-runningTxC:
					ts.log.Debugf("prepare to submit running task for tx id:%s", tx.Payload.GetTxId())
					err = goRoutinePool.Submit(func() {
						handleTx(block, snapshot, ts, tx, runningTxC, finishC, goRoutinePool,
							parallelTxsNum, enableConflictsBitWindow, conflictsBitWindow,
							enableSenderGroup, senderGroup, senderCollection)
					})
					if err != nil {
						ts.log.Warnf("failed to submit running task, tx id:%s during schedule, %+v",
							tx.Payload.GetTxId(), err)
					}

				case <-timeoutC:
					ts.log.Debugf("Schedule(...) timeout ...")
					ts.scheduleFinishC <- true
					if !enableOptimizeChargeGas && enableSenderGroup {
						// zero-value key is the sentinel that stops sendTxBySenderGroup
						senderGroup.doneTxKeyC <- [32]byte{}
					}
					ts.log.Warnf("block [%d] schedule reached time limit", block.Header.BlockHeight)
					return
				case <-finishC:
					ts.log.Debugf("Schedule(...) finish ...")
					ts.scheduleFinishC <- true
					if !enableOptimizeChargeGas && enableSenderGroup {
						senderGroup.doneTxKeyC <- [32]byte{}
					}
					return
				}
				counter++
				ts.log.Debugf("schedule tx run %d times ... ", counter)
			}
		}()

		// Wait for schedule finish signal
		<-ts.scheduleFinishC
	}

	// Build DAG from read-write table
	snapshot.Seal()
	timeCostA1 := time.Since(startTime)
	ts.log.Infof("parallel scheduling end, time cost = %s, snapshot size = %v, ",
		timeCostA1, snapshot.GetSnapshotSize())
	block.Dag = snapshot.BuildDAG(ts.chainConf.ChainConfig().Contract.EnableSqlSupport, nil)
	timeCostDag := time.Since(startTime)

	// Execute special tx sequentially, and add to dag
	noBalanceTxsNum := 0
	if enableOptimizeChargeGas && senderCollection != nil {
		noBalanceTxsNum = len(senderCollection.specialTxTable)
	}
	serialTxs := make([]*commonPb.Transaction, 0, len(snapshot.GetSpecialTxTable())+noBalanceTxsNum)
	serialTxsNum := len(snapshot.GetTxTable())
	ts.log.Debugf("1) serialTxsNum = %v, noBalanceTxsNum = %v", serialTxsNum, noBalanceTxsNum)
	iterTxsNum := len(snapshot.GetSpecialTxTable())
	if iterTxsNum > 0 {
		ts.log.Infof("append txs[iter] into dag, size = %v", iterTxsNum)
		serialTxs = append(serialTxs, snapshot.GetSpecialTxTable()...)
		serialTxsNum += iterTxsNum
	}
	if enableOptimizeChargeGas && senderCollection != nil {
		if noBalanceTxsNum > 0 {
			ts.log.Infof("append txs[no balance] into dag, size = %v", noBalanceTxsNum)
			serialTxs = append(serialTxs, senderCollection.specialTxTable...)
			serialTxsNum += noBalanceTxsNum
		}
	}
	ts.log.Debugf("2) serialTxsNum = %v", serialTxsNum)
	if len(serialTxs) > 0 {
		ts.simulateSpecialTxs(
			serialTxs,
			block.Dag, snapshot, block, serialTxsNum, senderCollection)
	}

	timeCostA2 := time.Since(startTime)
	ts.log.Infof("serial scheduling end, time cost = %v", timeCostA2)

	// append the charge-gas tx or the coinbase tx (version dependent)
	var txAddressCache map[string]string
	if enableOptimizeChargeGas {
		txAddressCache = senderCollection.txAddressCache
	}
	ts.addChargeGasTxOrCoinbaseTx(blockVersion, block, snapshot, txAddressCache, enableOptimizeChargeGas)

	timeCostB := time.Since(startTime)
	ts.log.Infof("schedule tx batch finished, block %d, success %d, txs execution cost %v, "+
		"dag building cost %v, special tx cost %v, coinbase cost %v, total used %v, tps %v",
		block.Header.BlockHeight, len(block.Dag.Vertexes),
		timeCostA1, timeCostDag-timeCostA1, timeCostA2-timeCostDag, timeCostB-timeCostA2, timeCostB,
		float64(len(block.Dag.Vertexes))/(float64(timeCostB)/1e9))

	txRWSetMap := ts.getTxRWSetTable(snapshot, block)
	contractEventMap := ts.getContractEventMap(block)

	return txRWSetMap, contractEventMap, nil
}

// addChargeGasTxOrCoinbaseTx appends either a coinbase tx (block version >=
// v3.0.0 with coinbase enabled) or a charge-gas tx (older versions with the
// charge-gas optimization enabled) to the block. Empty snapshots are a no-op.
func (ts *TxScheduler) addChargeGasTxOrCoinbaseTx(
	blockVersion uint32, block *commonPb.Block, snapshot protocol.Snapshot,
	addressCache map[string]string, enableOptimizeChargeGas bool) {

	if snapshot.GetSnapshotSize() <= 0 {
		return
	}

	// Soft-fork handling: before version 3.0.0 the charge-gas tx is used.
	if blockVersion < blockVersion3000000 {
		if enableOptimizeChargeGas {
			ts.log.DebugDynamic(func() string {
				return "append charge gas tx to block ..."
			})
			ts.appendChargeGasTx(block, addressCache, snapshot)
			// the charge-gas tx has no dedicated block type flag
		}
		return
	}

	// From v3.0.0 on, coinbase is used for DPoS or when gas charging is enabled.
	if !coinbasemgr.CheckCoinbaseEnable(ts.chainConf) {
		return
	}
	ts.log.DebugDynamic(func() string {
		return "append coinbase tx to block ..."
	})
	// append the coinbase tx to the block and mark the block type accordingly
	ts.appendCoinbaseTx(block, addressCache, snapshot)
	block.Header.BlockType = block.Header.BlockType | commonPb.BlockType_HAS_COINBASE
}

// checkTBFTLastBLock rejects scheduling of a stale block: under TBFT
// consensus, a block must be strictly above the current committed ledger
// height to be scheduled.
//
// Returns an error when the ledger height cannot be read, or when the
// block height is not above the committed height.
func (ts *TxScheduler) checkTBFTLastBLock(block *commonPb.Block) error {
	// fix: drop the redundant `var err error` predeclaration before `:=`
	lastCommittedHeight, err := ts.ledgerCache.CurrentHeight()
	if err != nil {
		return err
	}

	if ts.chainConf.ChainConfig().Consensus.Type == consensus.ConsensusType_TBFT &&
		int64(block.Header.BlockHeight)-int64(lastCommittedHeight) < 1 {
		return fmt.Errorf("no need to schedule old block, ledger height: %d, block height: %d",
			lastCommittedHeight, block.Header.BlockHeight)
	}
	return nil
}

// fillGasBalanceErrDag adds dependency relationships to the Directed Acyclic Graph (DAG)
// for transactions with gas balance errors in a given block. Each tx that failed
// with GAS_BALANCE_NOT_ENOUGH_FAILED is made dependent on every other
// (non-failing) tx in the snapshot, forcing it to execute after them.
func fillGasBalanceErrDag(block *commonPb.Block, snapshot protocol.Snapshot, blockVersion uint32) {

	// Skip adding dependency relationships for gas balance errors before v3.0.0
	if blockVersion < blockVersion3000000 {
		return
	}

	// gasBalanceErrors keeps the indexes of the transactions with gas balance errors
	// (fix: the previous existence check before insert was redundant for a set)
	gasBalanceErrors := make(map[int]struct{})
	for i, tx := range block.Txs {
		if tx.Result != nil && tx.Result.Code == commonPb.TxStatusCode_GAS_BALANCE_NOT_ENOUGH_FAILED {
			gasBalanceErrors[i] = struct{}{}
		}
	}

	// add dependency relationships to the DAG for gas balance error transactions:
	// each failing tx depends on every non-failing tx index in the snapshot
	for index := range gasBalanceErrors {
		dagNeighbors := &commonPb.DAG_Neighbor{
			Neighbors: make([]uint32, 0, snapshot.GetSnapshotSize()-1),
		}
		for i := uint32(0); i < uint32(snapshot.GetSnapshotSize()-1); i++ {
			if _, ok := gasBalanceErrors[int(i)]; !ok {
				dagNeighbors.Neighbors = append(dagNeighbors.Neighbors, i)
			}
		}
		block.Dag.Vertexes[index] = dagNeighbors
	}
}

// handleTx: run tx and apply tx sim context to snapshot.
// On a read/write-set conflict the tx is re-queued onto runningTxC for another
// attempt; once the applied-tx count reaches txBatchSize, finishC is signalled.
func handleTx(block *commonPb.Block, snapshot protocol.Snapshot,
	ts *TxScheduler, tx *commonPb.Transaction,
	runningTxC chan *commonPb.Transaction, finishC chan bool,
	goRoutinePool *ants.Pool, txBatchSize int,
	enableConflictsBitWindow bool, conflictsBitWindow *ConflictsBitWindow,
	enableSenderGroup bool, senderGroup *SenderGroup, collection *SenderCollection) {

	// If snapshot is sealed, no more transaction will be added into snapshot
	if snapshot.IsSealed() {
		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("handleTx(`%v`) snapshot has already sealed.", tx.GetPayload().TxId)
		})
		return
	}
	var start time.Time
	if localconf.ChainMakerConfig.MonitorConfig.Enabled {
		start = time.Now()
	}

	// execute tx, and get
	// 1) the read/write set
	// 2) the result that telling if the invoke success.
	txSimContext, specialTxType, runVmSuccess := ts.executeTx(tx, snapshot, block, collection)
	tx.Result = txSimContext.GetTxResult()
	ts.log.DebugDynamic(func() string {
		return fmt.Sprintf("handleTx(`%v`) => executeTx(...) => runVmSuccess = %v", tx.GetPayload().TxId, runVmSuccess)
	})
	// iterator txs are re-executed serially later, so the gas charged for them
	// in the sender collection is refunded here (when the tx needs charging)
	if specialTxType == protocol.ExecOrderTxTypeIterator && collection != nil {
		txNeedChargeGas := ts.checkNativeFilter(txSimContext.GetBlockVersion(),
			tx.Payload.ContractName,
			tx.Payload.Method,
			tx,
			txSimContext.GetSnapshot())
		if err := collection.refundGasInSenderCollection(tx, tx.Result, txNeedChargeGas); err != nil {
			ts.log.DebugDynamic(func() string {
				return fmt.Sprintf("refundGasInSenderCollection failed, err = %v", err)
			})
		}
	}

	// Apply failed means this tx's read set conflict with other txs' write set
	applyResult, applySize := snapshot.ApplyTxSimContext(txSimContext, specialTxType,
		runVmSuccess, false)
	ts.log.DebugDynamic(func() string {
		return fmt.Sprintf("handleTx(`%v`) => ApplyTxSimContext(...) => snapshot.txTable = %v, applySize = %v",
			tx.GetPayload().TxId, len(snapshot.GetTxTable()), applySize)
	})

	// reduce the conflictsBitWindow size to eliminate the read/write set conflict
	if !applyResult {
		if enableConflictsBitWindow {
			ts.adjustPoolSize(goRoutinePool, conflictsBitWindow, ConflictTx)
		}

		// re-queue the conflicting tx for another execution attempt
		runningTxC <- tx

		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("apply to snapshot failed, tx id:%s, result:%+v, apply count:%d",
				tx.Payload.GetTxId(), txSimContext.GetTxResult(), applySize)
		})

	} else {
		// post-apply bookkeeping: pool tuning, metrics, sender-group notification
		ts.handleApplyResult(enableConflictsBitWindow, enableSenderGroup,
			conflictsBitWindow, senderGroup, goRoutinePool, tx, start)

		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("apply to snapshot success, tx id:%s, result:%+v, apply count:%d",
				tx.Payload.GetTxId(), txSimContext.GetTxResult(), applySize)
		})
	}
	// If all transactions have been successfully added to dag
	if applySize >= txBatchSize {
		finishC <- true
	}
}

// initOptimizeTools decides whether the conflicts-bit-window optimization is
// usable for this batch and, when it is, builds the ConflictsBitWindow.
// The optimization is disabled for batches too small to adjust meaningfully.
func (ts *TxScheduler) initOptimizeTools(
	txBatch []*commonPb.Transaction) (bool, *ConflictsBitWindow) {
	batchSize := len(txBatch)
	enabled := ts.chainConf.ChainConfig().Core.EnableConflictsBitWindow

	ts.log.Infof("enable conflicts bit window: [%t]", enabled)

	// not enough txs for the window to be worth adjusting
	if AdjustWindowSize*MinAdjustTimes > batchSize {
		enabled = false
	}
	if !enabled {
		return false, nil
	}
	return true, NewConflictsBitWindow(batchSize)
}

// sendTxBySenderGroup feeds txs into runningTxC one-per-sender at a time: the
// first tx of every sender group is pushed immediately, and each subsequent tx
// of a group is pushed only after the previous one is reported done on
// doneTxKeyC. A zero-value key acts as the termination sentinel.
func (ts *TxScheduler) sendTxBySenderGroup(conflictsBitWindow *ConflictsBitWindow, senderGroup *SenderGroup,
	runningTxC chan *commonPb.Transaction, enableConflictsBitWindow bool) {
	// first round: one tx per sender
	for _, txs := range senderGroup.txsMap {
		runningTxC <- txs[0]
	}
	// then release the next tx of a sender whenever its previous tx finishes
	for {
		key := <-senderGroup.doneTxKeyC
		if key == [32]byte{} {
			return
		}
		remaining := senderGroup.txsMap[key][1:]
		senderGroup.txsMap[key] = remaining
		if len(remaining) == 0 {
			// this sender is drained; shrink the max pool capacity accordingly
			delete(senderGroup.txsMap, key)
			if enableConflictsBitWindow {
				conflictsBitWindow.setMaxPoolCapacity(len(senderGroup.txsMap))
			}
			continue
		}
		runningTxC <- remaining[0]
	}
}

// handleApplyResult does the bookkeeping for a successfully applied tx:
// tunes the goroutine pool via the conflicts bit window, records the vm
// runtime metric when monitoring is enabled, and notifies the sender group
// that this sender's tx has finished.
func (ts *TxScheduler) handleApplyResult(enableConflictsBitWindow bool, enableSenderGroup bool,
	conflictsBitWindow *ConflictsBitWindow, senderGroup *SenderGroup, goRoutinePool *ants.Pool,
	tx *commonPb.Transaction, start time.Time) {
	if enableConflictsBitWindow {
		ts.adjustPoolSize(goRoutinePool, conflictsBitWindow, NormalTx)
	}
	if localconf.ChainMakerConfig.MonitorConfig.Enabled {
		ts.metricVMRunTime.WithLabelValues(tx.Payload.ChainId).Observe(time.Since(start).Seconds())
	}
	if !enableSenderGroup {
		return
	}
	hashKey, _ := getSenderHashKey(tx)
	senderGroup.doneTxKeyC <- hashKey
}

// getTxRWSetTable refreshes block.Txs from the snapshot tx table and returns
// the read/write sets keyed by tx id; nil entries are skipped. When the
// scheduler rwset log is enabled, the map and dag are pretty-printed.
func (ts *TxScheduler) getTxRWSetTable(snapshot protocol.Snapshot, block *commonPb.Block) map[string]*commonPb.TxRWSet {
	block.Txs = snapshot.GetTxTable()
	rwSetTable := snapshot.GetTxRWSetTable()
	rwSetMap := make(map[string]*commonPb.TxRWSet, len(rwSetTable))
	for _, rwSet := range rwSetTable {
		if rwSet == nil {
			continue
		}
		rwSetMap[rwSet.TxId] = rwSet
	}
	//ts.dumpDAG(block.Dag, block.Txs)
	if localconf.ChainMakerConfig.SchedulerConfig.RWSetLog {
		result, _ := prettyjson.Marshal(rwSetMap)
		ts.log.Infof("schedule rwset :%s, dag:%+v", result, block.Dag)
	}
	return rwSetMap
}

// getContractEventMap collects every tx's contract events, keyed by tx id.
// Txs without a result or contract result map to a nil event list
// (fix: previously this dereferenced tx.Result.ContractResult unconditionally
// and would panic on a tx that never produced a result).
func (ts *TxScheduler) getContractEventMap(block *commonPb.Block) map[string][]*commonPb.ContractEvent {
	contractEventMap := make(map[string][]*commonPb.ContractEvent, len(block.Txs))
	for _, tx := range block.Txs {
		var event []*commonPb.ContractEvent
		if tx.Result != nil && tx.Result.ContractResult != nil {
			event = tx.Result.ContractResult.ContractEvent
		}
		contractEventMap[tx.Payload.TxId] = event
	}
	return contractEventMap
}

// SimulateWithDag based on the dag in the block, perform scheduling and execution transactions.
//
// Used during block verification: txs are executed respecting the dag's
// dependency order, each finished tx releases its dependents, and the
// resulting read/write sets are compared against the proposer's by the caller.
func (ts *TxScheduler) SimulateWithDag(block *commonPb.Block, snapshot protocol.Snapshot) (
	map[string]*commonPb.TxRWSet, map[string]*commonPb.Result, error) {
	ts.lock.Lock()
	defer ts.lock.Unlock()

	defer ts.releaseContractCache()

	var (
		startTime  = time.Now()
		txRWSetMap = make(map[string]*commonPb.TxRWSet, len(block.Txs))
	)
	// since 2.3.0 the dag must have exactly one vertex per tx
	if block.Header.BlockVersion >= blockVersion2300 && len(block.Txs) != len(block.Dag.Vertexes) {
		ts.log.Warnf("found dag size mismatch txs length in "+
			"block[%x] dag:%d, txs:%d", block.Header.BlockHash, len(block.Dag.Vertexes), len(block.Txs))
		return nil, nil, fmt.Errorf("found dag size mismatch txs length in "+
			"block[%x] dag:%d, txs:%d", block.Header.BlockHash, len(block.Dag.Vertexes), len(block.Txs))
	}
	if len(block.Txs) == 0 {
		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("no txs in block[%x] when simulate", block.Header.BlockHash)
		})
		return txRWSetMap, snapshot.GetTxResultMap(), nil
	}
	ts.log.Infof("simulate with dag start, size %d", len(block.Txs))
	txMapping := make(map[int]*commonPb.Transaction, len(block.Txs))
	for index, tx := range block.Txs {
		txMapping[index] = tx
	}

	// Construct the adjacency list of dag, which describes the subsequent adjacency transactions of all transactions
	dag := block.Dag
	txIndexBatch, dagRemain, reverseDagRemain, err := ts.initSimulateDag(dag)
	if err != nil {
		ts.log.Warnf("initialize simulate dag error:%s", err)
		return nil, nil, err
	}

	var senderCollection *SenderCollection
	//txAddressCache := make(map[string]string, snapshot.GetSnapshotSize())
	enableOptimizeChargeGas := coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf)

	if enableOptimizeChargeGas {
		ts.log.Debugf("before prepare `SenderCollection` ")
		senderCollection = ts.NewSenderCollection(block.Txs, snapshot)
		// reset totalGasUsed for recalculating txs in block
		senderCollection.resetTotalGasUsed()
		ts.log.Debugf("end prepare `SenderCollection` ")
	}

	txBatchSize := len(dag.Vertexes)
	runningTxC := make(chan int, txBatchSize)
	doneTxC := make(chan int, txBatchSize)

	timeoutC := time.After(ScheduleWithDagTimeout * time.Second)
	finishC := make(chan bool)

	var goRoutinePool *ants.Pool
	if goRoutinePool, err = ants.NewPool(len(block.Txs), ants.WithPreAlloc(true)); err != nil {
		return nil, nil, err
	}
	defer goRoutinePool.Release()

	ts.log.DebugDynamic(func() string {
		return fmt.Sprintf("block [%d] simulate with dag first batch size:%d, total batch size:%d",
			block.Header.BlockHeight, len(txIndexBatch), txBatchSize)
	})

	blockFingerPrint := string(utils.CalcBlockFingerPrintWithoutTx(block))
	ts.VmManager.BeforeSchedule(blockFingerPrint, block.Header.BlockHeight)
	defer ts.VmManager.AfterSchedule(blockFingerPrint, block.Header.BlockHeight)

	// seed the run queue with the dependency-free txs
	go func() {
		for _, tx := range txIndexBatch {
			runningTxC <- tx
		}
	}()
	go func() {
		for {
			select {
			case txIndex := <-runningTxC:
				tx := txMapping[txIndex]
				ts.log.Debugf("simulate with dag, prepare to submit running task for tx id:%s", tx.Payload.GetTxId())
				err = goRoutinePool.Submit(func() {
					handleTxInSimulateWithDag(block, snapshot, ts, tx, txIndex,
						doneTxC, finishC, txBatchSize, senderCollection)
				})
				if err != nil {
					ts.log.Warnf("failed to submit tx id %s during simulate with dag, %+v",
						tx.Payload.GetTxId(), err)
				}
			case doneTxIndex := <-doneTxC:
				// a finished tx may release its dependents into the run queue
				txIndexBatchAfterShrink := ts.shrinkDag(doneTxIndex, dagRemain, reverseDagRemain)
				ts.log.Debugf("block [%d] simulate with dag, pop next tx index batch size:%d, dagRemain size:%d",
					block.Header.BlockHeight, len(txIndexBatchAfterShrink), len(dagRemain))
				for _, tx := range txIndexBatchAfterShrink {
					runningTxC <- tx
				}
			case <-finishC:
				ts.log.Debugf("block [%d] simulate with dag finish", block.Header.BlockHeight)
				ts.scheduleFinishC <- true
				return
			case <-timeoutC:
				ts.log.Errorf("block [%d] simulate with dag timeout", block.Header.BlockHeight)
				ts.scheduleFinishC <- true
				return
			}
		}
	}()

	<-ts.scheduleFinishC
	snapshot.Seal()
	timeUsed := time.Since(startTime)
	ts.log.Infof("simulate with dag finished, block %d, size %d, txs execution cost %v, tps %v",
		block.Header.BlockHeight, len(block.Txs), timeUsed, float64(len(block.Txs))/(float64(timeUsed)/1e9))

	// Return the read and write set after the scheduled execution
	for _, txRWSet := range snapshot.GetTxRWSetTable() {
		if txRWSet != nil {
			txRWSetMap[txRWSet.TxId] = txRWSet
		}
	}

	writeRWSetLog(txRWSetMap, block.Dag, ts.log)

	return txRWSetMap, snapshot.GetTxResultMap(), nil
}

// writeRWSetLog pretty-prints the rwset map and dag when rwset logging is
// enabled in the local scheduler configuration; otherwise does nothing.
func writeRWSetLog(txRWSetMap map[string]*commonPb.TxRWSet, dag *commonPb.DAG, logger protocol.Logger) {
	if !localconf.ChainMakerConfig.SchedulerConfig.RWSetLog {
		return
	}
	result, _ := prettyjson.Marshal(txRWSetMap)
	logger.Infof("simulate with dag rwset :%s, dag: %+v", result, dag)
}

// initSimulateDag validates the dag and builds its adjacency structures:
//   - the returned slice holds tx indexes with no dependencies (runnable now)
//   - dagRemain maps a tx index to the set of indexes it still waits on
//   - reverseDagRemain maps a tx index to the set of indexes waiting on it
//
// An error is returned for a nil vertex, a neighbor list that is not
// strictly increasing, or a neighbor index not less than its own tx index.
func (ts *TxScheduler) initSimulateDag(dag *commonPb.DAG) (
	[]int, map[int]dagNeighbors, map[int]dagNeighbors, error) {
	dagRemain := make(map[int]dagNeighbors, len(dag.Vertexes))
	reverseDagRemain := make(map[int]dagNeighbors, len(dag.Vertexes)*4)
	var txIndexBatch []int
	for txIndex, vertex := range dag.Vertexes {
		if vertex == nil {
			return nil, nil, nil, fmt.Errorf("dag has nil neighbor")
		}
		if len(vertex.Neighbors) == 0 {
			// no dependencies: runnable in the first batch
			txIndexBatch = append(txIndexBatch, txIndex)
			continue
		}
		deps := make(dagNeighbors)
		for i, neighbor := range vertex.Neighbors {
			if i > 0 && vertex.Neighbors[i-1] >= neighbor {
				return nil, nil, nil, fmt.Errorf("dag neighbors not strict increasing, neighbors: %v", vertex.Neighbors)
			}
			if int(neighbor) >= txIndex {
				return nil, nil, nil, fmt.Errorf("dag has neighbor >= txIndex, txIndex: %d, neighbor: %d", txIndex, neighbor)
			}
			deps[int(neighbor)] = struct{}{}
			rev, ok := reverseDagRemain[int(neighbor)]
			if !ok {
				rev = make(dagNeighbors)
				reverseDagRemain[int(neighbor)] = rev
			}
			rev[txIndex] = struct{}{}
		}
		dagRemain[txIndex] = deps
	}
	return txIndexBatch, dagRemain, reverseDagRemain, nil
}

// handleTxInSimulateWithDag executes one tx during dag-based simulation and
// applies its simulated context to the snapshot. The tx index is always
// reported on doneTxC — apply failures are not retried during verification —
// and finishC is signalled once the whole batch has been applied.
func handleTxInSimulateWithDag(
	block *commonPb.Block, snapshot protocol.Snapshot,
	ts *TxScheduler, tx *commonPb.Transaction, txIndex int,
	doneTxC chan int, finishC chan bool, txBatchSize int,
	collection *SenderCollection) {
	txSimContext, specialTxType, runVmSuccess := ts.executeTx(tx, snapshot, block, collection)

	// an apply failure means this tx's read set conflicts with another tx's write set
	applyResult, applySize := snapshot.ApplyTxSimContext(txSimContext, specialTxType, runVmSuccess, true)
	if applyResult {
		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("apply to snapshot for tx id:%s, result:%+v, apply count:%d, tx batch size:%d",
				tx.Payload.GetTxId(), txSimContext.GetTxResult(), applySize, txBatchSize)
		})
	} else {
		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("failed to apply snapshot for tx id:%s, shouldn't have its rwset", tx.Payload.TxId)
		})
	}
	// in verification mode a failed apply is marked done rather than retried
	doneTxC <- txIndex

	// If all transactions in current batch have been successfully added to dag
	if applySize >= txBatchSize {
		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("finished 1 batch, apply size:%d, tx batch size:%d", applySize, txBatchSize)
		})
		finishC <- true
	}
}

// adjustPoolSize tunes the goroutine pool capacity according to the conflict
// rate tracked by the conflicts bit window; a result of -1 means "no change".
func (ts *TxScheduler) adjustPoolSize(pool *ants.Pool, conflictsBitWindow *ConflictsBitWindow, txExecType TxExecType) {
	if size := conflictsBitWindow.Enqueue(txExecType, pool.Cap()); size != -1 {
		pool.Tune(size)
	}
}

// executeTx execute tx: builds a fresh tx simulation context, dispatches to
// the vm-runner variant matching the block version, and returns the context,
// the tx's special execution-order type, and whether the vm run succeeded.
func (ts *TxScheduler) executeTx(
	tx *commonPb.Transaction, snapshot protocol.Snapshot, block *commonPb.Block, collection *SenderCollection) (
	protocol.TxSimContext, protocol.ExecOrderTxType, bool) {
	txSimContext := vm.NewTxSimContext(ts.VmManager, snapshot, tx, block.Header.BlockVersion, ts.log)
	ts.log.DebugDynamic(func() string {
		return fmt.Sprintf("NewTxSimContext finished for tx id:%s", tx.Payload.GetTxId())
	})

	runVmSuccess := true
	var txResult *commonPb.Result
	var err error
	var specialTxType protocol.ExecOrderTxType

	enableGas := ts.checkGasEnable()
	enableOptimizeChargeGas := coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf)
	blockVersion := block.GetHeader().BlockVersion

	// the tx is charged gas only if it passes the native filter AND either gas
	// or the charge-gas optimization is enabled
	txNeedChargeGas := ts.checkNativeFilter(txSimContext.GetBlockVersion(),
		tx.Payload.ContractName,
		tx.Payload.Method,
		tx,
		txSimContext.GetSnapshot()) && (enableOptimizeChargeGas || enableGas)
	// since v3.0.0, the handling of "gas balance not enough" errors is fixed
	if blockVersion >= blockVersion3000000 {
		specialTxType, runVmSuccess = ts.executeTx3000(
			tx, snapshot, block, collection, enableOptimizeChargeGas, txNeedChargeGas, txSimContext)
		return txSimContext, specialTxType, runVmSuccess
	} else if blockVersion >= 2300 { // literal matches blockVersion2300
		if txNeedChargeGas {
			addr := ts.getGasAddress(tx, txSimContext, snapshot)
			if !ts.guardForExecuteTx2300(
				tx, txSimContext, enableOptimizeChargeGas, addr, commonPb.TxStatusCode_SUCCESS, false) {
				return txSimContext, protocol.ExecOrderTxTypeNormal, false
			}
		}

		ts.log.Debugf("run vm start for tx:%s", tx.Payload.GetTxId())
		if txResult, specialTxType, err = ts.runVM2300(tx, txSimContext, enableOptimizeChargeGas); err != nil {
			runVmSuccess = false
			ts.log.Errorf("failed to run vm for tx id:%s,contractName:%s, tx result:%+v, error:%+v",
				tx.Payload.GetTxId(), tx.Payload.ContractName, txResult, err)
		}

	} else if blockVersion >= 2220 {
		if !ts.guardForExecuteTx2220(tx, txSimContext, enableGas, enableOptimizeChargeGas) {
			return txSimContext, protocol.ExecOrderTxTypeNormal, false
		}

		ts.log.Debugf("run vm start for tx:%s", tx.Payload.GetTxId())
		if txResult, specialTxType, err = ts.runVM2220(tx, txSimContext, enableOptimizeChargeGas); err != nil {
			runVmSuccess = false
			// contract execution failures are routine; Warn level is sufficient (no ERROR)
			ts.log.Warnf("failed to run vm for tx id:%s,contractName:%s, tx result:%+v, error:%+v",
				tx.Payload.GetTxId(), tx.Payload.ContractName, txResult, err)
		}
	} else {
		if txResult, specialTxType, err = ts.runVM2210(tx, txSimContext); err != nil {
			runVmSuccess = false
			ts.log.Errorf("failed to run vm for tx id:%s,contractName:%s, tx result:%+v, error:%+v",
				tx.Payload.GetTxId(), tx.Payload.ContractName, txResult, err)
		}
	}

	ts.log.Debugf("run vm finished for tx:%s, runVmSuccess:%v, txResult = %v ", tx.Payload.TxId, runVmSuccess, txResult)
	txSimContext.SetTxResult(txResult)
	return txSimContext, specialTxType, runVmSuccess
}

// executeTx3000 runs a tx under block version >= v3.0.0 semantics: it looks up
// the payer address and account status from the SenderCollection, fails fast
// on abnormal accounts or insufficient balance, runs the 2300 vm path, and
// (when the charge-gas optimization is on) charges gas through the collection.
// Returns the special execution-order type and whether the vm run succeeded.
func (ts *TxScheduler) executeTx3000(tx *commonPb.Transaction, snapshot protocol.Snapshot, block *commonPb.Block,
	collection *SenderCollection, enableOptimizeChargeGas, txNeedChargeGas bool, txSimContext protocol.TxSimContext) (
	protocol.ExecOrderTxType, bool) {
	var (
		runVmSuccess    = true
		txResult        *commonPb.Result
		specialTxType   protocol.ExecOrderTxType
		err             error
		accountStatus   commonPb.TxStatusCode
		accountAbnormal bool
		addr            string
		exist           bool
	)
	// resolve the payer address and its account status from the collection
	if txNeedChargeGas && enableOptimizeChargeGas {
		addr, exist = collection.txAddressCache[tx.Payload.TxId]
		if !exist {
			ts.log.Warnf("cannot find account balance for %v", tx.Payload.TxId)
			return protocol.ExecOrderTxTypeNormal, false
		}
		col, exi := collection.txsMap[addr]
		if !exi {
			ts.log.Warnf("cannot find txCollect for tx %v", tx.Payload.TxId)
			return protocol.ExecOrderTxTypeNormal, false
		}
		if col.accountStatus != commonPb.TxStatusCode_SUCCESS {
			accountStatus = col.accountStatus
			accountAbnormal = true
		}
	}
	// checking for balance is not enough
	if enableOptimizeChargeGas && txNeedChargeGas && !accountAbnormal {
		if err2 := collection.checkBalanceInSenderCollection(tx, ts.log); err2 != nil {
			runVmSuccess = false
			txResult = &commonPb.Result{
				Code:    commonPb.TxStatusCode_CONTRACT_FAIL,
				Message: err2.Error(),
				ContractResult: &commonPb.ContractResult{
					Code:    1,
					Message: err2.Error(),
					GasUsed: uint64(0),
				},
			}
			ts.log.Debugf("run vm finished for tx:%s, runVmSuccess:%v, txResult = %v ",
				tx.Payload.TxId, runVmSuccess, txResult)
			txSimContext.SetTxResult(txResult)
			return protocol.ExecOrderTxTypeNormal, false
		}
	}

	if txNeedChargeGas && !ts.guardForExecuteTx2300(tx, txSimContext, enableOptimizeChargeGas, addr, accountStatus, true) {
		return protocol.ExecOrderTxTypeNormal, false
	}

	ts.log.Debugf("run vm start for tx:%s", tx.Payload.GetTxId())
	if txResult, specialTxType, err = ts.runVM2300(tx, txSimContext, enableOptimizeChargeGas); err != nil {
		runVmSuccess = false
		ts.log.Errorf("failed to run vm for tx id:%s,contractName:%s, tx result:%+v, error:%+v",
			tx.Payload.GetTxId(), tx.Payload.ContractName, txResult, err)
		txSimContext.SetTxResult(txResult)
	}
	// charge gas through the sender collection; a charge failure overrides the
	// tx result with CONTRACT_FAIL carrying the gas actually charged
	if enableOptimizeChargeGas {
		txNeedChargingGas := txNeedChargeGas && tx.Payload.TxType == commonPb.TxType_INVOKE_CONTRACT
		ts.log.Debugf("txNeedChargingGas = %v", txNeedChargingGas)
		if gasCharged, err2 := collection.chargeGasInSenderCollection(tx, txResult, txNeedChargingGas); err2 != nil {
			runVmSuccess = false
			txResult = &commonPb.Result{
				Code:    commonPb.TxStatusCode_CONTRACT_FAIL,
				Message: err2.Error(),
				ContractResult: &commonPb.ContractResult{
					Code:    1,
					GasUsed: gasCharged,
					Message: err2.Error(),
				},
			}
		}
		ts.log.Debugf("run vm finished for tx:%s, runVmSuccess:%v, txResult = %v ", tx.Payload.TxId, runVmSuccess, txResult)
	}
	txSimContext.SetTxResult(txResult)
	return specialTxType, runVmSuccess
}

// getGasAddress resolves the gas-payer address for tx.
// The payer public key comes from the tx (payer endorsement, falling back to
// sender); converting it to an address requires the latest chain config,
// which is loaded once per block through `sf` keyed by the block fingerprint
// (apparently a singleflight-style group, so concurrent callers share one
// store read — TODO confirm). Returns "" on any failure (logged, not
// propagated).
func (ts *TxScheduler) getGasAddress(tx *commonPb.Transaction,
	txSimContext protocol.TxSimContext, snapshot protocol.Snapshot) string {
	// NOTE(review): the error from getPayerPkAndAddress is ignored here; on
	// failure a nil pk is handed to pkToGasAddress — verify it tolerates nil.
	pk, _, _ := getPayerPkAndAddress(tx, snapshot)
	val, err, _ := sf.Do(txSimContext.GetBlockFingerprint(), func() (interface{}, error) {
		chainCfg, err := txSimContext.GetBlockchainStore().GetLastChainConfig()
		return chainCfg, err
	})
	if err != nil {
		ts.log.Errorf("get LastChainConfig error: %v", err)
		return ""
	}
	chainCfg, ok := val.(*config.ChainConfig)
	if !ok {
		ts.log.Errorf("failed to transfer chainConfig from interface to struct")
		return ""
	}
	//chainCfg := txSimContext.GetLastChainConfig()
	// pkToGasAddress error is ignored; addr stays "" on failure
	addr, _ := pkToGasAddress(pk, chainCfg)
	return addr
}

// simulateSpecialTxs executes special (order-sensitive) txs strictly one at a
// time on top of snapshot, and wires them into dag so replay preserves that
// order: the first applied special tx depends on every tx already in the
// snapshot, each later one only on its immediate predecessor.
// Blocks until applySize reaches txBatchSize or ScheduleWithDagTimeout
// elapses.
func (ts *TxScheduler) simulateSpecialTxs(specialTxs []*commonPb.Transaction,
	dag *commonPb.DAG, snapshot protocol.Snapshot, block *commonPb.Block,
	txBatchSize int, collection *SenderCollection) {
	specialTxsLen := len(specialTxs)
	var firstTx *commonPb.Transaction
	// buffered to specialTxsLen so the feeder goroutine below never blocks
	// and re-queued (conflicted) txs always fit
	runningTxC := make(chan *commonPb.Transaction, specialTxsLen)
	//collection.printDebugInfo()
	scheduleFinishC := make(chan bool)
	timeoutC := time.After(ScheduleWithDagTimeout * time.Second)
	go func() {
		for _, tx := range specialTxs {
			runningTxC <- tx
		}
	}()

	go func() {
		for {
			select {
			case tx := <-runningTxC:
				// simulate tx
				txSimContext, specialTxType, runVmSuccess := ts.executeTx(tx, snapshot, block, collection)
				tx.Result = txSimContext.GetTxResult()
				// apply tx
				applyResult, applySize := snapshot.ApplyTxSimContext(txSimContext, specialTxType, runVmSuccess, true)
				if !applyResult {
					// apply conflict with the current snapshot state: re-queue and retry
					ts.log.Debugf("failed to apply according to dag with tx %s ", tx.Payload.TxId)
					runningTxC <- tx
					continue
				}
				if firstTx == nil {
					// first applied special tx: its DAG vertex depends on every
					// previously applied tx in the snapshot
					firstTx = tx
					dagNeighbors := &commonPb.DAG_Neighbor{
						Neighbors: make([]uint32, 0, snapshot.GetSnapshotSize()-1),
					}
					for i := uint32(0); i < uint32(snapshot.GetSnapshotSize()-1); i++ {
						dagNeighbors.Neighbors = append(dagNeighbors.Neighbors, i)
					}
					dag.Vertexes = append(dag.Vertexes, dagNeighbors)
				} else {
					// later special txs: depend only on the tx applied just before
					dagNeighbors := &commonPb.DAG_Neighbor{
						Neighbors: make([]uint32, 0, 1),
					}
					dagNeighbors.Neighbors = append(dagNeighbors.Neighbors, uint32(snapshot.GetSnapshotSize())-2)
					dag.Vertexes = append(dag.Vertexes, dagNeighbors)
				}
				if applySize >= txBatchSize {
					ts.log.Debugf("block [%d] schedule special txs finished, apply size:%d, len of txs:%d, "+
						"len of special txs:%d", block.Header.BlockHeight, applySize, txBatchSize, specialTxsLen)
					scheduleFinishC <- true
					return
				}
			case <-timeoutC:
				ts.log.Errorf("block [%d] schedule special txs timeout", block.Header.BlockHeight)
				scheduleFinishC <- true
				return
			}
		}
	}()
	<-scheduleFinishC
}

// shrinkDag removes the finished tx (txIndex) from the remaining-dependency
// maps and returns the indexes of txs that became runnable because txIndex
// was their last outstanding dependency.
func (ts *TxScheduler) shrinkDag(txIndex int, dagRemain map[int]dagNeighbors,
	reverseDagRemain map[int]dagNeighbors) []int {
	var readyTxs []int
	for dependent := range reverseDagRemain[txIndex] {
		remaining := dagRemain[dependent]
		delete(remaining, txIndex)
		if len(remaining) != 0 {
			continue
		}
		// no dependencies left: this tx can be scheduled now
		delete(dagRemain, dependent)
		readyTxs = append(readyTxs, dependent)
	}
	delete(reverseDagRemain, txIndex)
	return readyTxs
}

// Halt aborts a running schedule by signaling scheduleFinishC.
// NOTE(review): depending on the channel's buffering, this send may block
// until the scheduling goroutine receives the signal — confirm callers
// expect that.
func (ts *TxScheduler) Halt() {
	ts.scheduleFinishC <- true
}

// dumpDAG logs the DAG in graphviz "dot" format for debugging; txs must be
// the transactions backing dag.Vertexes (tx ids are shown truncated to 8
// characters).
// nolint: unused
func (ts *TxScheduler) dumpDAG(dag *commonPb.DAG, txs []*commonPb.Transaction) {
	out := "digraph DAG {\n"
	for idx, vertex := range dag.Vertexes {
		from := txs[idx].Payload.TxId[:8]
		if len(vertex.Neighbors) == 0 {
			// no dependencies: draw an edge to the virtual "begin" node
			out += fmt.Sprintf("id_%s -> begin;\n", from)
			continue
		}
		for _, dep := range vertex.Neighbors {
			out += fmt.Sprintf("id_%s -> id_%s;\n", from, txs[dep].Payload.TxId[:8])
		}
	}
	out += "}"
	ts.log.Infof("Dump Dag: %s", out)
}

// chargeGasLimit pre-charges the tx's declared gas limit from the payer's
// account by invoking the account-manager native contract (CHARGE_GAS).
// It is a no-op when gas is disabled, the target contract/method is filtered
// out, or the tx is not an invoke-contract tx. On failure the mutated result
// and an error are returned; on success result is returned untouched.
func (ts *TxScheduler) chargeGasLimit(accountMangerContract *commonPb.Contract, tx *commonPb.Transaction,
	txSimContext protocol.TxSimContext, contractName, method string, pk []byte,
	result *commonPb.Result) (re *commonPb.Result, err error) {
	needCharge := ts.checkGasEnable() &&
		ts.checkNativeFilter(txSimContext.GetBlockVersion(), contractName, method, tx, txSimContext.GetSnapshot()) &&
		tx.Payload.TxType == commonPb.TxType_INVOKE_CONTRACT
	if !needCharge {
		ts.log.Debugf("%s:%s no need to charge gas.", contractName, method)
		return result, nil
	}

	// a declared gas limit is mandatory before anything can be charged
	if tx.Payload.Limit == nil {
		err = errors.New("tx payload limit is nil")
		ts.log.Error(err.Error())
		result.Message = err.Error()
		return result, err
	}

	gasLimit := tx.Payload.Limit.GasLimit
	chargeParams := map[string][]byte{
		accountmgr.ChargePublicKey: pk,
		accountmgr.ChargeGasAmount: []byte(strconv.FormatUint(gasLimit, 10)),
	}
	ts.log.Debugf("【chargeGasLimit】%v, pk = %s, amount = %v", tx.Payload.TxId, pk, gasLimit)
	chargeResult, _, statusCode := ts.VmManager.RunContract(
		accountMangerContract, syscontract.GasAccountFunction_CHARGE_GAS.String(),
		nil, chargeParams, txSimContext, 0, commonPb.TxType_INVOKE_CONTRACT)
	if statusCode != commonPb.TxStatusCode_SUCCESS {
		result.Code = statusCode
		result.ContractResult = chargeResult
		return result, errors.New(chargeResult.Message)
	}
	return result, nil
}

// checkRefundGas validates gas usage against the tx's declared limit and,
// when the optimized bulk-charging path is off, refunds the unused gas to the
// payer. On over-usage the contract result is rewritten as a failure (gas
// used clamped to the limit) and an error is returned.
func (ts *TxScheduler) checkRefundGas(accountMangerContract *commonPb.Contract, tx *commonPb.Transaction,
	txSimContext protocol.TxSimContext, contractName, method string, pk []byte,
	result *commonPb.Result, contractResultPayload *commonPb.ContractResult, enableOptimizeChargeGas bool) error {

	// read the declared gas limit from the payload
	gasLimit, err := getTxGasLimit(tx)
	if err != nil {
		ts.log.Errorf("getTxGasLimit error: %v", err)
		result.Message = err.Error()
		return err
	}

	// reject txs that burned more gas than they declared
	if gasLimit < contractResultPayload.GasUsed {
		err = fmt.Errorf("gas limit is not enough, [limit:%d]/[gasUsed:%d]",
			gasLimit, contractResultPayload.GasUsed)
		ts.log.Error(err.Error())
		cr := result.ContractResult
		cr.Code = uint32(commonPb.TxStatusCode_CONTRACT_FAIL)
		cr.Message = err.Error()
		cr.GasUsed = gasLimit
		cr.Result = nil
		cr.ContractEvent = nil
		return err
	}

	// under optimized charging, refunds are settled by the block's single
	// charge-gas tx instead of per tx
	if enableOptimizeChargeGas {
		return nil
	}

	if _, err = ts.refundGas(accountMangerContract, tx, txSimContext, contractName, method, pk, result,
		contractResultPayload); err != nil {
		ts.log.Errorf("refund gas err is %v", err)
		// since block version 2.3.0 a failed refund fails the whole tx
		if txSimContext.GetBlockVersion() >= blockVersion2300 {
			result.Code = commonPb.TxStatusCode_INTERNAL_ERROR
			result.Message = err.Error()
			result.ContractResult.Code = uint32(1)
			result.ContractResult.Message = err.Error()
			result.ContractResult.ContractEvent = nil
			return err
		}
	}

	return nil
}

// refundGas returns (gasLimit - gasUsed) to the payer by invoking the
// account-manager native contract (REFUND_GAS_VM). Skipped entirely when gas
// is disabled, the target is filtered out, or the tx is not invoke-contract;
// also a no-op when nothing is left to refund.
func (ts *TxScheduler) refundGas(accountMangerContract *commonPb.Contract, tx *commonPb.Transaction,
	txSimContext protocol.TxSimContext, contractName, method string, pk []byte,
	result *commonPb.Result, contractResultPayload *commonPb.ContractResult) (re *commonPb.Result, err error) {
	needRefund := ts.checkGasEnable() &&
		ts.checkNativeFilter(txSimContext.GetBlockVersion(), contractName, method, tx, txSimContext.GetSnapshot()) &&
		tx.Payload.TxType == commonPb.TxType_INVOKE_CONTRACT
	if !needRefund {
		return result, nil
	}

	if tx.Payload.Limit == nil {
		err = errors.New("tx payload limit is nil")
		ts.log.Error(err.Error())
		result.Message = err.Error()
		return result, err
	}

	gasLimit := tx.Payload.Limit.GasLimit
	if gasLimit < contractResultPayload.GasUsed {
		err = fmt.Errorf("gas limit is not enough, [limit:%d]/[gasUsed:%d]", gasLimit, contractResultPayload.GasUsed)
		ts.log.Error(err.Error())
		result.Message = err.Error()
		return result, err
	}

	refundAmount := gasLimit - contractResultPayload.GasUsed
	ts.log.Debugf("refund gas [%d], gas used [%d]", refundAmount, contractResultPayload.GasUsed)
	if refundAmount == 0 {
		return result, nil
	}

	refundParams := map[string][]byte{
		accountmgr.RechargeKey:       pk,
		accountmgr.RechargeAmountKey: []byte(strconv.FormatUint(refundAmount, 10)),
	}
	refundResult, _, statusCode := ts.VmManager.RunContract(
		accountMangerContract, syscontract.GasAccountFunction_REFUND_GAS_VM.String(),
		nil, refundParams, txSimContext, 0, commonPb.TxType_INVOKE_CONTRACT)
	if statusCode != commonPb.TxStatusCode_SUCCESS {
		result.Code = statusCode
		result.ContractResult = refundResult
		return result, errors.New(refundResult.Message)
	}
	return result, nil
}

// getAccountMgrContractAndPk loads the account-manager system contract and
// the payer's public key when gas charging applies to this tx; returns
// (nil, nil, nil) when charging does not apply at all.
func (ts *TxScheduler) getAccountMgrContractAndPk(txSimContext protocol.TxSimContext, tx *commonPb.Transaction,
	contractName, method string) (accountMangerContract *commonPb.Contract, pk []byte, err error) {
	chargeable := ts.checkGasEnable() &&
		ts.checkNativeFilter(txSimContext.GetBlockVersion(), contractName, method, tx, txSimContext.GetSnapshot()) &&
		tx.Payload.TxType == commonPb.TxType_INVOKE_CONTRACT
	if !chargeable {
		return nil, nil, nil
	}

	ts.log.Debugf("getAccountMgrContractAndPk => txSimContext.GetContractByName(`%s`)",
		syscontract.SystemContract_ACCOUNT_MANAGER.String())
	accountMangerContract, err = txSimContext.GetContractByName(syscontract.SystemContract_ACCOUNT_MANAGER.String())
	if err != nil {
		ts.log.Error(err.Error())
		return nil, nil, err
	}

	if pk, err = ts.getPayerPk(txSimContext, tx); err != nil {
		ts.log.Error(err.Error())
		return accountMangerContract, nil, err
	}
	return accountMangerContract, pk, err
}

// checkGasEnable reports whether gas charging is switched on in the current
// chain configuration; false when the config or its account section is
// absent.
func (ts *TxScheduler) checkGasEnable() bool {
	// hoist the lookup: the original called ts.chainConf.ChainConfig() four
	// times for a single decision
	chainCfg := ts.chainConf.ChainConfig()
	if chainCfg == nil || chainCfg.AccountConfig == nil {
		return false
	}
	ts.log.Debugf("chain config account config enable gas is:%v", chainCfg.AccountConfig.EnableGas)
	return chainCfg.AccountConfig.EnableGas
}

// checkNativeFilter decides whether a tx should be charged gas, based on the
// target contract/method and block version; it uses snapshot (instead of
// blockchainStore) to read the newest state. Returns true when the tx must
// be charged.
func (ts *TxScheduler) checkNativeFilter(blockVersion uint32, contractName, method string,
	tx *commonPb.Transaction, snapshot protocol.Snapshot) bool {
	ts.log.Debugf("checkNativeFilter => contractName = %s, method = %s", contractName, method)

	// user (non-native) contracts are always charged
	if !utils.IsNativeContract(contractName) {
		return true
	}

	// add by Cai.Zhihong for compatible with v2.3.1.2
	if blockVersion < blockVersion2312 {
		// install & upgrade of system contracts are charged
		// (pre-2.3.1.2: matched by method name alone, for any native contract)
		if method == syscontract.ContractManageFunction_INIT_CONTRACT.String() ||
			method == syscontract.ContractManageFunction_UPGRADE_CONTRACT.String() {
			return true
		}

		return ts.checkMultiSignFilterOld(contractName, method, tx, snapshot)
	}

	// install & upgrade of system contracts are charged
	// (>= 2.3.1.2: additionally scoped to the contract-manage contract)
	if contractName == syscontract.SystemContract_CONTRACT_MANAGE.String() {
		if method == syscontract.ContractManageFunction_INIT_CONTRACT.String() ||
			method == syscontract.ContractManageFunction_UPGRADE_CONTRACT.String() {
			return true
		}
	}

	return ts.checkMultiSignFilter2312(contractName, method, tx, snapshot)
}

// checkMultiSignFilterOld (block version < v2.3.1.2): a multi-sign TRIG call
// is charged gas only when manual-run multi-sign is enabled AND the pending
// multi-sign request it triggers is an install/upgrade through the
// contract-manage system contract. Read/unmarshal failures err on the side
// of charging (return true).
func (ts *TxScheduler) checkMultiSignFilterOld(
	contractName string, method string, tx *commonPb.Transaction, snapshot protocol.Snapshot) bool {
	if contractName == syscontract.SystemContract_MULTI_SIGN.String() &&
		method == syscontract.MultiSignFunction_TRIG.String() {
		if getMultiSignEnableManualRun(ts.chainConf.ChainConfig()) {
			// locate the id of the multi-sign request being triggered
			// (last matching parameter wins)
			var multiSignReqId []byte
			for _, kvpair := range tx.Payload.Parameters {
				if kvpair.Key == syscontract.MultiVote_TX_ID.String() {
					multiSignReqId = kvpair.Value
				}
			}
			// load the stored multi-sign request from the newest snapshot state
			multiSignInfoBytes, err := snapshot.GetKey(-1, contractName, multiSignReqId)
			if err != nil {
				ts.log.Errorf("read multi-sign failed, multiSignReqId = %v, err = %v", multiSignReqId, err)
				return true
			}

			multiSignInfo := &syscontract.MultiSignInfo{}
			err = proto.Unmarshal(multiSignInfoBytes, multiSignInfo)
			if err != nil {
				ts.log.Errorf("unmarshal MultiSignInfo failed, multiSignReqId = %v, err = %v", multiSignReqId, err)
				return true
			}

			// extract the contract/method the multi-sign request will invoke
			var calleeContractName string
			var calleeMethod string
			for _, kvpair := range multiSignInfo.Payload.Parameters {
				if kvpair.Key == syscontract.MultiReq_SYS_CONTRACT_NAME.String() {
					calleeContractName = string(kvpair.Value)
				}
				if kvpair.Key == syscontract.MultiReq_SYS_METHOD.String() {
					calleeMethod = string(kvpair.Value)
				}
			}
			// only install/upgrade via contract-manage is chargeable
			if calleeContractName == syscontract.SystemContract_CONTRACT_MANAGE.String() {
				if calleeMethod == syscontract.ContractManageFunction_INIT_CONTRACT.String() ||
					calleeMethod == syscontract.ContractManageFunction_UPGRADE_CONTRACT.String() {
					ts.log.Debugf("need charging gas, multiSignReqId = %v", multiSignReqId)
					return true
				}
			}
		}
	}
	return false
}

// checkMultiSignFilter2312 (block version >= v2.3.1.2): any call into the
// multi-sign system contract is charged gas, regardless of method. The
// method/tx/snapshot parameters are unused and kept only for signature
// parity with checkMultiSignFilterOld.
func (ts *TxScheduler) checkMultiSignFilter2312(
	contractName string, method string, tx *commonPb.Transaction, snapshot protocol.Snapshot) bool {
	return contractName == syscontract.SystemContract_MULTI_SIGN.String()
}

// getPayerPkAndAddress resolves the payer's public key and address from the
// tx (explicit payer endorsement, falling back to the sender).
// todo: merge with getPayerPk
func getPayerPkAndAddress(tx *commonPb.Transaction, snapshot protocol.Snapshot) (
	crypto.PublicKey, string, error) {
	payer := getTxPayerSigner(tx)
	if payer == nil {
		return nil, "", errors.New(" can not find payer from tx ")
	}
	return ac.GetMemberPkAndAddress(payer, snapshot)
}

// getPayerPk returns the payer's public key in its string form as bytes;
// every failure is logged before being returned to the caller.
func (ts *TxScheduler) getPayerPk(txSimContext protocol.TxSimContext, tx *commonPb.Transaction) ([]byte, error) {
	payer := getTxPayerSigner(tx)
	if payer == nil {
		err := errors.New(" can not find sender from tx ")
		ts.log.Error(err.Error())
		return nil, err
	}

	pubKey, _, err := ac.GetMemberPkAndAddress(payer, txSimContext.GetSnapshot())
	if err != nil {
		ts.log.Error(err.Error())
		return nil, err
	}

	keyStr, err := pubKey.String()
	if err != nil {
		ts.log.Error(err.Error())
		return nil, err
	}
	return []byte(keyStr), nil
}

// dispatchTxs dispatch txs from:
//  1. senderCollection when flag `enableOptimizeChargeGas` was set
//  2. senderGroup when flag `enableOptimizeChargeGas` was not set, and flag `enableSenderGroup` was set
//  3. txBatch directly where no flags was set
//     to runningTxC
//
// It returns the number of txs the caller should wait for on runningTxC.
func (ts *TxScheduler) dispatchTxs(
	block *commonPb.Block,
	txBatch []*commonPb.Transaction,
	runningTxC chan *commonPb.Transaction,
	goRoutinePool *ants.Pool,
	enableOptimizeChargeGas bool,
	senderCollection *SenderCollection,
	enableSenderGroup bool,
	senderGroup *SenderGroup,
	enableConflictsBitWindow bool,
	conflictsBitWindow *ConflictsBitWindow,
	snapshot protocol.Snapshot,
	blockVersion uint32) int {

	// since v3.0.0 the insufficient-balance case under gas optimization no
	// longer takes the legacy path
	//if enableOptimizeChargeGas && blockVersion < blockVersion3000000 {
	if enableOptimizeChargeGas {
		ts.log.Debugf("before `SenderCollection` dispatch => ")
		ts.dispatchTxsInSenderCollection(block, senderCollection, runningTxC, snapshot)
		ts.log.Infof("SenderCollection has %d special txs", len(senderCollection.specialTxTable))
		ts.log.Debugf("end `SenderCollection` dispatch => ")
		// special txs are simulated separately, so they are excluded here
		return len(txBatch) - len(senderCollection.specialTxTable)
	}

	if enableSenderGroup {
		ts.log.Debugf("before `SenderGroup` dispatch => ")
		if enableConflictsBitWindow {
			conflictsBitWindow.setMaxPoolCapacity(len(senderGroup.txsMap))
		}
		// one worker per sender keeps same-sender txs in order
		goRoutinePool.Tune(len(senderGroup.txsMap))
		ts.sendTxBySenderGroup(conflictsBitWindow, senderGroup, runningTxC, enableConflictsBitWindow)
		ts.log.Debugf("end `SenderGroup` dispatch => ")
		return len(txBatch)
	}

	ts.log.Debugf("before `Normal` dispatch => ")
	for _, tx := range txBatch {
		runningTxC <- tx
	}
	ts.log.Debugf("end `Normal` dispatch => ")
	return len(txBatch)
}

// dispatchTxsInSenderCollection dispatch txs from senderCollection to runningTxC chan
// if the balance less than gas limit, set the result of tx and dispatch this tx.
// use snapshot for newest data
func (ts *TxScheduler) dispatchTxsInSenderCollection(
	block *commonPb.Block,
	senderCollection *SenderCollection,
	runningTxC chan *commonPb.Transaction,
	snapshot protocol.Snapshot) {

	ts.log.DebugDynamic(func() string {
		return "begin dispatchTxsInSenderCollection(...)"
	})
	// walk every sender's tx list; within one sender the txs keep their
	// original order on the channel
	for addr, txCollection := range senderCollection.txsMap {

		ts.log.DebugDynamic(func() string {
			return fmt.Sprintf("%v => {balance: %v, tx size: %v}",
				addr, txCollection.accountBalance, len(txCollection.txs))
		})

		for _, tx := range txCollection.txs {
			ts.log.DebugDynamic(func() string {
				return fmt.Sprintf("dispatch sender collection tx => %s", tx.Payload)
			})

			// NOTE(review): txNeedChargeGas is computed only to feed the debug
			// log below; it does not influence dispatching here
			txNeedChargeGas := ts.checkNativeFilter(
				block.GetHeader().GetBlockVersion(),
				tx.GetPayload().ContractName,
				tx.GetPayload().Method,
				tx, snapshot)
			ts.log.DebugDynamic(func() string {
				return fmt.Sprintf("tx need charge gas => %v", txNeedChargeGas)
			})

			runningTxC <- tx
		}
	}
}

// appendChargeGasTx include 3 step:
// 1) create a new charging gas tx
// 2) execute tx by calling native contract
// 3) append tx to DAG struct
func (ts *TxScheduler) appendChargeGasTx(
	block *commonPb.Block,
	addressCache map[string]string,
	snapshot protocol.Snapshot) {
	ts.log.Debug("TxScheduler => appendChargeGasTx() => createChargeGasTx() begin ")
	tx, err := ts.createChargeGasTx(addressCache, snapshot)
	if err != nil {
		// createChargeGasTx already logged the cause; the block simply goes
		// out without a charge-gas tx
		return
	}

	// fixed typo in the trace message (was "executeGhargeGasTx")
	ts.log.Debug("TxScheduler => appendChargeGasTx() => executeChargeGasTx() begin ")
	txSimContext := ts.executeChargeGasTx(tx, block, snapshot)
	tx.Result = txSimContext.GetTxResult()

	ts.log.Debug("TxScheduler => appendChargeGasTx() => appendChargeGasTxToDAG() begin ")
	ts.appendChargeGasTxToDAG(block.Dag, snapshot)
}

// appendCoinbaseTx include 3 step:
// 1) create a new coinbase tx
// 2) execute tx by calling native contract
// 3) append tx to DAG struct
func (ts *TxScheduler) appendCoinbaseTx(
	block *commonPb.Block,
	addressCache map[string]string,
	snapshot protocol.Snapshot) {
	// build the coinbase transaction; errors were already logged inside
	ts.log.Debug("TxScheduler => appendCoinbaseTx() => createCoinbaseTx() begin ")
	coinbaseTx, err := ts.createCoinbaseTx(addressCache, snapshot)
	if err != nil {
		return
	}

	// run the coinbase transaction and attach its result
	ts.log.Debug("TxScheduler => appendCoinbaseTx() => executeCoinbaseTx() begin ")
	simContext := ts.executeCoinbaseTx(coinbaseTx, block, snapshot)
	coinbaseTx.Result = simContext.GetTxResult()

	// register the coinbase transaction in the block's DAG
	ts.log.Debug("TxScheduler => appendCoinbaseTx() => appendCoinbaseToDAG() begin ")
	ts.appendCoinbaseToDAG(block.Dag, snapshot)
}

// signTxPayload signs the marshaled payload of a scheduler-generated tx
// (charge-gas / coinbase) with this node's signer, using the chain's default
// hash algorithm.
func (ts *TxScheduler) signTxPayload(
	payload *commonPb.Payload) ([]byte, error) {

	raw, err := proto.Marshal(payload)
	if err != nil {
		return nil, err
	}

	if ts.signer == nil {
		//TODO: signer is nil here — signing fails but the overall flow succeeds.
		return nil, errors.New("ts.signer is nil")
	}
	// using the default hash type of the chain
	hashType := ts.chainConf.ChainConfig().GetCrypto().Hash
	return ts.signer.Sign(hashType, raw)
}

// createChargeGasTx builds the block's single charge-gas tx: it sums GasUsed
// per payer address over every tx in the snapshot and invokes the
// account-manager contract's CHARGE_GAS_FOR_MULTI_ACCOUNT method with one
// (address, totalGas) parameter per payer. The payload is signed by this
// node's signer, who also endorses the tx.
func (ts *TxScheduler) createChargeGasTx(addressCache map[string]string,
	snapshot protocol.Snapshot) (*commonPb.Transaction, error) {

	var (
		err error
	)
	address2TotalGas := make(map[string]uint64)

	txTable := snapshot.GetTxTable()
	txMap := snapshot.GetTxResultMap()
	for _, tx := range txTable {
		// prefer the pre-computed payer address; fall back to deriving it
		address, ok := addressCache[tx.Payload.TxId]
		if !ok {
			ts.log.Warnf("load address from cache failed for unknown reason")
			_, address, err = getPayerPkAndAddress(tx, snapshot)
			if err != nil {
				ts.log.Errorf("getPayerPkAndAddress failed: err = %v", err)
				continue
			}
		}

		totalGas, exists := address2TotalGas[address]
		if !exists {
			totalGas = uint64(0)
			address2TotalGas[address] = totalGas
		}

		// NOTE(review): assumes txMap holds an entry with a non-nil
		// ContractResult for every tx in the tx table — TODO confirm
		txResult := txMap[tx.Payload.TxId]
		totalGas += txResult.ContractResult.GasUsed

		address2TotalGas[address] = totalGas
	}

	// build the contract parameters: one kv pair per payer address
	parameters := make([]*commonPb.KeyValuePair, 0)
	for address, totalGas := range address2TotalGas {
		keyValuePair := commonPb.KeyValuePair{
			Key:   address,
			Value: []byte(fmt.Sprintf("%d", totalGas)),
		}
		parameters = append(parameters, &keyValuePair)
	}

	ts.log.Debugf("charge_gas_tx's params = %v", parameters)

	// build the payload
	payload := &commonPb.Payload{
		ChainId:        ts.chainConf.ChainConfig().ChainId,
		TxType:         commonPb.TxType_INVOKE_CONTRACT,
		TxId:           utils.GetRandTxId(),
		Timestamp:      time.Now().Unix(),
		ExpirationTime: time.Now().Add(time.Second * 1).Unix(),
		ContractName:   syscontract.SystemContract_ACCOUNT_MANAGER.String(),
		Method:         syscontract.GasAccountFunction_CHARGE_GAS_FOR_MULTI_ACCOUNT.String(),
		Parameters:     parameters,
		Sequence:       uint64(0),
		Limit:          &commonPb.Limit{GasLimit: uint64(0)},
	}

	// sign the payload with the node signer
	signature, err := ts.signTxPayload(payload)
	if err != nil {
		ts.log.Errorf("createChargeGasTx => signTxPayload() error: %v", err.Error())
		return nil, err
	}

	// build the transaction, sent and endorsed by this node's member
	signingMember, err := ts.signer.GetMember()
	if err != nil {
		ts.log.Errorf("createChargeGasTx => GetMember() error: %v", err.Error())
		return nil, err
	}

	endorser := &commonPb.EndorsementEntry{
		Signer: &accesscontrol.Member{
			OrgId:      signingMember.OrgId,
			MemberInfo: signingMember.MemberInfo,
			MemberType: signingMember.MemberType,
		},
		Signature: signature,
	}

	return &commonPb.Transaction{
		Payload: payload,
		Sender: &commonPb.EndorsementEntry{
			Signer:    signingMember,
			Signature: signature,
		},
		Endorsers: []*commonPb.EndorsementEntry{endorser},
		Result:    nil,
	}, nil
}

// createCoinbaseTx builds the block's coinbase tx: the per-payer GasUsed
// totals are JSON-encoded into a single parameter and handed to the coinbase
// system contract's RUN_COINBASE method. The payload is signed by this
// node's signer; the tx carries no endorsements.
// nolint: unused
func (ts *TxScheduler) createCoinbaseTx(addressCache map[string]string,
	snapshot protocol.Snapshot) (*commonPb.Transaction, error) {

	var (
		err error
	)
	senders := make(map[string][]byte)
	parameters := make([]*commonPb.KeyValuePair, 0)
	address2TotalGas := make(map[string]uint64)
	txTable := snapshot.GetTxTable()
	txMap := snapshot.GetTxResultMap()
	for _, tx := range txTable {
		// prefer the pre-computed payer address; fall back to deriving it
		address, ok := addressCache[tx.Payload.TxId]
		if !ok {
			ts.log.Warnf("load address from cache failed for unknown reason")
			_, address, err = getPayerPkAndAddress(tx, snapshot)
			if err != nil {
				ts.log.Errorf("getPayerPkAndAddress failed, err = %v", err)
				continue
			}
		}

		totalGas, exists := address2TotalGas[address]
		if !exists {
			totalGas = uint64(0)
			address2TotalGas[address] = totalGas
		}

		// NOTE(review): assumes txMap holds an entry with a non-nil
		// ContractResult for every tx in the tx table — TODO confirm
		txResult := txMap[tx.Payload.TxId]
		totalGas += txResult.ContractResult.GasUsed

		address2TotalGas[address] = totalGas
	}
	for address, totalGas := range address2TotalGas {
		senders[address] = []byte(fmt.Sprintf("%d", totalGas))
	}

	// all per-sender totals travel as one JSON-encoded parameter
	senderBytes, err := json.Marshal(senders)
	if err != nil {
		ts.log.Errorf(" Marshal senders error: %v", err.Error())
		return nil, err
	}
	kvPair := commonPb.KeyValuePair{
		Key:   chargeGasVmForMultiAccountParameterKey,
		Value: senderBytes,
	}
	parameters = append(parameters, &kvPair)

	// build the payload
	payload := &commonPb.Payload{
		ChainId:        ts.chainConf.ChainConfig().ChainId,
		TxType:         commonPb.TxType_INVOKE_CONTRACT,
		TxId:           utils.GetRandTxId(),
		Timestamp:      time.Now().Unix(),
		ExpirationTime: time.Now().Add(time.Second * 1).Unix(),
		ContractName:   syscontract.SystemContract_COINBASE.String(),
		Method:         syscontract.CoinbaseFunction_RUN_COINBASE.String(),
		Parameters:     parameters,
		Sequence:       uint64(0),
		Limit:          &commonPb.Limit{GasLimit: uint64(0)},
	}

	// sign the payload with the node signer
	signature, err := ts.signTxPayload(payload)
	if err != nil {
		ts.log.Errorf("createCoinbaseTx => signTxPayload() error: %v", err.Error())
		return nil, err
	}

	// build the transaction, sent by this node's member
	signingMember, err := ts.signer.GetMember()
	if err != nil {
		ts.log.Errorf("createCoinbaseTx => GetMember() error: %v", err.Error())
		return nil, err
	}

	return &commonPb.Transaction{
		Payload: payload,
		Sender: &commonPb.EndorsementEntry{
			Signer:    signingMember,
			Signature: signature,
		},
		Endorsers: make([]*commonPb.EndorsementEntry, 0),
		Result:    nil,
	}, nil
}

// executeChargeGasTx runs the scheduler-built charge-gas tx against the
// account-manager native contract on a fresh sim context and applies its
// read/write set to the snapshot as an ExecOrderTxTypeChargeGas entry (last
// in the block). A failed contract lookup is recorded in the tx result; a
// failed contract run is treated as a programming error and panics.
func (ts *TxScheduler) executeChargeGasTx(
	tx *commonPb.Transaction,
	block *commonPb.Block,
	snapshot protocol.Snapshot) protocol.TxSimContext {

	txSimContext := vm.NewTxSimContext(ts.VmManager, snapshot, tx, block.Header.BlockVersion, ts.log)
	ts.log.Debugf("new tx for charging gas, id = %s", tx.Payload.GetTxId())

	// start from an optimistic success result; overwritten below on failure
	result := &commonPb.Result{
		Code: commonPb.TxStatusCode_SUCCESS,
		ContractResult: &commonPb.ContractResult{
			Code:    uint32(0),
			Result:  nil,
			Message: "",
		},
		RwSetHash: nil,
	}

	ts.log.Debugf("executeChargeGasTx => txSimContext.GetContractByName(`%s`)", tx.Payload.ContractName)
	contract, err := txSimContext.GetContractByName(tx.Payload.ContractName)
	if err != nil {
		ts.log.Errorf("Get contract info by name[%s] error:%s", tx.Payload.ContractName, err)
		result.ContractResult.Message = err.Error()
		result.Code = commonPb.TxStatusCode_INVALID_PARAMETER
		result.ContractResult.Code = 1
		txSimContext.SetTxResult(result)
		return txSimContext
	}

	// the parameters carry the address -> totalGas pairs from createChargeGasTx
	params := make(map[string][]byte, len(tx.Payload.Parameters))
	for _, item := range tx.Payload.Parameters {
		address := item.Key
		data := item.Value
		params[address] = data
	}

	// this native contract call will never failed
	contractResultPayload, _, txStatusCode := ts.VmManager.RunContract(contract, tx.Payload.Method, nil,
		params, txSimContext, 0, tx.Payload.TxType)
	if txStatusCode != commonPb.TxStatusCode_SUCCESS {
		panic("running the tx of charging gas will never failed.")
	}
	result.Code = txStatusCode
	result.ContractResult = contractResultPayload
	ts.log.Debugf("finished tx for charging gas, id = :%s, txStatusCode = %v", tx.Payload.TxId, txStatusCode)

	txSimContext.SetTxResult(result)
	snapshot.ApplyTxSimContext(
		txSimContext,
		protocol.ExecOrderTxTypeChargeGas,
		true, true)

	return txSimContext
}

// executeCoinbaseTx runs the scheduler-built coinbase tx through the coinbase
// native contract on a fresh sim context and applies its read/write set to
// the snapshot; the returned sim context carries the tx result. A failed
// contract lookup is recorded in the result; a failed contract run is treated
// as a programming error and panics.
// nolint: unused
func (ts *TxScheduler) executeCoinbaseTx(
	tx *commonPb.Transaction,
	block *commonPb.Block,
	snapshot protocol.Snapshot) protocol.TxSimContext {

	txSimContext := vm.NewTxSimContext(ts.VmManager, snapshot, tx, block.Header.BlockVersion, ts.log)
	ts.log.Debugf("new tx for coinbase, id = %s", tx.Payload.GetTxId())

	// start from an optimistic success result; overwritten below on failure
	result := &commonPb.Result{
		Code: commonPb.TxStatusCode_SUCCESS,
		ContractResult: &commonPb.ContractResult{
			Code:    uint32(0),
			Result:  nil,
			Message: "",
		},
		RwSetHash: nil,
	}

	ts.log.Debugf("executeCoinbaseTx => txSimContext.GetContractByName(`%s`)", tx.Payload.ContractName)
	contract, err := txSimContext.GetContractByName(tx.Payload.ContractName)
	if err != nil {
		ts.log.Errorf("Get contract info by name[%s] error:%s", tx.Payload.ContractName, err)
		result.ContractResult.Message = err.Error()
		result.Code = commonPb.TxStatusCode_INVALID_PARAMETER
		result.ContractResult.Code = 1
		txSimContext.SetTxResult(result)
		return txSimContext
	}

	params := make(map[string][]byte, len(tx.Payload.Parameters))
	for _, item := range tx.Payload.Parameters {
		params[item.Key] = item.Value
	}

	// this native contract call will never failed
	contractResultPayload, _, txStatusCode := ts.VmManager.RunContract(contract, tx.Payload.Method, nil,
		params, txSimContext, 0, commonPb.TxType_INVOKE_CONTRACT)
	if txStatusCode != commonPb.TxStatusCode_SUCCESS {
		ts.log.Errorf("txStatusCode = %d", txStatusCode)
		panic("running the tx of coinbase will never failed.")
	}
	result.Code = txStatusCode
	result.ContractResult = contractResultPayload
	// fixed copy-pasted log message (previously said "charging gas")
	ts.log.Debugf("finished tx for coinbase, id = :%s, txStatusCode = %v", tx.Payload.TxId, txStatusCode)

	txSimContext.SetTxResult(result)
	// NOTE(review): applied as ExecOrderTxTypeChargeGas, mirroring the
	// charge-gas path — confirm a dedicated coinbase exec-order type is not
	// required here
	snapshot.ApplyTxSimContext(
		txSimContext,
		protocol.ExecOrderTxTypeChargeGas,
		true, true)

	return txSimContext
}

// appendChargeGasTxToDAG append the tx to the DAG with dependencies on all tx.
// The charge-gas tx occupies the last snapshot slot, so its vertex lists
// every earlier index (0..size-2) as a neighbor, forcing it to run last.
func (ts *TxScheduler) appendChargeGasTxToDAG(
	dag *commonPb.DAG,
	snapshot protocol.Snapshot) {

	priorTxs := snapshot.GetSnapshotSize() - 1
	vertex := &commonPb.DAG_Neighbor{
		Neighbors: make([]uint32, 0, priorTxs),
	}
	for idx := uint32(0); idx < uint32(priorTxs); idx++ {
		vertex.Neighbors = append(vertex.Neighbors, idx)
	}
	dag.Vertexes = append(dag.Vertexes, vertex)
}

// errResult marks result as an invalid-parameter failure carrying err's
// message and returns it together with the normal exec-order type.
func errResult(result *commonPb.Result, err error) (*commonPb.Result, protocol.ExecOrderTxType, error) {
	msg := err.Error()
	result.Code = commonPb.TxStatusCode_INVALID_PARAMETER
	result.ContractResult.Code = 1
	result.ContractResult.Message = msg
	return result, protocol.ExecOrderTxTypeNormal, err
}

// publicKeyFromCert parses the given certificate bytes and returns its public
// key in string form.
func publicKeyFromCert(member []byte) ([]byte, error) {
	cert, err := utils.ParseCert(member)
	if err != nil {
		return nil, err
	}
	keyStr, err := cert.PublicKey.String()
	if err != nil {
		return nil, err
	}
	return []byte(keyStr), nil
}

// wholeCertInfo looks up the full certificate stored under certHash in the
// cert-manage system contract and wraps it together with its hash.
func wholeCertInfo(txSimContext protocol.TxSimContext, certHash string) (*commonPb.CertInfo, error) {
	cert, err := txSimContext.Get(syscontract.SystemContract_CERT_MANAGE.String(), []byte(certHash))
	if err != nil {
		return nil, err
	}
	return &commonPb.CertInfo{
		Hash: certHash,
		Cert: cert,
	}, nil
}

// SenderGroup groups a batch's txs by the sha256 hash of their payer signer,
// so txs from the same sender can be executed in order.
type SenderGroup struct {
	// sha256(marshaled payer signer) -> that sender's txs, in batch order
	txsMap     map[[32]byte][]*commonPb.Transaction
	// receives a sender's hash key as its txs complete; buffered to the batch size
	doneTxKeyC chan [32]byte
}

// NewSenderGroup builds a SenderGroup for txBatch: txs grouped by payer hash
// key plus a completion channel buffered to the whole batch size.
// @param txBatch
// @return *SenderGroup
func NewSenderGroup(txBatch []*commonPb.Transaction) *SenderGroup {
	group := &SenderGroup{
		txsMap:     getSenderTxsMap(txBatch),
		doneTxKeyC: make(chan [32]byte, len(txBatch)),
	}
	return group
}

// getSenderTxsMap groups txBatch by payer hash key, preserving per-sender
// order; txs whose key cannot be computed fall under the zero key (the error
// is deliberately ignored).
func getSenderTxsMap(txBatch []*commonPb.Transaction) map[[32]byte][]*commonPb.Transaction {
	grouped := make(map[[32]byte][]*commonPb.Transaction, len(txBatch))
	for _, transaction := range txBatch {
		key, _ := getSenderHashKey(transaction)
		grouped[key] = append(grouped[key], transaction)
	}
	return grouped
}

// getSenderHashKey returns sha256 over the marshaled payer signer, used to
// group txs by paying account.
func getSenderHashKey(tx *commonPb.Transaction) ([32]byte, error) {
	sender := getTxPayerSigner(tx)
	// guard against txs without any payer/sender signer: marshaling a nil
	// member is unsafe inside the generated proto code
	if sender == nil {
		return [32]byte{}, errors.New("can not find payer signer from tx")
	}
	keyBytes, err := sender.Marshal()
	if err != nil {
		return [32]byte{}, err
	}
	return sha256.Sum256(keyBytes), nil
}

// getTxPayerSigner returns the member that pays for tx: the explicit payer
// endorsement when present, otherwise the tx sender's signer.
func getTxPayerSigner(tx *commonPb.Transaction) *accesscontrol.Member {
	// don't need version compatibility
	if payer := tx.GetPayer(); payer != nil {
		return payer.GetSigner()
	}
	return tx.GetSender().GetSigner()
}

//func (ts *TxScheduler) getSenderAddressFromTx(
//	tx *commonPb.Transaction, snapshot protocol.Snapshot) (string, error) {
//
//	var err error
//	signingMember := tx.GetSender().GetSigner()
//	if signingMember == nil {
//		err = errors.New(" can not find sender from tx ")
//		return "", err
//	}
//
//	pkPem, err := getMemberPkPem(signingMember, snapshot)
//	if err != nil {
//		return "", err
//	}
//
//	addressValue, exist := ts.pkAddressCache.Load(string(pkPem))
//	if exist {
//		address, ok := addressValue.(string)
//		if ok {
//			return address, nil
//		}
//
//		ts.pkAddressCache.Delete(string(pkPem))
//	}
//
//	pk, err := asym.PublicKeyFromPEM(pkPem)
//	if err != nil {
//		return "", fmt.Errorf("publicKeyFromPEM failed, err = %v", err)
//	}
//	address, err := pkToGasAddress(pk, snapshot.GetLastChainConfig())
//	if err != nil {
//		return "", err
//	}
//
//	ts.pkAddressCache.Store(string(pkPem), address)
//	return address, nil
//}
//
//func (ts *TxScheduler) getSenderPkFromTx(
//	tx *commonPb.Transaction, snapshot protocol.Snapshot) (crypto.PublicKey, error) {
//
//	var err error
//	var pk crypto.PublicKey
//	var pkPem []byte
//
//	signingMember := tx.GetSender().GetSigner()
//	if signingMember == nil {
//		err = errors.New(" can not find sender from tx ")
//		return nil, err
//	}
//
//	pkPem, err = getMemberPkPem(signingMember, snapshot)
//	if pkPem == nil && err != nil {
//		return nil, err
//	}
//
//	pkValue, exist := ts.pem2PkCache.Load(string(pkPem))
//	if exist {
//		pk, ok := pkValue.(crypto.PublicKey)
//		if ok {
//			return pk, nil
//		}
//
//		ts.pem2PkCache.Delete(pkPem)
//	}
//
//	pk, err = asym.PublicKeyFromPEM(pkPem)
//	if err != nil {
//		return nil, fmt.Errorf("publicKeyFromPEM failed, err = %v", err)
//	}
//	ts.pem2PkCache.Store(string(pkPem), pk)
//	return pk, nil
//}

// appendCoinbaseToDAG appends the coinbase tx's vertex to the DAG with a
// dependency on every earlier vertex (0..size-2), so it executes last.
// nolint: unused
func (ts *TxScheduler) appendCoinbaseToDAG(
	dag *commonPb.DAG,
	snapshot protocol.Snapshot) {

	priorTxs := snapshot.GetSnapshotSize() - 1
	vertex := &commonPb.DAG_Neighbor{
		Neighbors: make([]uint32, 0, priorTxs),
	}
	for idx := uint32(0); idx < uint32(priorTxs); idx++ {
		vertex.Neighbors = append(vertex.Neighbors, idx)
	}
	dag.Vertexes = append(dag.Vertexes, vertex)
}

// getTxGasLimit get the gas limit field from tx, and will return err when the gas limit field is not set.
func getTxGasLimit(tx *commonPb.Transaction) (uint64, error) {
	limitInfo := tx.Payload.Limit
	if limitInfo == nil {
		return 0, errors.New("tx payload limit is nil")
	}
	return limitInfo.GasLimit, nil
}

// verifyExecOrderTxType walks block.Txs and counts how many txs fall into each
// ExecOrderTxType bucket (normal / iterator / charge-gas / coinbase), while
// validating that:
//   - every tx id of the block is present in txExecOrderTypeMap;
//   - a charge-gas or coinbase tx only appears as the very last tx of the block;
//   - the charge-gas / coinbase tx count matches the chain configuration for
//     the block's version.
//
// Returns (normalCount, iteratorCount, chargeGasCount, coinBaseCount, err);
// on error the counts accumulated so far are returned alongside it.
func (ts *TxScheduler) verifyExecOrderTxType(block *commonPb.Block,
	txExecOrderTypeMap map[string]protocol.ExecOrderTxType) (uint32, uint32, uint32, uint32, error) {

	txExecOrderNormalCount := uint32(0)
	txExecOrderIteratorCount := uint32(0)
	txExecOrderChargeGasCount := uint32(0)
	txExecOrderCoinBaseCount := uint32(0)

	// check type are all correct
	for i, tx := range block.Txs {
		t, ok := txExecOrderTypeMap[tx.Payload.GetTxId()]
		if !ok {
			return txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount,
				txExecOrderCoinBaseCount, fmt.Errorf("cannot get tx ExecOrderTxType, txId:%s", tx.Payload.GetTxId())
		}

		if t == protocol.ExecOrderTxTypeNormal {
			// NOTE(review): once any iterator tx has been seen, subsequent
			// normal txs are folded into the iterator count instead —
			// presumably because they execute in the serial special-tx
			// phase after the iterator txs; confirm against simulateSpecialTxs.
			if txExecOrderIteratorCount == 0 {
				txExecOrderNormalCount++
			} else {
				txExecOrderIteratorCount++
			}
		} else if t == protocol.ExecOrderTxTypeIterator {
			txExecOrderIteratorCount++
		} else if t == protocol.ExecOrderTxTypeChargeGas {
			txExecOrderChargeGasCount++
			// a charge-gas tx is only legal as the last tx of the block
			if uint32(i+1) != uint32(len(block.Txs)) {
				return txExecOrderNormalCount, txExecOrderIteratorCount,
					txExecOrderChargeGasCount, txExecOrderCoinBaseCount,
					fmt.Errorf("`charge_gas` tx is unexpected, txId:%s, index:%d", tx.Payload.GetTxId(), i)
			}
		} else if t == protocol.ExecOrderTxTypeCoinbase {
			txExecOrderCoinBaseCount++
			// a coinbase tx is only legal as the last tx of the block
			if uint32(i+1) != uint32(len(block.Txs)) {
				return txExecOrderNormalCount, txExecOrderIteratorCount,
					txExecOrderChargeGasCount, txExecOrderCoinBaseCount,
					fmt.Errorf("`coinbase` tx is unexpected, txId:%s, index:%d", tx.Payload.GetTxId(), i)
			}
		}
	}

	// Check the number of gas or coinbase txs.
	// Since v240 the gas tx has been replaced by the coinbase tx,
	// so the gas tx count must be 0.
	blockVersion := block.GetHeader().BlockVersion
	if blockVersion >= blockVersion3000000 {
		// coinbase enabled -> exactly one coinbase tx; disabled -> none;
		// charge-gas txs must never appear at this version
		if (coinbasemgr.CheckCoinbaseEnable(ts.chainConf)) && txExecOrderCoinBaseCount != 1 ||
			(!coinbasemgr.CheckCoinbaseEnable(ts.chainConf) && txExecOrderCoinBaseCount != 0) ||
			txExecOrderChargeGasCount != 0 {
			return txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount,
				txExecOrderCoinBaseCount, fmt.Errorf("verify coinbase's tx(%d) or gas's tx(%d) count failed",
					txExecOrderCoinBaseCount, txExecOrderChargeGasCount)
		}
	} else {
		// pre-v3 blocks: gas optimization enabled -> exactly one charge-gas
		// tx; disabled -> none
		if (coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf) && txExecOrderChargeGasCount != 1) ||
			(!coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf) && txExecOrderChargeGasCount != 0) {
			return txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount,
				txExecOrderCoinBaseCount, fmt.Errorf("verify gas's tx(%d) count failed",
					txExecOrderChargeGasCount)
		}
	}

	return txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount,
		txExecOrderCoinBaseCount, nil
}

// nolint
// getTypeShouldBeByBlockVersion returns the expected ExecOrderTxType for the
// tx at index i of block: when the tx is the last one of the block, it is a
// coinbase tx (>= v3.0.0 with coinbase enabled) or a charge-gas tx (older
// versions with gas optimization enabled); otherwise the passed-in
// typeShouldBe is returned unchanged.
func (ts *TxScheduler) getTypeShouldBeByBlockVersion(blockVersion uint32, i int,
	block *commonPb.Block, typeShouldBe protocol.ExecOrderTxType) protocol.ExecOrderTxType {

	isLastTx := uint32(i+1) == uint32(len(block.Txs))

	// since v240 the gas tx has been replaced by the coinbase tx
	if blockVersion >= blockVersion3000000 {
		if isLastTx && coinbasemgr.CheckCoinbaseEnable(ts.chainConf) {
			return protocol.ExecOrderTxTypeCoinbase
		}
		return typeShouldBe
	}

	if isLastTx && coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf) {
		return protocol.ExecOrderTxTypeChargeGas
	}
	return typeShouldBe
}

// compareDag compare dag. (Currently deprecated)
// It rebuilds the DAG from the verifier's own read/write sets and tx-type
// classification, then checks it equals the DAG carried in the proposed
// block. Blocks older than v2.3.0 are accepted without comparison.
func (ts *TxScheduler) compareDag(block *commonPb.Block, snapshot protocol.Snapshot,
	txRWSetMap map[string]*commonPb.TxRWSet, txExecOrderTypeMap map[string]protocol.ExecOrderTxType) error {
	// DAG comparison only applies from block version 2.3.0 onward
	if block.Header.BlockVersion < blockVersion2300 {
		return nil
	}
	startTime := time.Now()
	// classify and count txs by exec-order type; also validates tx positions
	txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount, txExecOrderCoinBaseCount, err :=
		ts.verifyExecOrderTxType(block, txExecOrderTypeMap)
	if err != nil {
		ts.log.Errorf("verifyExecOrderTxType has err:%s, tx type count:%d,%d,%d, block tx count:%d", err,
			txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount, block.Header.TxCount)
		return err
	}
	// rebuild and verify dag
	txRWSetTable := utils.RearrangeRWSet(block, txRWSetMap)
	// every tx must be accounted for by exactly one of the four type counters
	if uint32(len(txRWSetTable)) !=
		txExecOrderNormalCount+txExecOrderIteratorCount+txExecOrderChargeGasCount+txExecOrderCoinBaseCount {
		return fmt.Errorf("txRWSetTable:%d != txExecOrderTypeCount:%d+%d+%d+%d", len(txRWSetTable),
			txExecOrderNormalCount, txExecOrderIteratorCount, txExecOrderChargeGasCount, txExecOrderCoinBaseCount)
	}

	// first, only build dag for normal tx
	txRWSetTable = txRWSetTable[0:txExecOrderNormalCount]
	dag := snapshot.BuildDAG(ts.chainConf.ChainConfig().Contract.EnableSqlSupport, txRWSetTable)

	blockVersion := block.GetHeader().BlockVersion
	fillGasBalanceErrDag(block, snapshot, blockVersion)

	// then, append special tx into dag
	if txExecOrderIteratorCount > 0 {
		appendSpecialTxsToDag(dag, txExecOrderIteratorCount)
	}

	// Soft-fork handling: since v240 this is implemented with a coinbase tx,
	// so there is no gas tx anymore.
	if blockVersion >= blockVersion3000000 {
		// coinbase Tx
		if coinbasemgr.CheckCoinbaseEnable(ts.chainConf) {
			ts.appendCoinbaseToDAG(dag, snapshot)
		}
	} else {
		if coinbasemgr.IsOptimizeChargeGasEnabled(ts.chainConf) && snapshot.GetSnapshotSize() > 0 {
			ts.appendChargeGasTxToDAG(dag, snapshot)
		}
	}

	// finally, the rebuilt DAG must match the one the proposer shipped
	equal, err := utils.IsDagEqual(block.Dag, dag)
	if err != nil {
		return err
	}
	if !equal {
		ts.log.Warnf("compare block dag (vertex:%d) with simulate dag (vertex:%d)",
			len(block.Dag.Vertexes), len(dag.Vertexes))
		ts.log.Warnf("producer.Dag = {}", block.Dag)
		ts.log.Warnf("verifier.Dag = {}", dag)
		return fmt.Errorf("simulate dag not equal to block dag")
	}
	timeUsed := time.Since(startTime)
	ts.log.Infof("compare dag finished, time used %v", timeUsed)
	return nil
}

// releaseContractCache removes every entry from ts.contractCache.
func (ts *TxScheduler) releaseContractCache() {
	ts.contractCache.Range(func(k, _ interface{}) bool {
		ts.contractCache.Delete(k)
		return true
	})
}

// appendSpecialTxsToDag similar to ts.simulateSpecialTxs except do not execute tx, only handle dag
// txExecOrderSpecialCount must >0
func appendSpecialTxsToDag(dag *commonPb.DAG, txExecOrderSpecialCount uint32) {
	normalCount := uint32(len(dag.Vertexes))

	// The first special tx depends on every normal tx before it.
	first := &commonPb.DAG_Neighbor{
		Neighbors: make([]uint32, 0, normalCount),
	}
	for idx := uint32(0); idx < normalCount; idx++ {
		first.Neighbors = append(first.Neighbors, idx)
	}
	dag.Vertexes = append(dag.Vertexes, first)

	// Each remaining special tx (vertex normalCount+j) depends only on the
	// special tx immediately before it (vertex normalCount+j-1).
	for j := uint32(1); j < txExecOrderSpecialCount; j++ {
		dag.Vertexes = append(dag.Vertexes, &commonPb.DAG_Neighbor{
			Neighbors: []uint32{normalCount + j - 1},
		})
	}
}
