/*
 * Copyright Hundsun Technologies Inc. All Rights Reserved.
 */

package service

import (
	"context"
	"fmt"
	"math/rand"
	"runtime"
	"strconv"
	"time"

	"hundsun.com/hsl/hschain/common/config"
	"hundsun.com/hsl/hschain/common/log/logging"
	"hundsun.com/hsl/hschain/common/types"
	"hundsun.com/hsl/hschain/contractmanager"
	ecom "hundsun.com/hsl/hschain/executor/common"
	pbcom "hundsun.com/hsl/hschain/protos/common"
	pb "hundsun.com/hsl/hschain/protos/execute"
	"hundsun.com/hsl/hschain/protos/ledger"
	"hundsun.com/hsl/hschain/store"
	dbm "hundsun.com/hsl/hschain/store/db"
	"hundsun.com/hsl/hschain/store/extend/account"
	"hundsun.com/hsl/hschain/store/mq"
	"hundsun.com/hsl/hschain/store/state"
)

const (
	// DefaultBatchScanNum is the default number of KV entries read per
	// batch scan when no "batchScanNum" parameter is configured.
	DefaultBatchScanNum = 2000
)

// WorkerService worker service
type WorkerService struct {
	config *config.ModuleConfig
	conf   *Config
	log    logging.Log
	ctx    context.Context
	module *workerModule

	ledgerDB    dbm.Database // 存储账本数据句柄
	stateDB     state.State  // 存储状态数据DB句柄
	topicClient mq.Client    // topic类型client
	workClient  mq.Client    // work类型client
}

// Config ...
type Config struct {
	WorkerID      int // 每个worker的编号
	ExecThreadNum int // 执行线程数
	BatchScanNum  int // 批量读取kv数
}

// childResult carries one goroutine's chunk result back to ParallelExecute.
type childResult struct {
	index   int                    // chunk index, used to restore original group order
	execRes []*pb.TransactionGroup // executed groups of this chunk
	midAddr bool                   // whether this chunk generated a middle address
	txCount int                    // number of transactions executed in this chunk
	err     error                  // error returned by this chunk's execution
}

// Init 初始化
func (w *WorkerService) Init(module *workerModule) error {
	w.config = module.Config
	w.conf = parseModuleConfig(module.Config)
	w.log = log
	ctx, _ := context.WithCancel(module.RootCtx)
	w.ctx = ctx
	w.module = module
	w.ledgerDB = store.GetLedgerDB()
	statedb, err := store.NewState(store.GetStateDB())
	if err != nil {
		panic(err)
	}
	w.stateDB = statedb
	tClient, err := store.NewMqClient(mq.ModeTopic.String())
	if err != nil {
		panic(err)
	}
	tClient.Sub(types.WorkerModule, "topic")
	w.topicClient = tClient
	wClient, err := store.NewMqClient(mq.ModeWorker.String())
	if err != nil {
		panic(err)
	}
	wClient.Sub(types.WorkerModule, "worker")
	w.workClient = wClient
	return nil
}

// Close ...
func (w *WorkerService) Close() error {
	if w.topicClient != nil {
		w.topicClient.Close()
	}
	if w.workClient != nil {
		w.workClient.Close()
	}
	return nil
}

// parseModuleConfig builds the worker Config from the module parameters,
// falling back to defaults (a random worker id, NumCPU execution threads,
// DefaultBatchScanNum) when cfg is nil or a parameter is absent/invalid.
func parseModuleConfig(cfg *config.ModuleConfig) *Config {
	conf := &Config{
		WorkerID:      rand.Int(), // default: randomly generated worker id
		ExecThreadNum: runtime.NumCPU(),
		BatchScanNum:  DefaultBatchScanNum,
	}
	if cfg == nil {
		log.Warnf("parseModuleConfig fail, cfg is nil")
		return conf
	}

	// intOption reports the parameter's integer value and whether it is
	// a valid integer not below min.
	intOption := func(name string, min int) (int, bool) {
		v, err := strconv.Atoi(cfg.Parameters[name])
		return v, err == nil && v >= min
	}
	if v, ok := intOption("workerId", 0); ok {
		conf.WorkerID = v
	}
	if v, ok := intOption("execThreadNum", 1); ok {
		conf.ExecThreadNum = v
	}
	if v, ok := intOption("batchScanNum", 1); ok {
		conf.BatchScanNum = v
	}

	return conf
}

// AddContracts 载入合约配置
func AddContracts(mpcfg map[string]*config.PluginConfig) {
	contractmanager.AddContracts(mpcfg)
}

// pubHeartbeatToBoss periodically publishes this worker's id as a
// heartbeat to the blockchain (boss) module over the topic MQ client,
// until w.ctx is cancelled.
func (w *WorkerService) pubHeartbeatToBoss() {
	client := w.topicClient
	if client == nil {
		panic("pubHeartbeatTo BlockChain topicClient is nil")
	}
	// Publish a heartbeat every 2 seconds.
	t := time.NewTicker(time.Second * 2)
	defer t.Stop() // release ticker resources when the loop exits
	for {
		select {
		case <-w.ctx.Done():
			return
		case <-t.C:
			// client was checked non-nil above; the former inner
			// w.topicClient nil-check was redundant (and racy, since
			// the send went through client regardless).
			msg := client.NewMessage(types.BlockChainModule, "topic", types.EventBlockWorkerHeartbeat, w.conf.WorkerID)
			if err := client.Send(msg, false); err != nil {
				w.log.Errorf("pubHeartbeatToBoss err %v", err)
			}
		}
	}
}

// ParallelExecute 并行执行
func (w *WorkerService) ParallelExecute(groups []*pb.TransactionGroup, currentBlockHeader *ledger.BlockHeader,
	preBlockExecResults *pb.ExecStateDatas,
	fn func(groups []*pb.TransactionGroup, currentBlockHeader *ledger.BlockHeader,
		preBlockExecResults *pb.ExecStateDatas) ([]*pb.TransactionGroup, bool, int, error)) ([]*pb.TransactionGroup, bool, int, error) {
	log := w.log
	ncpu := w.conf.ExecThreadNum
	if len(groups) <= 1 || ncpu <= 1 {
		return fn(groups, currentBlockHeader, preBlockExecResults)
	}

	step := len(groups) / ncpu
	if step < 1 {
		step = 1
	}

	ch := make(chan *childResult, 10)
	rem := len(groups) % step
	l := len(groups) / step
	if rem != 0 {
		l++
	}
	log.Debugf("ParallelExecute ncpu [%d], step [%d], cell num [%d]", ncpu, step, l)

	for i := 0; i < l; i++ {
		end := (i + 1) * step
		if end > len(groups) {
			end = len(groups)
		}
		child := groups[i*step : end]
		go func(index int, localGroups []*pb.TransactionGroup) {
			grResult, isMidAddr, txCount, err := fn(localGroups, currentBlockHeader, preBlockExecResults)
			ch <- &childResult{
				index:   index,
				execRes: grResult,
				midAddr: isMidAddr,
				txCount: txCount,
				err:     err,
			}
		}(i, child)
	}

	childlist := make([][]*pb.TransactionGroup, l)
	var midAddr bool
	var err error
	var sumTxCount int
	for i := 0; i < l; i++ {
		sub := <-ch
		sumTxCount += sub.txCount
		childlist[sub.index] = append(childlist[sub.index], sub.execRes...)
		if !midAddr && sub.midAddr {
			midAddr = true
		}
		if err == nil && sub.err != nil {
			err = sub.err
		}
	}
	var resTxGrops []*pb.TransactionGroup
	for _, list := range childlist {
		resTxGrops = append(resTxGrops, list...)
	}
	return resTxGrops, midAddr, sumTxCount, err
}

// Execute 执行
func (w *WorkerService) Execute(groups []*pb.TransactionGroup, header *ledger.BlockHeader,
	preBlockExecResults *pb.ExecStateDatas) ([]*pb.TransactionGroup, bool, int, error) {
	log := w.log
	start := time.Now()
	batchCount := w.conf.BatchScanNum
	stateDataMap, err := GetPreStateData(groups, preBlockExecResults, batchCount, w.stateDB)
	log.Debugf("PreBatchGetKVs execute block num is %d, group count is %d, cost time %v, err is %v \n",
		header.Number, len(groups), time.Since(start), err)
	//是否产生了新的中间地址
	haveMidAddress := false
	groupsTxCount := 0
	startTime := time.Now()
	defer func() {
		log.Infof("execute block num is %d, group count is %d, groupsTxCount %d, cost time %v \n",
			header.Number, len(groups), groupsTxCount, time.Since(startTime))
	}()
	ctx := ecom.NewExecContent(
		header,
		preBlockExecResults.PreStateHash,
		w.ledgerDB,
		ecom.NewStateHandle(nil, stateDataMap, w.stateDB),
	)
	var reGroups []*pb.TransactionGroup
	for _, grp := range groups {
		groupsTxCount += len(grp.Txs)
		for i, groupedTx := range grp.Txs {
			txres, midAddr := ExecTx(ctx, groupedTx)
			groupedTx.TxExecRst = txres.TxExecRst
			groupedTx.VarsRWStatus = txres.VarsRWStatus
			grp.Txs[i] = groupedTx
			// updata state
			ctx.GetStateHandle().UpdatePreState()
			// check have middle address generate
			if !haveMidAddress && midAddr {
				haveMidAddress = midAddr
			}
		}
		reGroups = append(reGroups, grp)
	}
	return reGroups, haveMidAddress, groupsTxCount, nil
}

// GetPreStateData 仅仅只从数据库批量预读account账户,其他的kv不做预读
func GetPreStateData(groups []*pb.TransactionGroup, preBlockExecResults *pb.ExecStateDatas,
	maxBatchCount int, statedb state.State) (map[string][]byte, error) {
	scanaddrs := make(map[string]struct{})
	stateMap := make(map[string][]byte)
	// 上一个未存储区块的状态数据
	for _, kv := range preBlockExecResults.Kvs {
		stateMap[string(kv.Key)] = kv.Value
	}
	count := 0
	for _, grp := range groups {
		for _, tx := range grp.Txs {
			keys := getTxScanAccountAddr(tx, stateMap)
			for _, k := range keys {
				scanaddrs[k] = struct{}{}
			}
			count += len(keys)
		}
		if count >= maxBatchCount {
			break
		}
	}

	scans := make([][]byte, len(scanaddrs))
	index := 0
	for key := range scanaddrs {
		scans[index] = []byte(key)
		index++
	}

	if statedb != nil {
		data := &pbcom.StateGet{
			StateHash: preBlockExecResults.PreStateHash,
			Keys:      scans,
		}
		values, _ := statedb.Get(data)
		for i, value := range values {
			if len(value) > 0 {
				stateMap[string(scans[i])] = value
			}
		}
	}
	return stateMap, nil
}

// getTxScanAccountAddr returns the account state keys for the tx sender
// and receiver that are not already present in pre, so they can be
// batch pre-read from the state database.
func getTxScanAccountAddr(tx *pb.GroupedTransaction, pre map[string][]byte) (keys []string) {
	for _, addr := range []string{tx.GetTx().GetFrom(), tx.GetTx().GetTo()} {
		if addr == "" {
			continue
		}
		key := string(account.GenAccountKey(addr))
		if _, found := pre[key]; !found {
			keys = append(keys, key)
		}
	}
	return keys
}
