/*
 * Copyright Hundsun Technologies Inc. All Rights Reserved.
 */

package miner

import (
	"context"
	"errors"
	"sync"
	"time"

	"github.com/golang/protobuf/proto"
	"go.uber.org/atomic"
	commonconfig "hundsun.com/hsl/hschain/common/config"
	"hundsun.com/hsl/hschain/common/keystore"
	"hundsun.com/hsl/hschain/common/log/logging"
	"hundsun.com/hsl/hschain/common/message"
	"hundsun.com/hsl/hschain/common/types"
	"hundsun.com/hsl/hschain/common/util/pb"
	"hundsun.com/hsl/hschain/consensus/common"
	dposcomm "hundsun.com/hsl/hschain/consensus/impl/dpos/common"
	minercomm "hundsun.com/hsl/hschain/consensus/impl/dpos/miner/common"
	consensustypes "hundsun.com/hsl/hschain/consensus/types"
	"hundsun.com/hsl/hschain/module/common/utils"
	p2putils "hundsun.com/hsl/hschain/p2p/common/utils"
	"hundsun.com/hsl/hschain/protos"
	pbledger "hundsun.com/hsl/hschain/protos/ledger"
	"hundsun.com/hsl/hschain/protos/p2p"
	prototypes "hundsun.com/hsl/hschain/protos/types"
	"hundsun.com/hsl/hschain/store/mq"
)

const (
	// configParamMinerAddress is the config key for the miner account address
	// used to sign blocks and endorsement results.
	configParamMinerAddress = "minerAddress"

	// configParamBlockProducingInterval: how long to wait after producing one
	// block within the same time slice before producing the next (unit: ms).
	configParamBlockProducingInterval = "producingInterval"

	// defaultProducingInterval is the fallback producing interval in ms.
	defaultProducingInterval = 100

	// configParamMinPendingTxFetchInterval: retry interval when fetching
	// transactions from the pending-tx pool during block production and none
	// are found (unit: ms).
	configParamMinPendingTxFetchInterval = "minPendingTxFetchInterval"
	defaultMinPendingTxFetchInterval     = 100

	// configParamMinCheckBlockExecResultFetchInterval: retry interval when
	// fetching the block execution (verification) result during block
	// production and it is not yet available (unit: ms).
	configParamMinCheckBlockExecResultFetchInterval = "minCheckBlockExecResultFetchInterval"
	defaultMinCheckBlockExecResultFetchInterval     = 100

	// logFieldMiner is the structured-log field key for the miner address.
	logFieldMiner = "miner"
	// txMarkerLockFmt is the format of the tx-marker lock name, keyed by an
	// integer (runtime string — do not change).
	txMarkerLockFmt = "tx-marker-_%d"
)

// NewMiner constructs a DposMiner bound to the given consensus base client.
// The tx-marker lock-hold flag starts out released (false); all remaining
// fields are populated later by Start.
func NewMiner(client *common.BaseClient) *DposMiner {
	miner := &DposMiner{
		baseClient:           client,
		TxMarkerLockHoldFlag: atomic.NewBool(false),
	}
	return miner
}

// DposMiner is the miner module of the dpos consensus implementation.
type DposMiner struct {
	// MinerAccount is the miner account used to sign blocks and block
	// endorsement results.
	MinerAccount keystore.Account

	// BrotherMiners are the other miners registered with the same service
	// registry.
	BrotherMiners *dposcomm.SharedMiners

	// TxMarkerLockHoldFlag indicates whether transactions in the buffer pool
	// may be marked while endorsing.
	TxMarkerLockHoldFlag *atomic.Bool

	// CurrentRoundBlockBuffer is the stack of blocks produced in, or received
	// for endorsement during, the current round.
	CurrentRoundBlockBuffer *RoundBlocksBuffer

	// blockSynchronizedMu serializes the synchronization check in
	// IsBlockSynchronized.
	blockSynchronizedMu sync.Mutex

	Producer *Producer

	ShutdownWg sync.WaitGroup

	Log logging.Log

	Config *commonconfig.ModuleConfig

	baseClient *common.BaseClient

	rootCtx context.Context
}

// Start starts the miner module: it wires configuration and logging,
// initializes shared miner state, builds the block producer and registers the
// relay-message handler, then launches the producer goroutine. On any
// initialization error it logs and returns without starting the producer.
func (m *DposMiner) Start(rootCtx context.Context, cfg *commonconfig.ModuleConfig, log logging.Log) {
	// The original derived a child context via context.WithCancel and
	// discarded the cancel function (go vet: lostcancel). The child was only
	// ever cancelled through its parent, so using the parent directly is
	// equivalent and leak-free.
	m.rootCtx = rootCtx
	m.Config = cfg
	m.Log = log

	err := minercomm.Initialize(m.rootCtx, m.Log, m.baseClient)
	if err != nil {
		m.Log.Errorf("initialize failed, err: %s", err.Error())
		return
	}

	// Fall back to consensus-wide defaults when chain parameters leave the
	// limits unset.
	// NOTE(review): both values below are computed but never used later in
	// this function — confirm whether they should be handed to the producer.
	maxFeeOfBlock := m.baseClient.GetCurParam().GetFeeLimit().GetMaxBlockFee()
	if maxFeeOfBlock == 0 {
		maxFeeOfBlock = consensustypes.MaxBlockFee
	}

	maxTxCountOfBlock := m.baseClient.GetCurParam().GetBlockLimit().GetMaxTxCount()
	if maxTxCountOfBlock == 0 {
		maxTxCountOfBlock = consensustypes.MaxTxCountOfBlock
	}

	m.Log.Infof("PeerID private key path: %s", m.Config.Parameters["peerIdPriKeyPath"])
	localPeerID, err := p2putils.IDFromPrivateKey(m.Config.Parameters["peerIdPriKeyPath"])
	if err != nil {
		m.Log.Errorf("load private key failed, err: %s", err.Error())
		return
	}
	ks := keystore.GetKeyStore()
	// Only the peer named as sequence center in the genesis transaction runs
	// the block sequence server.
	genesisSeqCenterPeerID := pb.GetGenesisTransactionPayload().GetSequenceCenter().GetPeerID()
	if genesisSeqCenterPeerID != localPeerID.String() {
		m.Log.Infof("No need to start sequence center, localPeerID: " + localPeerID.String() + ", genesis seqcenter peerID: " + genesisSeqCenterPeerID)
	} else {
		go func() {
			// Use a goroutine-local error instead of the enclosing err: the
			// original wrote to the shared variable concurrently (data race).
			if initErr := m.baseClient.InitBlockSequenceServer(ks); initErr != nil {
				m.Log.Errorf("Init block sequence server failed, %s", initErr.Error())
			}
		}()
	}

	minerAddress := utils.GetStringConfigParameterValue(m.Config, configParamMinerAddress, "")
	if len(minerAddress) == 0 {
		m.Log.Errorf("address of miner[%s] not configured", localPeerID)
		return
	}

	m.MinerAccount = ks.GetAccount(protos.Address(minerAddress))
	if m.MinerAccount == nil {
		m.Log.Errorf("account '%s' not found in keystore", minerAddress)
		return
	}

	m.Log = m.Log.WithField(logFieldMiner, protos.Address(minerAddress).GetShortString())
	m.BrotherMiners = &dposcomm.SharedMiners{}
	m.CurrentRoundBlockBuffer = NewBlockBuffer(dposcomm.MaxBlocksInTimeSlice * dposcomm.MaxMinersCount)

	// Initialize the block producer BEFORE subscribing to relay messages:
	// the handler below dereferences m.Producer, so registering first could
	// hit a nil Producer if a message arrived during initialization.
	producingInterval := utils.GetIntConfigParameterValue(m.Config, configParamBlockProducingInterval, defaultProducingInterval)
	pendingTxsRetryInterval := utils.GetIntConfigParameterValue(m.Config, configParamMinPendingTxFetchInterval, defaultMinPendingTxFetchInterval)
	blockExecResultRetryInterval := utils.GetIntConfigParameterValue(m.Config, configParamMinCheckBlockExecResultFetchInterval, defaultMinCheckBlockExecResultFetchInterval)

	m.Producer = &Producer{
		module: m,
		// The original wrapped m.rootCtx with another WithCancel and leaked
		// its cancel func; the plain parent context behaves the same.
		ctx:                   m.rootCtx,
		producedBlockEdFailed: atomic.NewBool(false),
		// PeerID of the sequence center (from the genesis transaction).
		sequenceCenterPeer: genesisSeqCenterPeerID,
	}
	err = m.Producer.init(m.MinerAccount, producingInterval, pendingTxsRetryInterval, blockExecResultRetryInterval)
	if err != nil {
		m.Log.Errorf("init producer failed, err: %s", err.Error())
		return
	}

	// Dispatch incoming consensus relay messages to the endorsement and
	// sequence-callback handlers.
	m.baseClient.SubRelayMsgRequestFunc(func(msg *p2p.P2PMsg) error {
		relayMsg, err := message.DecodeMessage(msg)
		if err != nil {
			return err
		}

		switch relayMsg.MsgType {
		case consensustypes.ConsensusEndorserMsg:
			if msg.PeerId == msg.TargetPeerId {
				m.Log.Infof("No need to endorse for block generated by local peer.")
				return nil
			}
			m.Log.Infof("receive ed request, originPeer: %s and targetPeer: %s", msg.PeerId, msg.TargetPeerId)
			return m.Endorse(msg.PeerId, relayMsg)
		case consensustypes.ConsensusEndorserRespMsg:
			m.Log.Infof("receive ed response, originPeer: %s and targetPeer: %s", msg.PeerId, msg.TargetPeerId)
			return m.Producer.recvEndorse(msg.PeerId, relayMsg)
		case consensustypes.BlockSequenceAddBlockRespMsg:
			return m.Producer.SeqBlockSavedCallback(msg.PeerId, relayMsg)
		default:
			return errors.New("unknown msg type")
		}
	})

	// Register with the shutdown wait group before the producer goroutine is
	// started, so a fast Done cannot race ahead of the Add.
	m.ShutdownWg.Add(1)
	m.ProduceBlocks()
}

// ProduceBlocks launches the block producer loop on its own goroutine and
// returns immediately. The producer stops when its context is cancelled.
func (m *DposMiner) ProduceBlocks() {
	go m.Producer.Run()
}

// ClearBlockSynchronized resets the block-synchronized flag on the base
// client so the next IsBlockSynchronized call re-checks against the sequence
// center.
func (m *DposMiner) ClearBlockSynchronized() {
	m.baseClient.SetBlockSynchronized(false)
}

// IsBlockSynchronized reports whether the local chain has caught up with the
// sequence center's current block sequence. A positive result is cached on
// the base client; the check itself is serialized by blockSynchronizedMu.
// (Consistency fix: the original mixed m.GetClient() and m.baseClient for the
// same underlying client — unified to m.baseClient throughout.)
func (m *DposMiner) IsBlockSynchronized() bool {
	// Fast path: already known to be synchronized.
	if m.baseClient.BlockSynchronized() {
		return true
	}
	m.blockSynchronizedMu.Lock()
	defer m.blockSynchronizedMu.Unlock()

	// Sequence of the last block persisted locally.
	lastSequence := m.baseClient.GetLastBlockSequence()

	var currentSequence uint64
	var err error
	if m.baseClient.GetSequenceServer() == nil {
		// No local sequence server: ask the remote sequence center.
		currentSequence, err = m.RequestCurrentBlockSequence()
		if err != nil {
			m.Log.Errorf("request current block sequence failed, err: %s", err.Error())
			return false
		}
	} else {
		currentSequence = m.baseClient.GetSequenceServer().GetCurrentSequence()
		m.Log.Infof("get current block sequence from local, currentSequence: %d", currentSequence)
	}

	// Double check: another goroutine may have marked us synchronized while
	// we were fetching the current sequence.
	if m.baseClient.BlockSynchronized() {
		return true
	}

	synchronized := lastSequence >= currentSequence
	m.Log.Debugf("currentSequence: %d and chkMsg block sequence: %d", currentSequence, lastSequence)
	m.baseClient.SetBlockSynchronized(synchronized)
	m.Log.Infof("Block synchronized finished, synchronized %v", synchronized)
	return synchronized
}

// GetClient returns the consensus module's base (mq) client.
func (m *DposMiner) GetClient() *common.BaseClient {
	return m.baseClient
}

// RequestCurrentBlockSequence asks the sequence center peer for the current
// block sequence number over the p2p relay and waits (with a timeout) for the
// reply. It returns the sequence number, or an error on encode, send,
// timeout, or decode failure.
func (m *DposMiner) RequestCurrentBlockSequence() (uint64, error) {
	relayMsg := &consensustypes.ConsensRelayMsg{
		MsgType: consensustypes.BlockSequenceRequestMsg,
	}

	p2pMsg, err := message.EncodeMessage(m.Producer.sequenceCenterPeer, relayMsg)
	if err != nil {
		return 0, err
	}

	workerClient := m.GetClient().GetWorkerClient()
	reqMsg := workerClient.NewMessage(types.P2pModule, mq.ModeWorker.String(), types.EventConsensusSyncRequestMsg, p2pMsg)
	if err = workerClient.Send(reqMsg, true); err != nil {
		return 0, err
	}

	resp, err := workerClient.WaitTimeout(reqMsg, time.Second*consensustypes.DefaultMqTimeOut)
	if err != nil {
		return 0, err
	}
	// Guarded type assertion: the original asserted unconditionally and would
	// panic on an unexpected payload type; surface it as an error instead.
	p2pResp, ok := resp.Data.(*p2p.P2PMsg)
	if !ok {
		return 0, errors.New("unexpected response payload type, want *p2p.P2PMsg")
	}
	relayResp, err := message.DecodeMessage(p2pResp)
	if err != nil {
		return 0, err
	}

	currentSequence := &prototypes.Uint64{}
	if err = proto.Unmarshal(relayResp.Payload, currentSequence); err != nil {
		return 0, err
	}
	return currentSequence.I, nil
}

// getBlockNumber returns the number from the block's header, or 0 when the
// header is nil. Using the generated protobuf getters (nil-receiver safe, as
// elsewhere in this file, e.g. GetCurParam().GetFeeLimit()) additionally
// makes a nil *pbledger.Block safe — the original dereferenced block.Header
// directly and would panic on a nil block.
func getBlockNumber(block *pbledger.Block) uint64 {
	return block.GetHeader().GetNumber()
}

// NewBlockBuffer creates a RoundBlocksBuffer able to hold up to capacity
// consecutive blocks.
// (Renamed the parameter from `cap`, which shadowed the predeclared
// built-in; call sites are unaffected by parameter names in Go.)
func NewBlockBuffer(capacity int) *RoundBlocksBuffer {
	return &RoundBlocksBuffer{
		size: 0,
		data: make([]*pbledger.Block, capacity),
	}
}

// RoundBlocksBuffer caches the blocks of the current round; buffered blocks
// must be consecutive by block number. The embedded RWMutex guards both
// fields.
type RoundBlocksBuffer struct {
	// data is the fixed-capacity backing array; data[:size] holds the
	// buffered blocks in ascending block-number order.
	data []*pbledger.Block
	// size is the number of valid blocks currently stored.
	size int
	sync.RWMutex
}

// Add appends a block to the buffer. It fails (returns false) when:
// 1) the buffer is already at capacity, or
// 2) the block is not the direct successor of the last buffered block.
func (r *RoundBlocksBuffer) Add(blk *pbledger.Block, log logging.Log) bool {
	blkLog := log.WithField(logging.LogFieldBlockNumber, blk.Header.Number)
	r.Lock()
	defer r.Unlock()
	blkLog.Trace("adding to RoundBlocksBuffer")

	switch {
	case r.size == len(r.data):
		// Capacity reached.
		blkLog.Trace("RoundBlocksBuffer is full")
		return false
	case r.size > 0 && blk.Header.Number-r.data[r.size-1].Header.Number != 1:
		// Blocks must stay consecutive.
		blkLog.Tracef("block is not the next block of last block[%d] in buffer", r.data[r.size-1].Header.Number)
		return false
	}

	r.data[r.size] = blk
	r.size++
	blkLog.Tracef("added to block buffer,current buffer size:%d", r.size)
	return true
}

// Clear removes all buffered blocks.
// It also nils out the stored pointers: the original only reset size, which
// kept every cleared block reachable through the backing array and prevented
// garbage collection until the slot was overwritten.
func (r *RoundBlocksBuffer) Clear(log logging.Log) {
	r.Lock()
	defer r.Unlock()
	log.Trace("RoundBlocksBuffer cleared")
	for i := 0; i < r.size; i++ {
		r.data[i] = nil // release the reference so the block can be GC'd
	}
	r.size = 0
}

// IsEmpty reports whether the buffer currently holds no blocks.
func (r *RoundBlocksBuffer) IsEmpty() bool {
	r.RLock()
	defer r.RUnlock()
	empty := r.size == 0
	return empty
}

// GetLast returns the most recently added block, or nil when the buffer is
// empty.
func (r *RoundBlocksBuffer) GetLast() *pbledger.Block {
	r.RLock()
	defer r.RUnlock()
	if r.size > 0 {
		return r.data[r.size-1]
	}
	return nil
}

// PurgeBlockNumberEq drops blocks from the tail of the buffer until it is
// empty or the last block's number is <= blockNumber, then returns the new
// last block (nil when the buffer ends up empty).
//
// Fixes vs. the original:
//  1. It now takes the write lock: every other method of RoundBlocksBuffer
//     locks internally, and this one mutates r.size (data race otherwise).
//     NOTE(review): assumes callers do not already hold the lock, matching
//     the sibling methods' internal-locking convention — confirm call sites.
//  2. The "finish purging" trace runs in a deferred closure so it reports
//     the post-purge size; `defer log.Tracef(..., r.size, ...)` evaluated
//     r.size when the defer was declared and always logged the old size.
//  3. Purged slots are nil'ed so dropped blocks can be garbage-collected.
func (r *RoundBlocksBuffer) PurgeBlockNumberEq(blockNumber uint64, log logging.Log) *pbledger.Block {
	r.Lock()
	defer r.Unlock()
	log.Tracef("start purging blocks from buffer{size=%d} until block[%d]", r.size, blockNumber)
	defer func() {
		log.Tracef("finish purging blocks from buffer{size=%d} until block[%d]", r.size, blockNumber)
	}()
	for r.size > 0 && r.data[r.size-1].Header.Number > blockNumber {
		r.size--
		log.Tracef("block[%d] purged", r.data[r.size].Header.Number)
		r.data[r.size] = nil // release the reference so the block can be GC'd
	}
	if r.size > 0 {
		return r.data[r.size-1]
	}
	return nil
}
