package storeman

import (
	"bytes"
	"context"
	"errors"
	"fmt"
	"github.com/wanchain/go-mpc/params"
	"github.com/wanchain/go-mpc/rlp"
	"github.com/wanchain/go-mpc/storeman/schnorrmpc"
	"github.com/wanchain/go-mpc/storeman/schnorrmpcbn"
	"math/big"
	"path/filepath"
	"sync"
	"time"

	"github.com/wanchain/go-mpc/common"
	"github.com/wanchain/go-mpc/storeman/btc"
	"github.com/wanchain/go-mpc/storeman/osmconf"

	"os"

	"github.com/wanchain/go-mpc/accounts"
	"github.com/wanchain/go-mpc/log"
	"github.com/wanchain/go-mpc/p2p"
	"github.com/wanchain/go-mpc/p2p/discover"
	"github.com/wanchain/go-mpc/rpc"
	"github.com/wanchain/go-mpc/storeman/storemanmpc"
	mpcprotocol "github.com/wanchain/go-mpc/storeman/storemanmpc/protocol"
	"github.com/wanchain/go-mpc/storeman/validator"
)

// Config holds the user-supplied settings for the storeman service.
type Config struct {
	StoremanNodes     []*discover.Node // static list of storeman group nodes
	Password          string           // account password handed to the MPC distributor (see New)
	WorkingPwd        string           // working-address password — not referenced in this file; presumably used elsewhere
	DataPath          string           // base directory; the validator DB lives under <DataPath>/storeman/data
	SchnorrThreshold  int              // Schnorr MPC signing threshold (see DefaultConfig: 26)
	SchnorrTotalNodes int              // total Schnorr MPC group size (see DefaultConfig: 50)
}

// DefaultConfig provides the default storeman settings: no static nodes and
// a 26-of-50 Schnorr signing group.
var DefaultConfig = Config{
	StoremanNodes:     make([]*discover.Node, 0),
	SchnorrThreshold:  26,
	SchnorrTotalNodes: 50,
}

// StrmanKeepAlive is a keep-alive request between storeman peers.
// NOTE(review): all fields are unexported, so this struct would not survive
// RLP/JSON encoding as-is — confirm how it is actually serialized.
type StrmanKeepAlive struct {
	version   int             // protocol version of the sender
	magic     int             // expected to carry keepaliveMagic — TODO confirm at the send site
	recipient discover.NodeID // node the keep-alive is addressed to
}

// StrmanKeepAliveOk is the acknowledgement to a StrmanKeepAlive.
// NOTE(review): fields are unexported — see the serialization caveat on
// StrmanKeepAlive.
type StrmanKeepAliveOk struct {
	version int // protocol version of the responder
	magic   int // echo of the request magic — TODO confirm
	status  int // responder status code; semantics not visible in this file
}

// StrmanAllPeers carries the address book of known storeman peers; the three
// slices are parallel (Ip[i], Port[i] and Nodeid[i] describe one peer).
type StrmanAllPeers struct {
	Ip     []string // peer IP addresses
	Port   []string // peer listening ports, as strings
	Nodeid []string // peer node ids, as strings
}

// StrmanGetPeers asks a remote node for its peer list, announcing the local
// listening port so the remote side can dial back.
type StrmanGetPeers struct {
	LocalPort string // local listening port of the requester
}

// keepaliveMagic is presumably the magic value carried in keep-alive
// messages (StrmanKeepAlive.magic) — not referenced in this chunk.
const keepaliveMagic = 0x33

// New creates a storeman service instance: it wires up the MPC distributor,
// ensures the validator database directory exists, opens the database (and
// panics if that fails, since the service cannot run without it), and
// registers the storeman p2p sub-protocol handler.
func New(cfg *Config, accountManager *accounts.Manager, aKID, secretKey, region string) *Storeman {
	sm := &Storeman{
		peers:      make(map[discover.NodeID]*Peer),
		quit:       make(chan struct{}),
		cfg:        cfg,
		isSentPeer: false,
		peersPort:  make(map[discover.NodeID]string),
	}

	// The distributor gets the Storeman both as message sender and as peer
	// oracle (two interface roles, one implementation).
	sm.mpcDistributor = storemanmpc.CreateMpcDistributor(accountManager, sm, aKID, secretKey, region, cfg.Password, sm)

	// Make sure the validator database directory exists before opening it.
	dbPath := filepath.Join(cfg.DataPath, "storeman", "data")
	if _, err := os.Stat(dbPath); os.IsNotExist(err) {
		if err := os.MkdirAll(dbPath, 0700); err != nil {
			log.SyslogErr("make Storeman path fail", "err", err.Error())
		}
	}

	version := fmt.Sprintf("%d.%d.%d", params.VersionMajor, params.VersionMinor, params.VersionPatch)
	log.Info("==================================")
	log.SyslogInfo("=========New storeman", "DB file path", dbPath, "version", version)
	log.Info("==================================")

	if err := validator.NewDatabase(dbPath); err != nil {
		panic(fmt.Sprintf("Failed to create db.error:%v path: %v", err, dbPath))
	}

	// Register the storeman p2p sub-protocol handler.
	sm.protocol = p2p.Protocol{
		Name:    mpcprotocol.PName,
		Version: uint(mpcprotocol.PVer),
		Length:  mpcprotocol.NumberOfMessageCodes,
		Run:     sm.HandlePeer,
		NodeInfo: func() interface{} {
			return map[string]interface{}{"version": mpcprotocol.PVerStr}
		},
	}

	return sm
}

////////////////////////////////////
// Storeman
////////////////////////////////////
type Storeman struct {
	protocol       p2p.Protocol                // the storeman p2p sub-protocol descriptor built in New
	peers          map[discover.NodeID]*Peer   // currently connected storeman peers, guarded by peerMu
	storemanPeers  map[discover.NodeID]bool    // allowed group membership set, rebuilt by freshPeers, guarded by peerMu
	peerMu         sync.RWMutex                // Mutex to sync the active peer set
	quit           chan struct{}               // Channel used for graceful exit (created in New; never closed in this chunk)
	mpcDistributor *storemanmpc.MpcDistributor // dispatches MPC messages and sign requests
	cfg            *Config                     // user configuration passed to New
	server         *p2p.Server                 // underlying p2p server, set in Start
	isSentPeer     bool                        // flag set in New; not read in this chunk
	peersPort      map[discover.NodeID]string  // per-peer listening ports; not read in this chunk

	//allPeersConnected chan bool
}

// MaxMessageSize returns the largest p2p message size (in bytes) the
// storeman protocol accepts; runMessageLoop logs and skips larger packets.
func (sm *Storeman) MaxMessageSize() uint32 {
	const limit = 1 << 20 // 1 MiB; upper bound chosen conservatively
	return limit
}

// runMessageLoop pumps inbound p2p packets from one peer and hands each to
// the MPC distributor on its own goroutine. It returns (ending the peer
// session) on the first read or decode error.
func (sm *Storeman) runMessageLoop(p *Peer, rw p2p.MsgReadWriter) error {
	log.SyslogInfo("****runMessageLoop begin****")
	defer log.SyslogInfo("runMessageLoop exit")

	for {
		// Block until the next packet arrives.
		msg, err := rw.ReadMsg()
		if err != nil {
			log.SyslogErr("runMessageLoop", "peer", p.Peer.ID().SlimString(), "err", err.Error())
			return err
		}

		// Copy the payload out of the wire buffer: the distributor consumes
		// the clone asynchronously while this loop discards the original.
		raw, err := rlp.NewStream(msg.Payload, 0).Raw()
		if err != nil {
			log.SyslogErr("runMessageLoop", "peer", p.Peer.ID().SlimString(), "s.Raw err", err.Error())
			return err
		}
		clone := p2p.Msg{Code: msg.Code, Size: msg.Size, Payload: bytes.NewReader(raw), ReceivedAt: msg.ReceivedAt}

		log.SyslogDebug("runMessageLoop, received a msg", "peer", p.Peer.ID().SlimString(), "packet size", msg.Size)
		if msg.Size > sm.MaxMessageSize() {
			// Oversized packets are logged and dropped, not fatal.
			log.SyslogWarning("runMessageLoop, oversized message received", "peer", p.Peer.ID().SlimString(), "packet size", msg.Size)
		} else {
			go sm.mpcDistributor.GetMessage(p.Peer.ID(), rw, &clone)
		}
		msg.Discard()
	}
}

// APIs returns the RPC descriptors exposed by the storeman service.
func (sm *Storeman) APIs() []rpc.API {
	api := rpc.API{
		Namespace: mpcprotocol.PName,
		Version:   mpcprotocol.PVerStr,
		Service:   &StoremanAPI{sm: sm},
		Public:    true,
	}
	return []rpc.API{api}
}

// Protocols returns the p2p sub-protocols run by the storeman service.
func (sm *Storeman) Protocols() []p2p.Protocol {
	protocols := make([]p2p.Protocol, 1)
	protocols[0] = sm.protocol
	return protocols
}

// Start implements node.Service, starting the background data propagation thread
// of the Whisper protocol.
func (sm *Storeman) Start(server *p2p.Server) error {

	sm.mpcDistributor.Self = server.Self()

	// set self node id into the osm config
	osmconf.GetOsmConf().SetSelfNodeId(&sm.mpcDistributor.Self.ID)

	log.SyslogInfo("starting deepsea_mpc", "version", mpcprotocol.PVerStr)

	// check groupInfo.json existing.

	osm := osmconf.GetOsmConf()
	existing, err := osmconf.PathExists(osm.GrpInfoPath())
	if err != nil {
		panic(err.Error())
	}
	if existing {
		// load config
		err = osm.LoadCnf(osm.GrpInfoPath())
		if err != nil {
			panic(err)
		}

		// check config
		osm.CheckOsmcnf()
		osmconf.GetOsmConf().SetSelfWorkingAddr()
	}
	sm.server = server

	err = sm.freshPeers()
	if err != nil {
		return err
	}

	go sm.mpcHeartBeat()

	go sm.mpcSntKnownManage()

	go sm.delPeer()

	return nil

}

// delPeer removes peers reported dead by the p2p server from the active peer
// map. It runs until sm.quit is closed.
//
// Fixes: the original never stopped its ticker and had no exit path, leaking
// both the ticker and the goroutine; a quit case and a deferred Stop were
// added (behavior is unchanged while quit stays open).
func (sm *Storeman) delPeer() {
	ticker := time.NewTicker(mpcprotocol.MpcDelPeerCycle)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Periodic wake-up only; removals are driven by the channel below.
		case pd := <-sm.server.GetDelPeerToSmChan():
			log.SyslogInfo("storeman delPeer", "peerId", pd.ID())
			sm.peerMu.Lock()
			delete(sm.peers, pd.ID())
			sm.peerMu.Unlock()
		case <-sm.quit:
			return
		}
	}
}

// freshPeers reloads the storeman group membership from the osm config,
// forwards it to the MPC distributor and rebuilds the allowed-peer set.
func (sm *Storeman) freshPeers() error {
	nodeIds, err := osmconf.GetOsmConf().GetAllPeersNodeIds()
	if err != nil {
		log.SyslogErr("Storeman", "freshPeers err ", err.Error())
		return err
	}

	sm.mpcDistributor.FreshPeer(&nodeIds)

	// Build the new membership set first, then swap it in under the lock so
	// the critical section stays short.
	allowed := make(map[discover.NodeID]bool, len(nodeIds))
	for _, id := range nodeIds {
		allowed[id] = true
	}

	sm.peerMu.Lock()
	sm.storemanPeers = allowed
	sm.peerMu.Unlock()

	return nil
}

// mpcHeartBeat periodically logs the currently connected peer set.
//
// Fixes: the original read sm.peers (len and range) without holding peerMu,
// racing with the writers in HandlePeer and delPeer — the map is now read
// under RLock. It also leaked its ticker and had no exit path; a quit case
// and deferred Stop were added.
func (sm *Storeman) mpcHeartBeat() {
	ticker := time.NewTicker(mpcprotocol.MpcHeartBeatCycle)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			sm.peerMu.RLock()
			log.SyslogInfo("mpcHeartBeat[mpc working]", "peerCount", len(sm.peers))

			index := 0
			for _, p := range sm.peers {
				log.SyslogInfo("mpcHeartBeat[detailed]", "peerIndex", index, "nodeId", p.ID().SlimString())
				index++
			}
			sm.peerMu.RUnlock()
		case <-sm.quit:
			return
		}
	}
}

// mpcSntKnownManage periodically clears the distributor's sent/known message
// caches so they do not grow without bound.
//
// Fixes: the original leaked its ticker and had no exit path; a quit case
// and deferred Stop were added (behavior is unchanged while quit stays open).
func (sm *Storeman) mpcSntKnownManage() {
	ticker := time.NewTicker(mpcprotocol.MpcSntKnownCycle)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			log.Info("mpcSntKnownManage[clear sent and known]")
			sm.mpcDistributor.ClearSntKnownMsg()
		case <-sm.quit:
			return
		}
	}
}

// Stop implements node.Service, signalling the background goroutines
// (heartbeat, sent/known cleanup, peer removal) to exit by closing sm.quit.
//
// Fix: the original returned without signalling shutdown, so the goroutines
// started in Start ran forever. Stop is expected to be invoked at most once
// by the node lifecycle, so an unguarded close is acceptable here.
func (sm *Storeman) Stop() error {
	close(sm.quit)
	return nil
}

// SendToPeer RLP-encodes data and sends it to the given active peer. A peer
// missing from the active set is only logged as a warning and reported as
// success, preserving the original best-effort semantics.
func (sm *Storeman) SendToPeer(peerID *discover.NodeID, msgcode uint64, data interface{}) error {
	sm.peerMu.RLock()
	defer sm.peerMu.RUnlock()

	if peer, ok := sm.peers[*peerID]; ok {
		return p2p.Send(peer.ws, msgcode, data)
	}
	log.SyslogWarning("peer not find", "peer", peerID.SlimString())
	return nil
}

// IsActivePeer reports whether the given node is currently connected.
func (sm *Storeman) IsActivePeer(peerID *discover.NodeID) bool {
	sm.peerMu.RLock()
	defer sm.peerMu.RUnlock()

	_, active := sm.peers[*peerID]
	return active
}

// HandlePeer is called by the underlying P2P layer when the storeman
// sub-protocol connection is negotiated. It admits only storeman group
// members (or the boot node), tracks the peer, runs the handshake and then
// pumps messages until the connection dies.
//
// Fix: the cleanup defer (map removal + server.RemovePeer) was registered
// only AFTER a successful handshake, so a failed handshake returned early
// and left a stale entry in sm.peers; the defer is now installed right after
// the peer is added to the map.
func (sm *Storeman) HandlePeer(peer *p2p.Peer, rw p2p.MsgReadWriter) error {
	// Refuse connections until the service is fully wired up (Start ran).
	nodeId, _ := osmconf.GetOsmConf().GetSelfNodeId()
	if nodeId == nil || sm == nil || sm.server == nil || sm.server.BootstrapNodes == nil || len(sm.server.BootstrapNodes) == 0 {
		return errors.New("not ready for handle peers")
	}
	if nodeId.String() == sm.server.BootstrapNodes[0].ID.String() {
		log.Info("handle new peer, peer is boot node")
	} else { // non bootnode: only accept group members or the boot node itself
		sm.peerMu.RLock()
		_, exist := sm.storemanPeers[peer.ID()]
		sm.peerMu.RUnlock()

		if !exist && peer.ID().String() != sm.server.BootstrapNodes[0].ID.String() {
			return errors.New("peer is not in storemangroup or not boot node")
		}
	}

	log.Info("handle new peer", "remoteAddr", peer.RemoteAddr().String(), "peerID", peer.ID().SlimString())

	// Create the new peer and start tracking it
	storemanPeer := newPeer(sm, peer, rw)

	sm.peerMu.Lock()
	sm.peers[storemanPeer.ID()] = storemanPeer
	sm.peerMu.Unlock()

	// Cleanup runs on every exit path from here on, including handshake
	// failure: drop the peer from the active map and tell the server to
	// forget its static entry.
	defer func() {
		sm.peerMu.Lock()

		delete(sm.peers, storemanPeer.ID())

		for _, smnode := range sm.server.StoremanNodes {
			if smnode.ID == storemanPeer.ID() {
				log.Info("remove peer", "pid", smnode.ID)
				sm.server.RemovePeer(smnode)
				break
			}
		}

		sm.peerMu.Unlock()
	}()

	// Run the peer handshake and state updates
	if err := storemanPeer.handshake(); err != nil {
		log.SyslogErr("storemanPeer.handshake failed", "peerID", peer.ID().SlimString(), "err", err.Error())
		return err
	}

	storemanPeer.start()
	defer storemanPeer.stop()

	return sm.runMessageLoop(storemanPeer, rw)
}

// GetGroupAlivePeersInfo filters groupPeers down to those currently
// connected, then appends the local node's own PeerInfo (Seed 0) when it is
// absent from the connection map.
// NOTE(review): self is appended regardless of whether it appears in
// groupPeers — confirm that callers expect that.
func (sm *Storeman) GetGroupAlivePeersInfo(groupPeers []mpcprotocol.PeerInfo) ([]mpcprotocol.PeerInfo, error) {
	sm.peerMu.RLock()
	defer sm.peerMu.RUnlock()

	alive := make([]mpcprotocol.PeerInfo, 0)
	for _, info := range groupPeers {
		if _, connected := sm.peers[info.PeerID]; connected {
			alive = append(alive, info)
		}
	}

	selfNodeId, err := osmconf.GetOsmConf().GetSelfNodeId()
	if err != nil {
		return alive, err
	}

	if _, connected := sm.peers[*selfNodeId]; !connected {
		alive = append(alive, mpcprotocol.PeerInfo{PeerID: *selfNodeId, Seed: 0})
	}

	return alive, nil
}

// Peers returns p2p info for every currently connected storeman peer.
// The result stays nil when no peers are connected (matching the original).
func (sm *Storeman) Peers() []*p2p.PeerInfo {
	sm.peerMu.RLock()
	defer sm.peerMu.RUnlock()

	var infos []*p2p.PeerInfo
	for _, peer := range sm.peers {
		infos = append(infos, peer.Peer.Info())
	}

	return infos
}

// PeerIDs returns the node ids of every currently connected storeman peer.
// The result stays nil when no peers are connected (matching the original).
func (sm *Storeman) PeerIDs() []discover.NodeID {
	sm.peerMu.RLock()
	defer sm.peerMu.RUnlock()

	var ids []discover.NodeID
	for _, peer := range sm.peers {
		ids = append(ids, peer.ID())
	}

	return ids
}

////////////////////////////////////
// StoremanAPI
////////////////////////////////////
// StoremanAPI exposes the storeman service over RPC (registered in
// Storeman.APIs under the mpcprotocol.PName namespace).
type StoremanAPI struct {
	sm *Storeman // backing service instance
}

// Version returns the storeman protocol version string.
func (sa *StoremanAPI) Version(ctx context.Context) (v string) {
	v = mpcprotocol.PVerStr
	return
}

// Peers returns info about the currently connected storeman peers.
func (sa *StoremanAPI) Peers(ctx context.Context) []*p2p.PeerInfo {
	peers := sa.sm.Peers()
	return peers
}

// GrpsNodeIds returns the node ids of all configured storeman group members.
//
// Fix: the original iterated sm.storemanPeers without holding peerMu, racing
// with freshPeers (which rebuilds the map under the lock); the map is now
// read under RLock. Also drops the redundant `, _` in the range clause.
func (sa *StoremanAPI) GrpsNodeIds(ctx context.Context) []discover.NodeID {
	sa.sm.peerMu.RLock()
	defer sa.sm.peerMu.RUnlock()

	var ret []discover.NodeID
	for nodeId := range sa.sm.storemanPeers {
		ret = append(ret, nodeId)
	}
	return ret
}

// FreshGrpInfo reloads the group configuration file (groupInfo.json) and
// refreshes the peer set accordingly.
//
// Fix: the original panicked when the path check failed — an RPC handler
// must not take down the node; the error is now returned to the caller.
// The trailing `return err` (always nil there) became an explicit nil.
func (sa *StoremanAPI) FreshGrpInfo(ctx context.Context) error {
	log.SyslogInfo("FreshGrpInfo begin")

	osm := osmconf.GetOsmConf()
	existing, err := osmconf.PathExists(osm.GrpInfoPath())
	if err != nil {
		return err
	}
	if existing {
		// A reload failure is logged but not fatal (matching prior behavior);
		// the stale in-memory config stays in effect.
		if err := osm.FreshCnf(osm.GrpInfoPath()); err != nil {
			log.SyslogErr("FreshGrpInfo error", "error", err.Error())
		}

		// check config
		osm.CheckOsmcnf()
	} else {
		log.SyslogErr("FreshGrpInfo", "file not exist", osm.GrpInfoPath())
	}

	if err = sa.sm.freshPeers(); err != nil {
		return err
	}
	log.SyslogInfo("FreshGrpInfo end")
	return nil
}

// SignDataByApprove requests an MPC Schnorr signature for data that requires
// explicit approval by the group (signType 1). On failure it returns an
// empty SignedResult together with the error.
func (sa *StoremanAPI) SignDataByApprove(ctx context.Context, data mpcprotocol.SendData) (result interface{}, err error) {
	pk := data.PKBytes
	curve := data.Curve

	signed, err, mpcID := sa.sm.mpcDistributor.CreateReqMpcSign([]byte(data.Data), []byte(data.Extern), pk, 1, curve, &data)
	if err != nil {
		log.SyslogErr("SignMpcTransaction Schnorr Approve error", "err", err.Error(), "ctx", mpcID, "data", data.String())
		return mpcprotocol.SignedResult{R: []byte{}, S: []byte{}}, err
	}

	log.SyslogInfo("SignMpcTransaction Schnorr Approve successfully", "ctx", mpcID, "data", data.String())
	return signed, nil
}

// SignData requests an MPC Schnorr signature without an approval round
// (signType 0). On failure it returns an empty SignedResult with the error.
func (sa *StoremanAPI) SignData(ctx context.Context, data mpcprotocol.SendData) (result interface{}, err error) {
	pk := data.PKBytes
	curve := data.Curve

	log.SyslogInfo("SignMpcTransaction schnorr", "PKBytes", pk)
	signed, err, mpcID := sa.sm.mpcDistributor.CreateReqMpcSign([]byte(data.Data), []byte(data.Extern), pk, 0, curve, &data)
	if err != nil {
		log.SyslogErr("SignMpcTransaction Schnorr error", "err", err.Error(), "ctx", mpcID, "data", data.String())
		return mpcprotocol.SignedResult{R: []byte{}, S: []byte{}}, err
	}

	log.SyslogInfo("SignMpcTransaction Schnorr successfully", "ctx", mpcID, "data", data.String())
	return signed, nil
}

// AddValidData stores an incoming Schnorr sign request so it can later be
// validated/approved by this node.
func (sa *StoremanAPI) AddValidData(ctx context.Context, data mpcprotocol.SendData) error {
	log.SyslogInfo("AddValidData", "AddValidData Schnorr", data.String())
	err := validator.AddValidData(&data)
	return err
}

// GetDataForApprove lets non-leader nodes poll the sign requests received
// from the leader that still await their approval.
func (sa *StoremanAPI) GetDataForApprove(ctx context.Context) ([]mpcprotocol.SendData, error) {
	pending, err := validator.GetDataForApprove()
	return pending, err
}

// ApproveData marks the given sign requests as approved by this (non-leader)
// node; callers are expected to have verified the data really needs signing.
func (sa *StoremanAPI) ApproveData(ctx context.Context, data []mpcprotocol.SendData) []error {
	for i := range data {
		log.SyslogInfo("ApproveData", "AddValidData Schnorr Approve", data[i].String())
	}
	return validator.ApproveData(data)
}

//////////////// add for BTC begin

// solution one begin
// SignMpcBtcTransaction signs a BTC transaction via MPC without an approval
// round (signType 0). The transaction must carry at least one input.
func (sa *StoremanAPI) SignMpcBtcTransaction(ctx context.Context, args btc.MsgTxArgs) (*mpcprotocol.SignedSuccResult, error) {
	log.SyslogInfo("@@@@@SignMpcBtcTransaction begin", "@@@@@txInfo", args.String())

	msgTx, err := btc.GetMsgTxFromMsgTxArgs(&args)
	if err != nil {
		return nil, err
	}
	if len(msgTx.TxIn) == 0 {
		log.SyslogErr("SignMpcBtcTransaction, invalid btc MsgTxArgs, doesn't have TxIn")
		return nil, errors.New("invalid btc MsgTxArgs, doesn't have TxIn")
	}

	rt, mpcId, err := sa.sm.mpcDistributor.CreateRequestBtcMpcSign(&args, 0)
	if err != nil {
		log.SyslogErr("@@@@@@@@@@SignMpcTransaction BTC error", "err", err.Error(), "txInfo", args.String(), "ctxId", mpcId)
		return nil, err
	}

	for _, signed := range rt.SignedBtc {
		log.SyslogInfo("@@@@@@@@@@SignMpcTransaction BTC successfully", "signed", common.ToHex(signed), "txInfo", args.String(), "ctxId", mpcId)
	}

	return rt, nil
}

// AddValidMpcBtcTx stores an incoming BTC sign request for later validation.
func (sa *StoremanAPI) AddValidMpcBtcTx(ctx context.Context, args btc.MsgTxArgs) error {
	log.SyslogInfo("AddValidMpcBtcTx", "AddValidData BTC", args.String())
	err := validator.AddValidMpcBtcTx(&args)
	return err
}

// solution one end

// solution two begin
// GetMpcBtcTransForApprove returns the pending BTC transactions awaiting
// approval within the given look-back window; seconds == 0 selects the
// default window of 24 hours.
//
// Fix: seconds was converted via time.Duration(1000*1000*1000*seconds);
// the idiomatic (and equivalent) form is time.Duration(seconds)*time.Second,
// and 86400s is spelled 24*time.Hour.
func (sa *StoremanAPI) GetMpcBtcTransForApprove(ctx context.Context, seconds uint64) ([]btc.MsgTxArgs, error) {
	duration := time.Duration(seconds) * time.Second
	if seconds == 0 {
		duration = 24 * time.Hour
	}
	return validator.GetBtcTransForApprove(duration)
}

// ApproveMpcBtcTran approves the given pending BTC transactions; one error
// slot is returned per input transaction.
func (sa *StoremanAPI) ApproveMpcBtcTran(ctx context.Context, args []btc.MsgTxArgs) []error {
	var ptrs []*btc.MsgTxArgs
	for i := range args {
		ptrs = append(ptrs, &args[i])
		log.SyslogInfo("ApproveMpcBtcTran", "AddValidData BTC Approve", args[i].String())
	}
	return validator.ApproveBtcTrans(ptrs)
}

// SignMpcBtcTranByApprove signs a BTC transaction via MPC with an approval
// round (signType 1). The transaction must carry at least one input.
func (sa *StoremanAPI) SignMpcBtcTranByApprove(ctx context.Context, args btc.MsgTxArgs) (*mpcprotocol.SignedSuccResult, error) {
	log.SyslogInfo("@@@@@SignMpcTransaction BTC Approve begin", "@@@@@txInfo", args.String())

	msgTx, err := btc.GetMsgTxFromMsgTxArgs(&args)
	if err != nil {
		return nil, err
	}
	if len(msgTx.TxIn) == 0 {
		log.SyslogErr("SignMpcBtcTransaction, invalid btc MsgTxArgs, doesn't have TxIn")
		return nil, errors.New("invalid btc MsgTxArgs, doesn't have TxIn")
	}

	rt, mpcId, err := sa.sm.mpcDistributor.CreateRequestBtcMpcSign(&args, 1)
	if err != nil {
		log.SyslogErr("@@@@@@@@@@SignMpcTransaction BTC Approve error", "err", err.Error(), "txInfo", args.String(), "ctxId", mpcId)
		return nil, err
	}

	for _, signed := range rt.SignedBtc {
		log.SyslogInfo("@@@@@@@@@@SignMpcTransaction BTC Approve successfully", "signed", common.ToHex(signed), "txInfo", args.String(), "ctxId", mpcId)
	}

	return rt, nil
}

// GetMpcBtcTranStatus reports the approval/sign status strings for the given
// BTC transaction.
//
// Fix: the original pre-initialized the result with []string{"Not exist"},
// but that value was dead — it was unconditionally overwritten by the
// validator call on the next line.
func (sa *StoremanAPI) GetMpcBtcTranStatus(ctx context.Context, args btc.MsgTxArgs) ([]string, error) {
	strRet, err := validator.GetMpcBtcTranStatus(&args)
	if err != nil {
		return nil, err
	}
	return strRet, nil
}

// solution two end

//////////////// add for BTC end

//////////////// add for xrp begin
// SignMpcXrpTransaction signs an XRP transaction via MPC without an approval
// round (signType 0).
func (sa *StoremanAPI) SignMpcXrpTransaction(ctx context.Context, args mpcprotocol.SendEcData) (*mpcprotocol.SignedDataResult, error) {
	log.SyslogInfo("@@@@@SignMpcTransaction XRP begin", "@@@@@txInfo", args.String())

	signeds, mpcId, err := sa.sm.mpcDistributor.CreateRequestXrpMpcSign(args, 0)
	if err != nil {
		log.SyslogErr("@@@@@@@@@@SignMpcTransaction XRP error", "err", err.Error(), "txInfo", args.String(), "ctxId", mpcId)
		return nil, err
	}

	log.SyslogInfo("@@@@@@@@@@SignMpcTransaction XRP successfully", "txInfo", args.String(), "ctxId", mpcId)
	return signeds, nil
}

// AddValidMpcXrpTx stores an incoming XRP sign request for later validation.
func (sa *StoremanAPI) AddValidMpcXrpTx(ctx context.Context, tx mpcprotocol.SendEcData) error {
	log.SyslogInfo("AddValidMpcXrpTx", "AddValidData Xrp", tx.String())
	err := validator.AddValidMpcXrpTx(&tx)
	return err
}

/////////////// add for xrp end

///////////////  add dot begin
// SignMpcDotTranByApprove signs a DOT transaction via MPC with an approval
// round (signType 1).
func (sa *StoremanAPI) SignMpcDotTranByApprove(ctx context.Context, args mpcprotocol.SendEcData) (*mpcprotocol.SignedDataResult, error) {
	log.SyslogInfo("@@@@@SignMpcDotTranByApprove begin", "@@@@@txInfo", args.String())

	rt, mpcId, err := sa.sm.mpcDistributor.CreateRequestDotMpcSign(args, 1)
	if err != nil {
		log.SyslogErr("@@@@@@@@@@SignMpcBtcTransaction end", "err", err.Error(), "txInfo", args.String(), "ctxId", mpcId)
		return nil, err
	}

	return rt, nil
}

// GetMpcDotTransForApprove returns the pending DOT transactions awaiting
// approval within the given look-back window; seconds == 0 selects the
// default window of 24 hours.
//
// Fix: same duration idiom as GetMpcBtcTransForApprove —
// time.Duration(seconds)*time.Second instead of 1000*1000*1000*seconds.
func (sa *StoremanAPI) GetMpcDotTransForApprove(ctx context.Context, seconds uint64) ([]mpcprotocol.SendEcData, error) {
	duration := time.Duration(seconds) * time.Second
	if seconds == 0 {
		duration = 24 * time.Hour
	}
	return validator.GetDotTransForApprove(duration)
}

// ApproveMpcDotTran approves the given pending DOT transactions; one error
// slot is returned per input transaction.
func (sa *StoremanAPI) ApproveMpcDotTran(ctx context.Context, args []mpcprotocol.SendEcData) []error {
	errs := validator.ApproveDotTrans(args)
	return errs
}

////////////   add dot end

///////////    add uniMpc begin
// SignByApprove runs the approval-required (signType 1) uniform MPC signing
// flow: it drops the request if the distributor is at capacity, validates
// the curve/algorithm combination and checks the group public key lies on
// the selected curve, then hands the request to the distributor and logs
// timing information from the result.
//
// Fixes (idiom only, behavior and error strings unchanged):
// errors.New(fmt.Sprintf(...)) -> fmt.Errorf(...), and
// errors.New(fmt.Sprint("literal")) -> errors.New("literal").
func (sa *StoremanAPI) SignByApprove(ctx context.Context, data mpcprotocol.SignInput) (result interface{}, err error) {
	rcvTime := time.Now()
	log.SyslogInfo("@@@@@SignByApprove begin", "@@@@@SignInput", data.String(), "rcvTime", rcvTime.String())

	// Capacity check: reject the request early when the distributor is full.
	err, needDrop := sa.sm.mpcDistributor.NeedDrop()
	if needDrop || err != nil {
		if err != nil {
			log.SyslogErr("@@@@@SignByApprove Drop", "@@@@@SignInput", data.String(), "needDrop", needDrop, "err", err.Error())
		} else {
			log.SyslogErr("@@@@@SignByApprove Drop", "@@@@@SignInput", data.String(), "needDrop", needDrop, "err", "nil")
		}

		return mpcprotocol.SignedResult{R: []byte{}, S: []byte{}}, err
	}

	// Supported combinations:
	// sec256 (OK) : 340, ecdsa
	// bn256  (OK) : schnorr
	curveId := int(big.NewInt(0).SetBytes(data.Curve).Int64())
	algId := int(big.NewInt(0).SetBytes(data.Alg).Int64())

	var secValid bool
	if curveId == mpcprotocol.SK256Curve && (algId == mpcprotocol.ALGSCHNORR340 || algId == mpcprotocol.ALGECDSA || algId == mpcprotocol.ALGSCHNORR) {
		secValid = true

		// Verify the group public key is a valid point on the sk256 curve.
		smpc := schnorrmpc.NewSkSchnorrMpc()
		pt, err := smpc.StringToPt(common.ToHex(data.PKBytes))
		if err != nil {
			return nil, fmt.Errorf("err:%v\n", err.Error())
		}
		if !smpc.IsOnCurve(pt) {
			return nil, errors.New("gpk is not on sk256 curve")
		}
	}

	var bnValid bool
	if curveId == mpcprotocol.BN256Curve && algId == mpcprotocol.ALGSCHNORR {
		bnValid = true

		// Verify the group public key is a valid point on the bn256 curve.
		smpc := schnorrmpcbn.NewBnSchnorrMpc()
		pt, err := smpc.StringToPt(common.ToHex(data.PKBytes))
		if err != nil {
			return nil, fmt.Errorf("err:%v\n", err.Error())
		}
		if !smpc.IsOnCurve(pt) {
			return nil, errors.New("gpk is not on bn256 curve")
		}
	}

	if !(secValid || bnValid) {
		return nil, fmt.Errorf("%s curveId:%d algId:%d\n", mpcprotocol.ErrNotSupport, curveId, algId)
	}

	signed, err, mpcID := sa.sm.mpcDistributor.CreateReqMpcSignUni(&data, 1)

	// On success, decode the optional "finish collect" timestamp carried in
	// the result so the actual signing duration can be logged.
	var timeFC time.Time
	if err == nil {
		soStr := ""
		if so, ok := signed.(mpcprotocol.SignOutput); ok {
			soStr = so.String()

			if len(so.FinishCollect) != 0 {
				errFc := timeFC.UnmarshalBinary(so.FinishCollect[:])
				if errFc != nil {
					log.SyslogErr("SignByApprove", "UnmarshalBinary error", errFc.Error())
				}
			}
		}
		// NOTE(review): when FinishCollect is absent, timeFC stays the zero
		// Time and "duringAct(sec)" logs an enormous duration — confirm intent.
		log.SyslogInfo("SignMpcTransaction SignByApprove successfully", "ctx", mpcID, "data", data.String(), "during(sec)", time.Since(rcvTime), "duringAct(sec)", time.Since(timeFC), "signResult", soStr)
	} else {
		log.SyslogErr("SignMpcTransaction SignByApprove error", "err", err.Error(), "ctx", mpcID, "data", data.String(), "during(sec)", time.Since(rcvTime), "duringAct(sec)", time.Since(rcvTime))
		return mpcprotocol.SignedResult{R: []byte{}, S: []byte{}}, err
	}

	return signed, nil
}

// GetForApprove lets non-leader nodes poll the uniform-MPC sign requests
// received from the leader within the given look-back window; seconds == 0
// selects the default window of 24 hours.
//
// Fix: duration idiom — time.Duration(seconds)*time.Second replaces the
// literal 1000*1000*1000*seconds nanosecond arithmetic.
func (sa *StoremanAPI) GetForApprove(ctx context.Context, seconds uint64) ([]mpcprotocol.SignInput, error) {
	duration := time.Duration(seconds) * time.Second
	if seconds == 0 {
		duration = 24 * time.Hour
	}

	return validator.GetDataForApproveUni(duration)
}

// Approve marks the given uniform-MPC sign requests as approved by this
// (non-leader) node; one error slot is returned per input item.
func (sa *StoremanAPI) Approve(ctx context.Context, data []mpcprotocol.SignInput) []error {
	for i := range data {
		log.SyslogInfo("Approve", "Approve dataItem", data[i].String())
	}
	return validator.ApproveDataUni(data)
}

// AddValid stores an incoming uniform-MPC sign request for later validation.
func (sa *StoremanAPI) AddValid(ctx context.Context, data *mpcprotocol.SignInput) error {
	err := validator.AddValidDataUni(data)
	return err
}

///////////    add uniMpc end
