package runload

import (
	"gitee.com/kingzyt/common/batchMallocer"
	"gitee.com/kingzyt/common/behaviorTree"
	"gitee.com/kingzyt/common/log"
	. "gitee.com/kingzyt/common/ringlist"
	. "gitee.com/kingzyt/common/util"
	"runtime"
	"strconv"
	"sync"
	"time"
)

// IRobot is the contract a robot must satisfy to be driven by a Runloader.
// It embeds the behavior-tree runtime-data interface so the Runloader's
// shared BehaviorTree can update each robot's per-instance node state.
type IRobot interface {
	behaviorTree.IRuntimeBTData

	// ChangeProxiedUserName swaps the user this robot impersonates and
	// returns the previous name (so the caller can recycle it).
	ChangeProxiedUserName(name string) (oldName string)
	GetProxiedUserName() string

	// Start brings the robot up; false means startup failed.
	Start() bool
	// Quit requests shutdown; QuitOK reports whether shutdown completed.
	Quit()
	QuitOK() bool

	// Lock/Unlock guard the robot during Update (called by the Runloader
	// around each update tick).
	Lock()
	Unlock()

	// Update advances the robot by dtNs nanoseconds.
	Update(dtNs int64)
	GetUpdateCnt() int64

	// GetActionStats hands the robot's per-action statistics map to proc.
	GetActionStats(proc func(stats map[string]*ActionStat))
}

// RunloaderRunningMode selects how proxied user names are generated.
type RunloaderRunningMode int8

const (
	// Mode_Idx generates names from a prefix plus a numeric index.
	Mode_Idx      RunloaderRunningMode = 0
	// Mode_NameList takes names verbatim from an explicit list.
	Mode_NameList RunloaderRunningMode = 1
)

// RunloaderOpenParams configures Runloader.Open.
type RunloaderOpenParams struct {
	Mode RunloaderRunningMode

	// Mode_Idx settings: names are IdxNamePrefix + strconv.Itoa(i+IdxOffset)
	// for i in [0, IdxCount). With IdxCount <= 1 the bare prefix is used.
	IdxNamePrefix string
	IdxCount      int
	IdxOffset     int

	// Mode_NameList setting: explicit proxied user names.
	NameList []string

	// EachRunTS, in seconds, is how long a robot keeps one proxied user
	// before rotating to the next available name (0 disables rotation).
	EachRunTS int64
}

// NewRunloaderOpenParams returns open params with the defaults:
// index prefix "r" and a single index.
func NewRunloaderOpenParams() *RunloaderOpenParams {
	params := new(RunloaderOpenParams)
	params.IdxNamePrefix = "r"
	params.IdxCount = 1
	return params
}

// Runloader drives a batch of robots through a shared behavior tree,
// spreading their updates across time slices to smooth CPU load.
type Runloader struct {
	name                    string
	oneBatchRobotNum        int
	eachRobotUpdateInterval time.Duration
	robotCount              int
	openParams              *RunloaderOpenParams
	mutex                   sync.Mutex

	// bt is shared by all robots; per-robot state lives in each IRobot
	// via behaviorTree.IRuntimeBTData.
	bt *behaviorTree.BehaviorTree

	// rList is const after Run, no lock
	rList []IRobot
	// proxiedUserNames holds the not-yet-assigned names; guarded by
	// proxiedUserNamesMutex once Run's goroutines are live.
	proxiedUserNames      *RingList
	proxiedUserNamesMutex sync.Mutex

	// NewRobot must be set by the caller before Run; it builds one robot.
	NewRobot func(robotId int, proxiedUserName string) IRobot
}

// NewRunloader builds a Runloader whose robots all run the behavior tree
// rooted at btRootNode.
func NewRunloader(name string, btRootNode behaviorTree.INode) *Runloader {
	tree := behaviorTree.NewBehaviorTree(name)
	tree.SetRoot(btRootNode)

	loader := &Runloader{
		name:             name,
		bt:               tree,
		proxiedUserNames: NewRingList(nil, 0),
	}
	return loader
}

// Open validates the parameters, fills the ring list of proxied user names
// according to params.Mode, and fixes the robot count as
// min(available names, oneBatchRobotNum). Returns false (after logging)
// on any invalid parameter.
func (self *Runloader) Open(oneBatchRobotNum int, eachRobotUpdateInterval time.Duration, params *RunloaderOpenParams) bool {
	if oneBatchRobotNum <= 0 {
		log.Error(self.name, "oneBatchRobotNum(%d) should be > 0", oneBatchRobotNum)
		return false
	}
	if eachRobotUpdateInterval <= 0 {
		log.Error(self.name, "eachRobotUpdateInterval(%s) should be > 0", eachRobotUpdateInterval)
		return false
	}

	self.oneBatchRobotNum = oneBatchRobotNum
	self.eachRobotUpdateInterval = eachRobotUpdateInterval
	switch params.Mode {
	case Mode_Idx, Mode_NameList:
	default:
		// fix: RunloaderRunningMode is an integer type without a String
		// method, so %s printed "%!s(...)"; %d is the correct verb.
		log.Error(self.name, "mode [%d] is not supported", params.Mode)
		return false
	}

	self.openParams = params

	proxiedUserCnt := 0
	switch params.Mode {
	case Mode_Idx:
		proxiedUserCnt = self.openParams.IdxCount
		if proxiedUserCnt > 1 {
			for i := 0; i < proxiedUserCnt; i++ {
				self.proxiedUserNames.PushTail(self.openParams.IdxNamePrefix + strconv.Itoa(i+self.openParams.IdxOffset))
			}
		} else {
			// a single-user run uses the bare prefix with no index suffix
			self.proxiedUserNames.PushTail(self.openParams.IdxNamePrefix)
		}
	case Mode_NameList:
		proxiedUserCnt = len(self.openParams.NameList)
		for i := 0; i < proxiedUserCnt; i++ {
			self.proxiedUserNames.PushTail(self.openParams.NameList[i])
		}
	}
	// never run more robots than there are names to proxy
	self.robotCount = IntMin(proxiedUserCnt, self.oneBatchRobotNum)

	return true
}

/*
If a time slice is 50ms, a 3s update cycle contains 60 slices; with 5k
robots running concurrently, each slice gets roughly 83 robots, so only
about 83 robots log in at the same moment — which indirectly achieves a
staggered, batch-by-batch login without extra scheduling. If robots are
further split by trait across several behavior trees, multiple runloaders
run side by side; in that case the caller should delay starting each one
so their robots' login times stay spread apart.
*/
// Run creates and starts robotCount robots, then spreads their periodic
// updates across runPieceCnt parallel "lines", each line further sliced
// into sub-batches so CPU load stays even. Returns false if any robot
// fails to start or the batch split fails.
// NOTE(review): the per-line update goroutines have no stop signal here —
// presumably UpdateProcWithDt or Quit ends them; confirm against those.
func (self *Runloader) Run() bool {
	for i := 0; i < self.robotCount; i++ {
		robotId := i

		// names were queued by Open, one per robot, so PopHead cannot be nil here
		proxiedUserName := self.proxiedUserNames.PopHead().(string)
		robot := self.NewRobot(robotId, proxiedUserName)
		if !robot.Start() {
			log.Error("runload", "Robot started [FAILED]. robotId:%d", robotId)
			return false
		}

		self.rList = append(self.rList, robot)
	}

	/*
		If a time slice is 100ms and a robot's behavior-tree update period
		is 3s, a period holds 30 slices. We spread the N robots over those
		30 slices so successive updates keep the CPU evenly busy, while
		each individual robot is still updated once per full period.
	*/
	robotCnt := len(self.rList)
	runPieceCnt := runtime.NumCPU() * 2

	// sub-batch count: how many slices one update period is divided into,
	// sized so each slice's work is roughly defaultProcMsgAvgTime
	defaultProcMsgAvgTime := time.Millisecond * 3
	subBatchCnt := 1
	if self.eachRobotUpdateInterval > defaultProcMsgAvgTime {
		subBatchCnt = int(self.eachRobotUpdateInterval / defaultProcMsgAvgTime)
	}

	// run runPieceCnt lines parallelly
	err := batchMallocer.ProcBatches(robotCnt, runPieceCnt, nil, func(beginIdx int, endIdx int) bool {
		eachRobotRunSumT := int64(0)

		// iterator that hands out [beginIdx, endIdx) in subBatchCnt slices,
		// one slice per tick of the goroutine below
		beIterator := batchMallocer.NewDynamicBatchMallocer(endIdx-beginIdx, subBatchCnt)
		batchInterval := time.Duration(int64(self.eachRobotUpdateInterval) / int64(beIterator.GetFinalBatchCnt()))

		go UpdateProcWithDt(batchInterval, true, func(dtNs int64) (ok bool) {
			subBeginOffest, subEndOffest, _ := beIterator.GetNextBatch(true)

			if self.openParams.EachRunTS > 0 {
				// rotate proxied users once EachRunTS seconds have accumulated
				eachRobotRunSumT += dtNs
				if eachRobotRunSumT >= self.openParams.EachRunTS*int64(time.Second) {
					eachRobotRunSumT = 0

					// change all robot in this line
					for i := beginIdx; i < endIdx; i++ {
						r := self.rList[i]
						func() {
							self.proxiedUserNamesMutex.Lock()
							defer self.proxiedUserNamesMutex.Unlock()

							name := self.proxiedUserNames.PopHead()
							// if does not get idx, just waiting for next check
							if name != nil {
								// if get name, change user, and relogin, otherwise just going on
								oldName := r.ChangeProxiedUserName(name.(string))
								self.proxiedUserNames.PushTail(oldName)
							}
						}()
					}
				}
			}

			// update this tick's slice of robots, one goroutine each;
			// r.Lock serializes against other users of the robot
			for i := beginIdx + subBeginOffest; i < beginIdx+subEndOffest; i++ {
				go func(idx int) {
					r := self.rList[idx]
					defer DeferRecoverPanic(r.GetProxiedUserName())

					r.Lock()
					defer r.Unlock()
					r.Update(dtNs)
					self.bt.Update(r, dtNs)
				}(i)
			}

			return true
		})

		return true
	})
	if err != nil {
		log.Error("runload", "batchMallocer.ProcBatches, err:%s", err)
		return false
	}

	return true
}

// Quit asks every robot to shut down and blocks until each reports
// QuitOK. Quit requests are throttled (200ms pause per 1000 robots) so a
// large fleet does not slam the backend with simultaneous logouts.
func (self *Runloader) Quit() {
	wg := sync.WaitGroup{}
	wg.Add(len(self.rList))

	for i := 0; i < len(self.rList); i++ {
		r := self.rList[i]
		r.Quit()

		go func() {
			defer wg.Done()

			// fix: the original discarded the *Ticker and only kept its
			// channel, so Stop was never called and the ticker leaked;
			// keep the ticker and stop it when this waiter exits.
			quitChecker := time.NewTicker(time.Millisecond * 100)
			defer quitChecker.Stop()

			// poll until the robot confirms shutdown
			for {
				<-quitChecker.C
				if r.QuitOK() {
					break
				}
			}
		}()

		if i > 0 && i%1000 == 0 {
			time.Sleep(time.Millisecond * 200)
		}
	}

	wg.Wait()
}

// GetRunningNodes counts how many robots are currently on each
// behavior-tree node, keyed by node name.
func (self *Runloader) GetRunningNodes() map[string]int {
	nodeCounts := make(map[string]int)

	for _, robot := range self.rList {
		nodeCounts[robot.GetCurRunningNodeName()]++
	}
	return nodeCounts
}

// GetActionsStat merges every robot's per-action statistics into one map
// keyed by action id, then resets each robot's rolling averages so the
// next call reports a fresh cycle ("will reset avg data").
func (self *Runloader) GetActionsStat() map[string]*ActionStat {
	actionsStats := map[string]*ActionStat{}

	// one shared mutex for all aggregate stats created here
	statMutex := &sync.RWMutex{}

	for i := 0; i < len(self.rList); i++ {
		self.rList[i].GetActionStats(func(rStat map[string]*ActionStat) {
			for actionId, stat := range rStat {
				// fix: single comma-ok lookup instead of the original
				// check-then-index double map access
				totalStat, ok := actionsStats[actionId]
				if !ok {
					totalStat = NewActionStat(actionId, statMutex)
					actionsStats[actionId] = totalStat
				}
				totalStat.Merge(stat)
				stat.ResetAvg() // for cycle show, reset avg data
			}
		})
	}

	return actionsStats
}
