package work

import (
	"fmt"
	"lnzw/engine/args"
	"lnzw/engine/define"
	"lnzw/engine/log"
	"lnzw/engine/queue"
	"reflect"
	"sort"
	"sync"
	"sync/atomic"
	"time"

	"go.uber.org/zap"
)

const (
	WORKER_STATE_FREE = iota
	WORKER_STATE_RUN
)

const (
	PPROF_WORK_FREE = iota
	PPROF_WORK_STATISTICS
)

const (
	WORK_QUEUE_MAX_SIZE = 10000 //队列中最大数据
)

var Mgr *WorkerManager
var once sync.Once

type IWork interface {
	Init(args *args.Args)
	Run()
	QueueId() define.QueueType
}

type Worker struct {
	queueId    define.QueueType
	mpscqueue  *queue.MPSCQueue[IWork] //多生产单消费的队列
	s          int32                   //运行状态 默认是空闲状态 (空闲状态 运行状态)
	notify     chan struct{}           //用于通知worker工作来了
	pprofWorks map[string]*workPprof   //当前worker的工作处理情况分析 用于系统分析每个worker的状态（监工报表）
	pState     int32                   //性能统计状态 默认FREE状态
}

type WorkerManager struct {
	workers     map[define.QueueType]*Worker
	lockWorkers sync.Mutex
}

// work 性能统计
type workPprof struct {
	name      string  // 类名
	count     int32   // 执行总次数
	totalTime int64   // 总耗时
	avgTime   float64 //平均耗时
}

func (mgr *WorkerManager) SubmitWork(work IWork, arg ...any) {
	defer func() {
		if err := recover(); err != nil {
			log.Logger.Error("SubmitWork error", zap.Any("err", err))
		}
	}()
	queueId := work.QueueId()
	argval, _ := args.ValueOf(arg...)
	work.Init(argval)
	worker, ok := mgr.workers[queueId]
	if !ok {
		mgr.lockWorkers.Lock()
		worker = &Worker{queueId: queueId,
			mpscqueue:  queue.New[IWork](),
			notify:     make(chan struct{}),
			pprofWorks: make(map[string]*workPprof),
		}
		mgr.workers[queueId] = worker
		mgr.lockWorkers.Unlock()
		go worker.Run()
	}
	if worker.mpscqueue.Len() >= WORK_QUEUE_MAX_SIZE {
		log.Logger.Warn("worker queue is too many, give up the work", zap.Any("work", work))
		return
	}
	worker.mpscqueue.Enqueue(work)

	if atomic.LoadInt32(&(worker.s)) == WORKER_STATE_FREE {
		worker.notify <- struct{}{} //通知运行
	}
}

func (mgr *WorkerManager) removeWorker(queueId define.QueueType) {
	mgr.lockWorkers.Lock()
	delete(mgr.workers, queueId)
	mgr.lockWorkers.Unlock()
	log.Logger.Info("remove worker", zap.Any("queueId", queueId))
}

func (worker *Worker) Run() {
	defer func() {
		if err := recover(); err != nil {
			Mgr.removeWorker(worker.queueId)
			log.Logger.Error("worker Run error", zap.Any("err", err))
		}
	}()
	for {
		for {
			work := worker.mpscqueue.Dequeue()
			if work == nil {
				break
			}
			//当空闲的时候 等待通知
			atomic.StoreInt32(&(worker.s), WORKER_STATE_RUN)
			func() {
				defer func() {
					if err := recover(); err != nil {
						log.Logger.Error("work Run error", zap.Any("err", err), zap.Any("work", work))
					}
				}()
				startTime := time.Now().UnixNano()
				work.Run()
				endTime := time.Now().UnixNano()
				workTime := endTime - startTime
				//耗时统计
				worker.statisticsPprof(reflect.TypeOf(work).Elem().Name(), workTime)
			}()
		}

		//当空闲的时候 等待通知
		atomic.StoreInt32(&(worker.s), WORKER_STATE_FREE)
		<-worker.notify
	}
}

// 统计性能
func (worker *Worker) statisticsPprof(name string, workTime int64) {
	actual, rs := worker.pprofWorks[name]
	for atomic.LoadInt32(&(worker.pState)) == PPROF_WORK_STATISTICS {
	}
	if rs {
		actual.count++
		actual.totalTime += workTime
	} else {
		worker.pprofWorks[name] = &workPprof{name: name, totalTime: workTime, count: 1}
	}
}

// 打印性能情况
func (mgr *WorkerManager) PrintPprof() {
	works := make([]*workPprof, 0)
	workerInfo := ""
	//打印每个当前工作的数据
	mgr.lockWorkers.Lock()
	for _, worker := range mgr.workers {
		workerInfo += fmt.Sprintf("queueId:%d,worknum:%d\n", worker.queueId, worker.mpscqueue.Len())
		atomic.StoreInt32(&(worker.pState), PPROF_WORK_STATISTICS)
		for _, work := range worker.pprofWorks {
			works = append(works, work)
		}
		atomic.StoreInt32(&(worker.pState), PPROF_WORK_FREE)
	}
	//整合
	workGroups := make(map[string]*workPprof)
	for _, work := range works {
		if oneWork, ok := workGroups[work.name]; ok {
			oneWork.count += work.count
			oneWork.totalTime += work.totalTime
		} else {
			workGroups[work.name] = work
		}
	}
	//计算平均值
	works = works[0:0]
	for _, work := range workGroups {
		work.avgTime = float64(work.totalTime) / float64(work.count)
		works = append(works, work)
	}
	sort.Slice(works, func(i, j int) bool {
		return works[i].avgTime < works[j].avgTime
	})
	pprofStr := ""
	for _, work := range works {
		pprofStr += fmt.Sprintf("name:%s,avgTime:%0.2fms,count:%d,totalTime:%0.2fms\n", work.name, (work.avgTime / 10e6), work.count, (float64(work.totalTime) / 10e6))
	}
	mgr.lockWorkers.Unlock()
	pprofStr += workerInfo
	fmt.Println(pprofStr)
}

func Setup() {
	once.Do(func() {
		Mgr = &WorkerManager{
			workers: make(map[define.QueueType]*Worker),
		}
	})
}
