package manager

import (
	"encoding/json"
	"sync"
	"sync/atomic"

	"sieve_engine/lib/config"
)

var (
	// Tunables resolved once at package load from the config service.
	concurrency        = *config.Int("sieve.schedule.concurrency", 100, "redis channel")
	maxGorCountPerTask = *config.Int("sieve.config.max.gor.per.task", 3000, "max gor per task")
	maxGorCount        = *config.Int("config.redis.max.gor.count", 800000, "max gor count")
	fetchCountOneTime  = *config.Int("config.redis.fetch.onetime", 10, "fetch one time")
	// lazyInitOnce is not referenced in this section of the file;
	// presumably it guards one-time initialization elsewhere — verify before removing.
	lazyInitOnce       = sync.Once{}
)

// TaskConfig holds the runtime tuning knobs and live counters for the
// sieve task scheduler. Zero-value instances leave runningTasks nil;
// getDefaultTaskConfig allocates it.
type TaskConfig struct {
	fetchCountOnTime   int           // number of items fetched from redis per round-trip
	minGorCount        int32         // goroutine count when the system is idle (few sieve tasks running; run as many goroutines as possible)
	maxGorCount        int32         // overall goroutine cap when the system is busy (many sieve tasks running; bounded by scheduler load)
	maxGorCountPerTask int32         // per-task goroutine cap — original comment duplicated maxGorCount's; presumably this bounds a single task, verify against usage
	runningGors        atomic.Int32  // actual number of sieving goroutines currently running
	concurrency        int32         // configured limit on the number of tasks running at once
	runningTasks       *atomic.Int32 // actual number of tasks running at once
}

// getDefaultTaskConfig builds a TaskConfig seeded from the package-level
// config values. minGorCount and runningGors start at their zero values;
// runningTasks is allocated so callers may Store/Load on it immediately.
func getDefaultTaskConfig() *TaskConfig {
	cfg := TaskConfig{
		fetchCountOnTime:   fetchCountOneTime,
		maxGorCount:        int32(maxGorCount),
		maxGorCountPerTask: int32(maxGorCountPerTask),
		concurrency:        int32(concurrency),
	}
	cfg.runningTasks = new(atomic.Int32)
	return &cfg
}

// getTaskConfig returns the shared task config held by mConfig.
//
// NOTE(review): this returns the shared *TaskConfig, not a copy, and it
// replaces runningTasks with a fresh counter on every call — resetting the
// running-task count for all holders of the pointer. If concurrent callers
// exist this is also an unsynchronized write to shared state; confirm this
// reset is intentional (e.g. only called once at startup).
func getTaskConfig() *TaskConfig {

	m := mConfig.taskConfig
	m.runningTasks = &atomic.Int32{}
	return m
}

// setTaskConfig copies the fields of data onto the shared task config
// (via its serializable form) and persists the result with flushTask.
// Returns the error from flushTask, if any.
func setTaskConfig(data *TaskConfig) error {

	mConfig.taskConfig.merge(data.transfer())
	return mConfig.flushTask()
}

// sync copies every field of the serialized representation into t,
// overwriting current values (including the live counters).
//
// t.runningTasks is only allocated by getDefaultTaskConfig; on a
// zero-value TaskConfig it is nil and the final Store would panic with a
// nil-pointer dereference, so allocate it lazily here.
func (t *TaskConfig) sync(data *taskConfigSync) {

	t.fetchCountOnTime = data.FetchCountOnTime
	t.maxGorCount = data.MaxGorCount
	t.maxGorCountPerTask = data.MaxGorCountPerTask
	t.concurrency = data.Concurrency
	t.runningGors.Store(data.RunningGors)
	if t.runningTasks == nil {
		t.runningTasks = new(atomic.Int32)
	}
	t.runningTasks.Store(data.RunningTasks)
}

// merge applies data onto t when data is non-nil; a nil data is a no-op.
// Returns t so calls can be chained.
func (t *TaskConfig) merge(data *taskConfigSync) *TaskConfig {

	if data == nil {
		return t
	}
	t.sync(data)
	return t
}

// transfer snapshots t into its serializable form.
//
// t.runningTasks is only allocated by getDefaultTaskConfig; on a
// zero-value TaskConfig it is nil and Load would panic, so degrade to 0
// in that case instead of dereferencing a nil pointer.
func (t *TaskConfig) transfer() *taskConfigSync {

	var runningTasks int32
	if t.runningTasks != nil {
		runningTasks = t.runningTasks.Load()
	}

	return &taskConfigSync{
		FetchCountOnTime:   t.fetchCountOnTime,
		MaxGorCount:        t.maxGorCount,
		MaxGorCountPerTask: t.maxGorCountPerTask,
		Concurrency:        t.concurrency,
		RunningGors:        t.runningGors.Load(),
		RunningTasks:       runningTasks,
	}
}

// taskConfigSync is the wire/storage representation of TaskConfig: plain
// exported fields (no atomics, no pointers) tagged for both JSON and redis
// serialization. Field meanings mirror the TaskConfig fields of the same name.
type taskConfigSync struct {
	FetchCountOnTime   int   `json:"fetch_count_on_time" redis:"fetch_count_on_time"`
	MaxGorCount        int32 `json:"max_gor_count" redis:"max_gor_count"`
	MaxGorCountPerTask int32 `json:"max_gor_count_per_task" redis:"max_gor_count_per_task"`
	Concurrency        int32 `json:"concurrency" redis:"concurrency"`
	RunningGors        int32 `json:"running_gors" redis:"running_gors"`
	RunningTasks       int32 `json:"running_tasks" redis:"running_tasks"`
}

// string renders c as its JSON encoding. Marshalling a struct of scalar
// fields cannot fail in practice; on the impossible error path this
// returns the empty string, matching the zero-byte result of the
// discarded-error form.
func (c *taskConfigSync) string() string {

	encoded, err := json.Marshal(c)
	if err != nil {
		return ""
	}
	return string(encoded)
}
