package task_pool

import (
	"context"
	"log"
	"runtime"
	"sync"
	"sync/atomic"
	"time"

	errors2 "channel/task_pool/errors"
	"channel/task_pool/task"
)

/*
Purpose of the task pool:
1. Cap concurrency to avoid resource exhaustion
2. Reuse resources (goroutines)
3. Support graceful shutdown
4. Schedule tasks dynamically on demand
*/

// OnDemandTaskPool is a goroutine pool that caps concurrency, reuses
// workers, supports graceful shutdown, and scales the worker count on
// demand based on queue backlog.
type OnDemandTaskPool struct {
	// initGNum is the minimum number of goroutines started up front.
	// These are resident workers: while the pool runs there are always at
	// least initGNum goroutines, and they never receive an idle timeout.
	// Most counters are int32 so they can be manipulated atomically.
	initGNum int32
	// coreGNum is the worker-count threshold.
	// Workers created in (initGNum, coreGNum] are temporary: after finishing
	// a task they get an idle timeout and exit if not reused in time.
	// Workers created in (coreGNum, MaxGNum] are retired eagerly once done.
	coreGNum int32
	// MaxGNum is the hard cap on concurrent goroutines, bounding resource use.
	MaxGNum int32
	// NumGNum is the current goroutine count.
	NumGNum int32

	// queue holds submitted tasks awaiting a worker.
	queue chan task.Task
	// queueBacklogRate is the backlog threshold: while the worker count is in
	// (coreGNum, MaxGNum] and the current backlog rate exceeds this value,
	// a new goroutine is created.
	// Current backlog rate = float64(len(queue)) / float64(cap(queue)).
	queueBacklogRate float64
	// runningTaskNum is the number of tasks currently executing.
	runningTaskNum int32
	// id is a monotonically increasing goroutine identifier, used mainly to
	// track and manage idle goroutines.
	id int32
	// maxIdleTime is the longest a temporary worker may sit idle before exiting.
	maxIdleTime time.Duration
	// idleGroup is the set of currently idle goroutine ids.
	idleGroup sync.Map
	// state is the pool's internal lifecycle state (mutated via atomic CAS).
	state int32

	// shutdownCtx is cancelled when the pool has fully stopped.
	shutdownCtx    context.Context
	shutdownCancel context.CancelFunc

	mutex sync.Mutex
}

type Opt func(o *OnDemandTaskPool)

// WithCoreGNum overrides the core goroutine threshold of the pool.
func WithCoreGNum(coreGNum int32) Opt {
	return func(pool *OnDemandTaskPool) {
		pool.coreGNum = coreGNum
	}
}

// WithMaxGNum overrides the maximum goroutine count of the pool.
func WithMaxGNum(MaxGNum int32) Opt {
	return func(pool *OnDemandTaskPool) {
		pool.MaxGNum = MaxGNum
	}
}

// WithMaxIdleTime overrides how long a temporary worker may sit idle.
func WithMaxIdleTime(maxIdleTime time.Duration) Opt {
	return func(pool *OnDemandTaskPool) {
		pool.maxIdleTime = maxIdleTime
	}
}

// WithQueueBacklogRate overrides the backlog threshold that triggers
// creation of extra workers.
func WithQueueBacklogRate(queueBacklogRate float64) Opt {
	return func(pool *OnDemandTaskPool) {
		pool.queueBacklogRate = queueBacklogRate
	}
}

// NewOnDemandTaskPool builds a pool with initGNum resident goroutines and a
// task queue of capacity queueCap, applies opts, and validates the result.
// coreGNum and MaxGNum default to initGNum unless overridden via options.
// It returns a configuration error when the resulting parameters are invalid.
func NewOnDemandTaskPool(initGNum int32, queueCap int32, opts ...Opt) (*OnDemandTaskPool, error) {
	// Note: the second parameter was previously named `cap`, shadowing the
	// builtin; renamed (parameter names are not part of Go's call interface).
	o := &OnDemandTaskPool{
		initGNum: initGNum,
		coreGNum: initGNum,
		MaxGNum:  initGNum,

		queue: make(chan task.Task, queueCap),

		maxIdleTime: defaultIdleTimeout,
		state:       StateCreated,
	}

	for _, opt := range opts {
		opt(o)
	}

	if err := o.validateParams(); err != nil {
		return nil, err
	}

	// shutdownCtx is cancelled when the pool has fully stopped (graceful
	// shutdown complete) or when ShutdownNow forces termination.
	o.shutdownCtx, o.shutdownCancel = context.WithCancel(context.Background())

	return o, nil
}

// validateParams enforces the pool's configuration invariants:
// 0 < initGNum <= coreGNum <= MaxGNum, and 0 <= queueBacklogRate < 1.
// It may repair one benign case: coreGNum was raised via an option while
// MaxGNum kept its default (== initGNum), in which case MaxGNum is promoted
// to coreGNum.
func (o *OnDemandTaskPool) validateParams() error {
	// need: 0 < initGNum <= coreGNum <= MaxGNum
	ok := o.initGNum > 0 && o.initGNum <= o.coreGNum && o.coreGNum <= o.MaxGNum
	if !ok {
		// Special case: coreGNum was set but MaxGNum was not (defaults to
		// initGNum). The original check did not re-verify initGNum > 0 here,
		// which let e.g. initGNum=-1, coreGNum=5 slip through validation.
		if o.initGNum > 0 && o.coreGNum > o.initGNum && o.MaxGNum == o.initGNum {
			o.MaxGNum = o.coreGNum
		} else {
			return errors2.ErrNumGRelated(o.initGNum, o.coreGNum, o.MaxGNum)
		}
	}

	// 0 <= backlog rate < 1:
	// rate == 0 means no backlog is tolerated before growing;
	// rate == 1 would mean "always backlogged", which is nonsensical.
	ok = o.queueBacklogRate >= float64(0) && o.queueBacklogRate < float64(1)
	if !ok {
		return errors2.ErrQueueBacklogRate(o.queueBacklogRate)
	}
	return nil
}

// GetStateString returns a human-readable name for the pool's current state,
// or an error if the state value is unknown.
func (o *OnDemandTaskPool) GetStateString() (string, error) {
	// state is mutated concurrently via atomic CAS elsewhere in this file;
	// a plain field read here is a data race under the Go memory model.
	state := atomic.LoadInt32(&o.state)
	switch state {
	case StateCreated:
		return "created", nil
	case StateRunning:
		return "running", nil
	case StateClosing:
		return "closing", nil
	case StateStopped:
		return "stopped", nil
	case StateLocked:
		return "locked", nil
	default:
		return "", errors2.ErrUnknownState(state)
	}
}

// InternalState returns the pool's state, waiting out the transient
// StateLocked marker used by trySubmit/Start as a lightweight lock.
func (o *OnDemandTaskPool) InternalState() int32 {
	for {
		state := atomic.LoadInt32(&o.state)
		if state != StateLocked {
			return state
		}
		// Yield between probes: a tight spin can monopolize the CPU and
		// starve the very goroutine that holds the state "lock".
		runtime.Gosched()
	}
}

// Submit enqueues t for execution, retrying until it is accepted, the pool
// leaves a submittable state, or ctx is cancelled. Before Start() the queue
// is not drained, so a full queue is a hard error rather than a wait.
func (o *OnDemandTaskPool) Submit(ctx context.Context, t task.Task) error {
	if ctx.Err() != nil {
		return ctx.Err()
	}
	if t == nil {
		return errors2.ErrTask
	}

	for {
		// Re-check the caller's context each attempt so a cancelled or
		// expired context cannot leave this loop spinning forever when
		// trySubmit keeps losing the CAS race.
		if ctx.Err() != nil {
			return ctx.Err()
		}

		// InternalState loads the state atomically and waits out the
		// transient StateLocked marker. The original read o.state directly
		// (a race) and could spuriously fail with "locked" while another
		// goroutine was mid-submit.
		state := o.InternalState()
		if state != StateCreated && state != StateRunning {
			stateStr, err := o.GetStateString()
			if err != nil {
				return err
			}
			return errors2.ErrSubmitState(stateStr)
		}

		// Before Start() no workers drain the queue, so a full queue cannot
		// resolve itself — fail fast instead of waiting.
		if state == StateCreated && len(o.queue) == cap(o.queue) {
			return errors2.ErrQueueFull
		}

		ok, err := o.trySubmit(ctx, &task.Wrapper{
			Task: t,
		}, state)
		if ok || err != nil {
			// ok: submitted successfully (err is nil).
			// err != nil: submission failed with a real error.
			return err
		}

		// ok == false && err == nil: this attempt lost the race; loop and
		// retry until success, error, or cancellation.
	}
}

// allowCreateG reports whether a new worker goroutine may be created.
// Conditions: the current goroutine count is below MaxGNum AND the queue
// backlog rate exceeds the configured threshold (work is piling up).
// The answer can be stale under high concurrency: a queued task only leaves
// the queue when a worker's select picks it up, so len(o.queue) may lag
// behind a burst of submissions and occasionally over-create goroutines.
func (o *OnDemandTaskPool) allowCreateG() bool {
	o.mutex.Lock()
	defer o.mutex.Unlock()

	// NumGNum is mutated with atomic.AddInt32 elsewhere, not always under
	// o.mutex, so it must be loaded atomically to avoid a data race.
	if atomic.LoadInt32(&o.NumGNum) >= o.MaxGNum {
		return false
	}

	// Current backlog rate = queued tasks / queue capacity.
	rate := float64(len(o.queue)) / float64(cap(o.queue))
	return rate > o.queueBacklogRate
}

// goroutine is the body of one pool worker, identified by id.
// It loops pulling tasks from o.queue until the pool shuts down or the
// worker's idle timer fires (temporary workers only).
func (o *OnDemandTaskPool) goroutine(id int32) {
	idleTimer := time.NewTimer(0) // bounds how long this worker may sit idle with no task
	if !idleTimer.Stop() {
		<-idleTimer.C
	}

	for {
		select {
		case <-o.shutdownCtx.Done():
			// Pool is stopping: retire this worker immediately.
			atomic.AddInt32(&o.NumGNum, -1)
			return
		case <-idleTimer.C:
			// Idle timeout expired: retire this temporary worker.
			o.idleGroup.Delete(id) // remove it from the idle set
			atomic.AddInt32(&o.NumGNum, -1)
			return
		case t, ok := <-o.queue:
			// If an idle worker just picked up a task:
			// 1. remove it from the idle set
			// 2. stop its idle timer
			_, exist := o.idleGroup.Load(id)
			if exist {
				o.idleGroup.Delete(id)
				if !idleTimer.Stop() {
					// The timer fired (or stopped) before Stop() was called;
					// drain the channel so a later receive cannot block/deadlock.
					<-idleTimer.C
				}
			}

			if !ok {
				// ok == false means o.queue was closed, e.g. by Shutdown.
				if atomic.AddInt32(&o.NumGNum, -1) == 0 {
					// The last worker to exit must:
					// 1. flip the pool state to stopped
					// 2. signal that graceful shutdown has completed
					if atomic.CompareAndSwapInt32(&o.state, StateClosing, StateStopped) {
						o.shutdownCancel()
					}
				}
				return
			}

			atomic.AddInt32(&o.runningTaskNum, 1)
			err := t.Run(o.shutdownCtx)
			if err != nil {
				log.Println(err)
			}
			atomic.AddInt32(&o.runningTaskNum, -1)

			o.mutex.Lock()

			// After finishing a task decide whether this worker should exit,
			// go into the idle set (temporary worker), or keep running as a
			// resident worker.

			// Exit condition: worker count in (coreGNum, MaxGNum] AND no work
			// pending (queue empty, or more workers than queued tasks).
			// NOTE(review): NumGNum is read and decremented here under o.mutex
			// but mutated elsewhere with atomic ops not holding this mutex —
			// the mixed discipline looks racy; verify with -race.

			scope := o.NumGNum > o.coreGNum && o.NumGNum <= o.MaxGNum
			noExecuteTask := len(o.queue) == 0 || o.NumGNum > int32(len(o.queue))
			if scope && noExecuteTask {
				o.NumGNum--
				o.mutex.Unlock()
				return
			}

			// Idle-set condition: everything beyond the resident workers is
			// put under idle-time management.
			count := int32(0)
			o.idleGroup.Range(func(key, value any) bool {
				count++
				return true
			})
			// o.NumGNum - count = workers currently without an idle timeout.
			// If that exceeds initGNum, this worker becomes temporary: arm its
			// idle timer and record it in the idle set.
			if o.NumGNum-count > o.initGNum {
				idleTimer = time.NewTimer(o.maxIdleTime)
				o.idleGroup.Store(id, "")
			}

			o.mutex.Unlock()
		}
	}
}

// trySubmit makes one non-blocking attempt to enqueue t while the pool is in
// the given state. It returns (true, nil) on success, (false, nil) when the
// attempt simply lost the race (caller should retry), and (false, err) on a
// real failure such as context cancellation.
// The parameter was renamed from `task` to `t`: it shadowed the imported
// task package.
func (o *OnDemandTaskPool) trySubmit(ctx context.Context, t task.Task, state int32) (bool, error) {
	// "Lock" via CAS on state instead of o.mutex: helpers invoked below take
	// the mutex themselves and Go mutexes are not reentrant.
	if atomic.CompareAndSwapInt32(&o.state, state, StateLocked) {
		defer atomic.CompareAndSwapInt32(&o.state, StateLocked, state)

		select {
		case <-ctx.Done():
			return false, ctx.Err()
		case o.queue <- t:
			// While running, grow the pool if the backlog policy allows it;
			// otherwise the task just waits its turn in o.queue.
			if state == StateRunning && o.allowCreateG() {
				atomic.AddInt32(&o.NumGNum, 1)
				go o.goroutine(atomic.AddInt32(&o.id, 1))
			}
			return true, nil
		default:
			// Never block while holding the state lock.
			return false, nil
		}
	}
	return false, nil
}

// needCreatedGNum reports how many workers Start should launch:
// the pending task count clamped to [initGNum, MaxGNum].
func (o *OnDemandTaskPool) needCreatedGNum() int32 {
	o.mutex.Lock()
	defer o.mutex.Unlock()

	pending := int32(len(o.queue))
	switch {
	case pending >= o.MaxGNum:
		// Backlog meets or exceeds the cap: launch the maximum.
		return o.MaxGNum
	case pending > o.initGNum:
		// Backlog in (initGNum, MaxGNum): one worker per pending task.
		return pending
	default:
		// Small backlog: still launch the resident minimum so workers are
		// standing by for future tasks.
		return o.initGNum
	}
}

// Start launches the initial worker set and moves the pool to StateRunning.
// It may only be called on a freshly created pool.
func (o *OnDemandTaskPool) Start() error {
	for {
		// InternalState loads the state atomically and waits out the
		// transient StateLocked marker; the original read o.state directly,
		// racing with CAS writers and spuriously erroring on "locked".
		state := o.InternalState()
		if state != StateCreated {
			stateStr, err := o.GetStateString()
			if err != nil {
				return err
			}
			return errors2.ErrStartState(stateStr)
		}

		if atomic.CompareAndSwapInt32(&o.state, StateCreated, StateLocked) {
			// Size the initial worker set from the current backlog:
			// clamp(len(queue), initGNum, MaxGNum).
			n := o.needCreatedGNum()
			atomic.AddInt32(&o.NumGNum, n)
			for range n {
				go o.goroutine(atomic.AddInt32(&o.id, 1))
			}

			atomic.CompareAndSwapInt32(&o.state, StateLocked, StateRunning)
			return nil
		}
	}
}

// Shutdown initiates a graceful stop: the queue is closed so workers drain
// the remaining tasks and exit. The returned channel is closed once the last
// worker has finished (see the closed-queue branch in goroutine).
func (o *OnDemandTaskPool) Shutdown() (<-chan struct{}, error) {
	for {
		// Atomic, lock-aware state read; the original's plain field read
		// raced with CAS writers elsewhere in the file.
		state := o.InternalState()
		if state != StateRunning {
			stateStr, err := o.GetStateString()
			if err != nil {
				return nil, err
			}
			return nil, errors2.ErrStopState(stateStr)
		}
		if atomic.CompareAndSwapInt32(&o.state, StateRunning, StateClosing) {
			// Closing the queue is the drain signal: workers observe
			// ok == false once the backlog is consumed.
			close(o.queue)
			return o.shutdownCtx.Done(), nil
		}
	}
}

// ShutdownNow force-stops the pool: it closes the queue, cancels the
// shutdown context so workers abandon their loops, and returns the tasks
// that were still queued and never executed.
func (o *OnDemandTaskPool) ShutdownNow() ([]task.Task, error) {
	for {
		// Atomic, lock-aware state read; the original's plain field read
		// raced with CAS writers elsewhere in the file.
		state := o.InternalState()
		if state != StateRunning {
			stateStr, err := o.GetStateString()
			if err != nil {
				return nil, err
			}
			return nil, errors2.ErrStopState(stateStr)
		}

		if atomic.CompareAndSwapInt32(&o.state, StateRunning, StateStopped) {
			close(o.queue)
			o.shutdownCancel()
			// Drain whatever the workers did not take; ranging over a closed
			// channel ends when it is empty.
			tasks := make([]task.Task, 0, len(o.queue))
			for t := range o.queue {
				tasks = append(tasks, t)
			}
			return tasks, nil
		}
	}
}

// States returns a channel that receives a pool-state snapshot every
// interval until ctx is cancelled or the pool shuts down. A final snapshot
// is attempted before the channel is closed.
func (o *OnDemandTaskPool) States(ctx context.Context, interval time.Duration) (<-chan State, error) {
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}
	if o.shutdownCtx.Err() != nil {
		return nil, o.shutdownCtx.Err()
	}

	ch := make(chan State, 1)
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-ctx.Done():
				o.setState(ch, time.Now().UnixNano())
				close(ch)
				return
			case <-o.shutdownCtx.Done():
				// BUG FIX: the original closed ch BEFORE calling setState;
				// a send on a closed channel panics even inside a select
				// with a default case. Send the final snapshot first.
				o.setState(ch, time.Now().UnixNano())
				close(ch)
				return
			case t := <-ticker.C:
				o.setState(ch, t.UnixNano())
			}
		}
	}()

	return ch, nil
}

// setState performs a non-blocking send of the current snapshot into ch;
// the sample is dropped if the receiver is not ready.
func (o *OnDemandTaskPool) setState(ch chan State, timeStamp int64) {
	snapshot := o.getState(timeStamp)
	select {
	case ch <- snapshot:
	default:
		// Receiver not keeping up: drop this sample rather than block.
	}
}

// getState assembles a point-in-time snapshot of the pool's counters,
// stamped with the supplied timestamp (nanoseconds).
func (o *OnDemandTaskPool) getState(timeStamp int64) State {
	s := State{TimeStamp: timeStamp}
	s.State = atomic.LoadInt32(&o.state)
	s.RunningTaskNum = atomic.LoadInt32(&o.runningTaskNum)
	s.WaitingTasksNum = int32(len(o.queue))
	s.TaskCap = int32(cap(o.queue))
	s.NumG = atomic.LoadInt32(&o.NumGNum)
	return s
}
