package taskx

import (
	"context"
	"math/rand"
	"runtime/debug"
	"sync/atomic"
	"time"

	"gitee.com/hasika/gotool/container"
	stderror "gitee.com/hasika/gotool/errorx"
	log "gitee.com/hasika/gotool/logx"
)

// EnableTaskTracing turns on duration logging in Promise.WaitDuration: when a
// wait takes longer than TracingDuration, a warning is logged.
var EnableTaskTracing = false

// TracingDuration is the threshold above which a traced wait is reported.
var TracingDuration = time.Millisecond * 100

// TimeOut is returned by WaitDuration when the wait exceeds its deadline.
const TimeOut stderror.ConstStringErr = "task time out"

// Cancel is returned when a task or promise is canceled before completion.
const Cancel stderror.ConstStringErr = "task canceled"

// Task couples a unit of work with the callback that receives its result.
type Task[T any] struct {
	// seq is the pool-assigned sequence id used for cancellation bookkeeping.
	seq      int64
	// Input produces the task's result; it runs on a pool worker goroutine.
	Input    func() (T, error)
	// Callback, if non-nil, is invoked with the outcome (or the Cancel error
	// when the task was canceled before execution).
	Callback Callback[T]
}

// Callback receives the outcome of an asynchronous task.
type Callback[T any] interface {
	// OnTaskDone delivers the task's result or error.
	OnTaskDone(T, error)
	// Cancel releases anything held while waiting for the task.
	Cancel()
}

// taskResult bundles a task's output with its error for channel transport.
type taskResult[T any] struct {
	output T
	err    error
}

// Promise lets a caller wait synchronously for a task's result. It satisfies
// Callback (OnTaskDone/Cancel), so it can be passed directly to Pool.AddTask
// and then waited on with Wait or WaitDuration.
type Promise[T any] struct {
	// resultCh carries the single result; buffered (cap 1) so one delivery
	// never blocks even if nobody is waiting yet.
	resultCh chan taskResult[T]
	// ctx/cancel bound the wait and let either side abandon the promise.
	ctx      context.Context
	cancel   context.CancelFunc
}

// NewPromise builds a Promise whose lifetime is bound to ctx. The result
// channel is buffered with capacity 1 so the producer can deliver its single
// result without blocking.
func NewPromise[T any](ctx context.Context) Promise[T] {
	childCtx, cancelFn := context.WithCancel(ctx)
	return Promise[T]{
		resultCh: make(chan taskResult[T], 1),
		ctx:      childCtx,
		cancel:   cancelFn,
	}
}

// OnTaskDone delivers the task outcome to the waiting side. resultCh has
// capacity 1, so a single delivery succeeds even when no waiter is active.
func (p Promise[T]) OnTaskDone(i T, err error) {
	res := taskResult[T]{output: i, err: err}
	p.resultCh <- res
}

// WaitDuration blocks until the task completes, the promise is canceled, or
// duration elapses — whichever happens first. On cancellation it returns
// Cancel and on timeout TimeOut, both with the zero value of T. The promise's
// context is always canceled before returning so the producing side can
// observe abandonment.
func (p Promise[T]) WaitDuration(duration time.Duration) (T, error) {
	start := time.Now()
	if EnableTaskTracing {
		defer func() {
			since := time.Since(start)
			if since > TracingDuration {
				log.Warnf("call time too long %s", since)
			}
		}()
	}
	var zero T
	// A one-shot Timer is the right tool for a single deadline; the previous
	// Ticker would keep rearming itself for ticks that are never used.
	timer := time.NewTimer(duration)
	defer timer.Stop()
	defer p.cancel()
	select {
	case <-p.ctx.Done():
		return zero, Cancel
	case <-timer.C:
		return zero, TimeOut
	case v, ok := <-p.resultCh:
		if !ok {
			// resultCh is never closed today; treat a close as cancellation.
			return zero, Cancel
		}
		return v.output, v.err
	}
}

// Wait blocks with a default five-second timeout; see WaitDuration.
func (p Promise[T]) Wait() (T, error) {
	const defaultTimeout = 5 * time.Second
	return p.WaitDuration(defaultTimeout)
}

// Cancel abandons the promise: the underlying context is canceled, which makes
// any in-flight Wait/WaitDuration return the Cancel error.
func (p Promise[T]) Cancel() {
	p.cancel()
}

// DispatchCall adapts a plain function to the Callback interface, so callers
// can receive task results asynchronously without defining a new type.
type DispatchCall[T any] struct {
	fun func(T, error)
}

// NewAsyncCall wraps fun in a DispatchCall; fun is invoked with the task's
// outcome when it completes.
func NewAsyncCall[T any](fun func(T, error)) DispatchCall[T] {
	return DispatchCall[T]{fun: fun}
}

// OnTaskDone forwards the task outcome to the wrapped function.
func (d DispatchCall[T]) OnTaskDone(result T, err error) {
	d.fun(result, err)
}

// Cancel is a no-op: a fire-and-forget callback has nothing to clean up.
func (d DispatchCall[T]) Cancel() {}

// Pool is a fixed-size worker pool: num goroutines each drain their own
// buffered task channel. Tasks that have not started yet can be withdrawn
// with CancelTask.
type Pool[T any] struct {
	// channels holds one task queue per worker goroutine.
	channels  []chan Task[T]
	// ctx stops all worker loops when canceled.
	ctx       context.Context
	// seq issues monotonically increasing task ids.
	seq       atomic.Int64
	// taskMap tracks pending (enqueued, not yet started, not canceled) task ids.
	taskMap   container.MapInterface[int64, struct{}]
	// num is the number of workers (and channels).
	num       int64
	// name labels the pool in log output.
	name      string
	// bufferLen is each channel's capacity; used for the fullness warning in AddTask.
	bufferLen int
}

// loop is a worker goroutine: it drains tasks from the channel at the given
// index until the pool's context is canceled.
func (t *Pool[T]) loop(index int64) {
	ch := t.channels[index]
	for {
		select {
		case <-t.ctx.Done():
			return
		case task := <-ch:
			t.processOneJob(&task)
		}
	}
}

// processOneJob executes a single task and reports its outcome to the callback.
//
// A task whose seq was already removed from taskMap (via CancelTask) is not
// executed; its callback, if any, receives the zero value and Cancel.
//
// The recover is installed before any user code runs, so a panic in Input or
// in either callback path cannot kill the worker goroutine (the original only
// guarded the execution path, not the cancel branch). On a panic before the
// outcome was delivered, the callback is notified with a "task panicked"
// error so a waiting Promise is not left hanging until its timeout.
func (t *Pool[T]) processOneJob(job *Task[T]) {
	const panicked stderror.ConstStringErr = "task panicked"
	notified := false
	defer func() {
		if r := recover(); r != nil {
			log.Errorf("recover panic stack %s", string(debug.Stack()))
			if !notified && job.Callback != nil {
				var zero T
				job.Callback.OnTaskDone(zero, panicked)
			}
		}
	}()
	if _, ok := t.taskMap.LoadAndDelete(job.seq); !ok {
		// The task was canceled before a worker picked it up.
		notified = true
		var zero T
		if job.Callback != nil {
			job.Callback.OnTaskDone(zero, Cancel)
		}
		return
	}
	rsp, err := job.Input()
	// Mark delivered before calling out so a panicking callback is not
	// notified a second time from the deferred recover.
	notified = true
	if job.Callback != nil {
		job.Callback.OnTaskDone(rsp, err)
	}
}

// CancelTask withdraws a pending task: a worker that later dequeues id will
// skip execution and report Cancel to the task's callback. It has no effect
// on a task that has already started.
func (t *Pool[T]) CancelTask(id int64) {
	t.taskMap.Delete(id)
}

// AddTask enqueues f on one of the pool's worker channels and returns the
// task's sequence id (usable with CancelTask). When exactly one explicit
// index is supplied, the task is pinned to the channel index%num; otherwise a
// channel is chosen at random. callback may be nil if the caller does not
// need the result.
//
// Note: the send blocks when the chosen channel is full.
func (t *Pool[T]) AddTask(f func() (T, error), callback Callback[T], index ...int64) int64 {
	var realIndex int64
	if len(index) == 1 {
		// Double modulo so a negative caller-supplied index cannot yield a
		// negative slice offset and panic (Go's % keeps the dividend's sign).
		realIndex = ((index[0] % t.num) + t.num) % t.num
	} else {
		realIndex = rand.Int63n(t.num)
	}
	targetChan := t.channels[realIndex]
	seq := t.seq.Add(1)
	task := Task[T]{
		seq:      seq,
		Input:    f,
		Callback: callback,
	}

	// Warn when the queue is above 90% capacity so operators can react
	// before producers start blocking on the send below.
	if len(targetChan) > int(float32(t.bufferLen)*0.9) {
		log.Errorf("任务队列%s容量不足,当前长度%d,最大长度%d", t.name, len(targetChan), t.bufferLen)
	}
	// Register the id before sending so a worker that dequeues the task
	// immediately still finds the pending entry.
	t.taskMap.Store(seq, struct{}{})
	targetChan <- task
	return seq
}

// NewTaskPool creates a Pool with num worker goroutines, each owning a
// dedicated task channel of capacity bufferLen. Workers run until ctx is
// canceled. name appears only in log messages. num must be >= 1 (a
// non-positive num yields a pool that panics in AddTask).
func NewTaskPool[T any](ctx context.Context, num int64, bufferLen int, name string) *Pool[T] {
	pool := &Pool[T]{
		ctx:       ctx,
		num:       num,
		name:      name,
		bufferLen: bufferLen,
		taskMap:   container.NewSyncMap[int64, struct{}](),
		// Pre-size the slice: the worker count is known up front, so the
		// append loop below never reallocates.
		channels: make([]chan Task[T], 0, num),
	}
	for index := int64(0); index < num; index++ {
		pool.channels = append(pool.channels, make(chan Task[T], bufferLen))
		// Copy the loop variable for the goroutine (required before Go 1.22).
		gindex := index
		go pool.loop(gindex)
	}
	return pool
}
