package pool

import (
	"github.com/618lf/swakx-go/pkg/collection/func_chans"
	"github.com/618lf/swakx-go/pkg/logger"
	"github.com/618lf/swakx-go/pkg/tools/gos"
	"github.com/618lf/swakx-go/pkg/tools/ints"
	"github.com/618lf/swakx-go/pkg/tools/sys"
	perrors "github.com/pkg/errors"
	"math/rand"
	"runtime/debug"
	"sync"
	"sync/atomic"
)

var (
	// Error_Executor_Closed is returned when a task is submitted to an executor
	// that has already been shut down.
	Error_Executor_Closed = perrors.New("executor is closed")
	// Default_Workers is the fallback worker count used when an invalid count is given.
	Default_Workers       = 10
	// Default_Queues is the fallback queue count used when an invalid count is given.
	Default_Queues        = 1
	// Default_QueueSize is the fallback per-queue capacity used when an invalid size is given.
	Default_QueueSize     = 128
	// log is the package logger used to report worker-exit errors and task panics.
	log                   = logger.GetLogger()
)

// Executor is a fixed-size worker pool: submitted tasks are distributed
// across a set of queues and executed by background worker goroutines.
type Executor interface {
	// Submit adds a task to queue asynchronously.
	Submit(func()) error
	// SubmitSync adds a task to queue synchronously: it returns only after
	// the task has finished running (or submission failed).
	SubmitSync(func()) error
	// Close closes the worker pool
	Close()
	// IsClosed returns close status of the worker pool
	IsClosed() bool
	// Workers returns the number of worker goroutines in the pool.
	Workers() int
}

// ///////////////////////////////////////
// Task queues
// ///////////////////////////////////////
// tasks abstracts a set of task queues addressed by index, letting the
// executor run over either bounded channels or unbounded channels.
type tasks interface {
	// in returns the send side of queue idx.
	in(int) chan<- func()
	// out returns the receive side of queue idx.
	out(int) <-chan func()
	// close closes every underlying queue.
	close()
}

// boundedTasks implements tasks with fixed-capacity buffered channels:
// a send on a full queue blocks (or falls through a non-blocking select).
type boundedTasks struct {
	queues     int           // number of queues
	queueSize  int           // buffer capacity of each queue
	taskQueues []chan func() // one buffered channel per queue
}

// in exposes queue idx as a send-only channel.
func (b *boundedTasks) in(idx int) chan<- func() {
	queue := b.taskQueues[idx]
	return queue
}

// out exposes queue idx as a receive-only channel.
func (b *boundedTasks) out(idx int) <-chan func() {
	queue := b.taskQueues[idx]
	return queue
}

// close closes every task channel so draining receivers observe the close.
func (b *boundedTasks) close() {
	for _, queue := range b.taskQueues {
		close(queue)
	}
}

// newBoundedTasks builds a tasks implementation backed by `queues`
// buffered channels, each with capacity queueSize.
func newBoundedTasks(queues int, queueSize int) tasks {
	chans := make([]chan func(), queues)
	for i := range chans {
		chans[i] = make(chan func(), queueSize)
	}

	return &boundedTasks{
		queues:     queues,
		queueSize:  queueSize,
		taskQueues: chans,
	}
}

// unBoundedTasks implements tasks on top of func_chans.UnboundedChan.
// NOTE(review): presumably sends on an UnboundedChan never block
// regardless of queue length — verify against func_chans.
type unBoundedTasks struct {
	queues     int                         // number of queues
	queueSize  int                         // initial capacity hint passed to NewUnboundedChan
	taskQueues []*func_chans.UnboundedChan // one unbounded channel per queue
}

// in exposes the send side of unbounded queue idx.
func (b *unBoundedTasks) in(idx int) chan<- func() {
	queue := b.taskQueues[idx]
	return queue.In()
}

// out exposes the receive side of unbounded queue idx.
func (b *unBoundedTasks) out(idx int) <-chan func() {
	queue := b.taskQueues[idx]
	return queue.Out()
}

// close closes every unbounded queue.
func (b *unBoundedTasks) close() {
	for _, queue := range b.taskQueues {
		queue.Close()
	}
}

// newUnBoundedTasks builds a tasks implementation backed by `queues`
// unbounded channels; queueSize is forwarded as the initial capacity.
func newUnBoundedTasks(queues int, queueSize int) tasks {
	chans := make([]*func_chans.UnboundedChan, queues)
	for i := range chans {
		chans[i] = func_chans.NewUnboundedChan(queueSize)
	}

	return &unBoundedTasks{
		queues:     queues,
		queueSize:  queueSize,
		taskQueues: chans,
	}
}

// ///////////////////////////////////////
// executor
// ///////////////////////////////////////
// executor is the concrete Executor: a fixed pool of worker goroutines
// draining a set of task queues, tasks distributed by round robin with
// random fallback queues (see Submit).
type executor struct {
	workers   int // number of worker goroutines
	queues    int // number of task queues (capped at workers in NewExecutor)
	queueSize int // per-queue capacity (or initial capacity when unbounded)

	idx        uint32 // round robin index
	taskQueues tasks  // queue implementation: bounded or unbounded
	wg         sync.WaitGroup // tracks running workers so Close can wait

	once sync.Once     // guards the close of done
	done chan struct{} // closed to broadcast shutdown to workers/submitters
}

// Submit submit task
func (e *executor) Submit(t func()) error {
	if t == nil {
		return perrors.New("task shouldn't be nil")
	}

	if e.IsClosed() {
		return nil
	}

	index := atomic.AddUint32(&e.idx, 1) % uint32(e.queues)

	// add task to queue
	select {
	case <-e.done:
		return Error_Executor_Closed
	case e.taskQueues.in(int(index)) <- t:
		return nil
	default:
	}

	// put the task to a random queue with a maximum of len(e.taskQueues)/2 attempts
	for i := 0; i < e.queues/2; i++ {
		select {
		case e.taskQueues.in(rand.Intn(e.queues)) <- t:
			return nil
		default:
			continue
		}
	}

	// wait for add success
	e.taskQueues.in(int(index)) <- t

	// add success
	return nil
}

// SubmitSync sync submit task
func (e *executor) SubmitSync(t func()) error {
	done := make(chan struct{})
	fn := func() {
		defer close(done)
		t()
	}

	if err := e.Submit(fn); err != nil {
		return err
	}

	<-done
	return nil
}

// IsClosed check whether closed
func (e *executor) IsClosed() bool {
	select {
	case <-e.done:
		return true
	default:
		return false
	}
}

// Close shuts the executor down: it signals shutdown, waits for all
// workers to exit, then closes the task queues. Safe to call multiple
// times, including concurrently.
// BUG FIX: the whole shutdown now runs under e.once. Previously two
// concurrent callers could both pass the IsClosed check and both reach
// taskQueues.close(), panicking on a double channel close.
// NOTE(review): this assumes e.once is consumed only by shutdown (stop
// is only ever called from Close in this file) — verify other files.
func (e *executor) Close() {
	e.once.Do(func() {
		close(e.done)
		e.wg.Wait()
		e.taskQueues.close()
	})
}

// stop broadcasts shutdown by closing the done channel exactly once.
func (e *executor) stop() {
	if e.IsClosed() {
		return
	}
	e.once.Do(func() {
		close(e.done)
	})
}

// dispatch starts all workers; queues are shared round robin among
// workers (worker id modulo queue count).
func (e *executor) dispatch() {
	for id := 1; id <= e.workers; id++ {
		e.wg.Add(1)
		queue := e.taskQueues.out(id % e.queues)
		e.newWorker(id, queue)
	}
}

// newWorker launches one worker goroutine (via gos.GoSafely) draining
// the given queue; a non-nil worker exit error is logged when possible.
func (e *executor) newWorker(id int, queue <-chan func()) {
	gos.GoSafely(nil, false, func() {
		if err := e.runWorker(id, queue); err != nil && log != nil {
			log.Errorf("gost/TaskPool.run error: %s", err.Error())
		}
	}, nil)
}

// runWorker is a worker's main loop: it executes tasks from queue until
// the executor shuts down (e.done closed) or the queue itself closes.
// On shutdown it returns an error if buffered tasks are being abandoned.
func (e *executor) runWorker(id int, queue <-chan func()) error {
	defer e.wg.Done()

	for {
		select {
		case <-e.done:
			// report abandoned tasks still sitting in the buffer
			if length := len(queue); length > 0 {
				return perrors.Errorf("task worker %d exit now while its task buffer length %d is greater than 0",
					id, length)
			}
			return nil

		case t, ok := <-queue:
			if !ok {
				// BUG FIX: the queue was closed; previously the loop kept
				// receiving from the closed channel, busy-spinning forever.
				return nil
			}
			if t != nil {
				e.safeRun(t)
			}
		}
	}
}

// safeRun executes t, recovering and logging any panic so that one bad
// task cannot take down its worker goroutine.
func (e *executor) safeRun(t func()) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if log != nil {
			log.Errorf("goroutine panic: %v\n%s", r, string(debug.Stack()))
		}
	}()

	t()
}

// Workers returns the number of worker goroutines in the pool.
func (e *executor) Workers() int {
	count := e.workers
	return count
}

// NewUnboundedExecutor workers =  Thread Num with unbounded queue
func NewUnboundedExecutor(initCap int) (Executor, error) {
	return NewExecutor(sys.GetThreadNum(), sys.GetThreadNum(), initCap, true)
}

// NewBoundedExecutor workers =  Thread Num
func NewBoundedExecutor(queueSize int) (Executor, error) {
	return NewExecutor(sys.GetThreadNum(), sys.GetThreadNum(), queueSize, false)
}

// NewExecutor create executor
func NewExecutor(workers int, queues int, queueSize int, unbounded bool) (Executor, error) {

	if workers < 0 {
		workers = Default_Workers
	}

	if queues < 0 {
		queues = Default_Queues
	}

	if queueSize < 0 {
		queueSize = Default_QueueSize
	}

	// get min workers
	workers = ints.IntMin(sys.GetThreadNum(), workers)

	// queues smaller then workers
	queues = ints.IntMin(queues, workers)

	// create executor
	e := &executor{
		workers:   workers,
		queues:    queues,
		queueSize: queueSize,
		done:      make(chan struct{}),
	}

	if unbounded {
		e.taskQueues = newUnBoundedTasks(queues, queueSize)
	} else {
		e.taskQueues = newBoundedTasks(queues, queueSize)
	}

	//start workers
	e.dispatch()

	return e, nil
}
