package taskpool

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"testkjcld.bhecard.com/gitlab/kit/esim/config"

	"testkjcld.bhecard.com/gitlab/kit/esim/log"
)

// TaskPool is a fixed-size worker pool. Jobs submitted via AddJobs/AddFunc
// enter acceptJobChan; the dispatch loop (process) pairs each job with an
// idle worker that has parked its personal job channel in pool. The type
// is used as a package-level singleton (see NewTaskPool / Instance).
type TaskPool struct {
	wg     sync.WaitGroup     // tracks worker goroutines started by Run
	cancel context.CancelFunc // cancels the dispatch loop started by Run

	logger log.Logger
	conf   config.Config

	acceptJobChan chan IJob      // unbuffered intake for submitted jobs
	pool          chan chan IJob // idle workers' job channels; cap == maxWorkers
	works         []*worker      // every worker created by Run
	maxWorkers    int64          // configured worker count
	runWorkers    int64          // atomically incremented as each worker starts
}

// Option mutates the TaskPool while it is being built in NewTaskPool.
type Option func(*TaskPool)

var (
	once     sync.Once // guards one-time construction of the singleton
	taskPool *TaskPool // package-level singleton instance
)

// defaultPoolSize is the worker count used when the config key
// "taskpool_max_count" is absent or out of range.
const defaultPoolSize = 500

// NewTaskPool builds the process-wide TaskPool singleton. Construction
// runs exactly once; later calls return the same instance and their
// options are ignored. Options may inject a logger and a config source;
// absent dependencies fall back to defaults. The worker count is
// defaultPoolSize unless the config key "taskpool_max_count" holds a
// value in (0, 50000).
func NewTaskPool(opts ...Option) *TaskPool {
	once.Do(func() {
		taskPool = &TaskPool{
			acceptJobChan: make(chan IJob),
			works:         make([]*worker, 0),
			maxWorkers:    defaultPoolSize,
			// wg needs no explicit init: the sync.WaitGroup zero value is ready to use.
		}

		for _, opt := range opts {
			opt(taskPool)
		}

		if taskPool.logger == nil {
			taskPool.logger = log.NewLogger()
		}

		if taskPool.conf == nil {
			taskPool.conf = config.NewMemConfig()
		}

		// A configured size overrides the default, but only within a sane
		// upper bound so a bad config value cannot spawn 50k+ goroutines.
		poolSize := taskPool.conf.GetInt64("taskpool_max_count")
		if 0 < poolSize && poolSize < 50000 {
			taskPool.maxWorkers = poolSize
		}

		// Buffer one slot per worker so every idle worker can park its
		// job channel without blocking.
		taskPool.pool = make(chan chan IJob, taskPool.maxWorkers)

		taskPool.logger.Infof("开启了[%d]task", taskPool.maxWorkers)
	})
	return taskPool
}

// WithTaskSize resizes the worker pool to taskSize. Call it before Run:
// it replaces the dispatch channel, so invoking it on a running pool
// would strand workers parked on the old channel.
//
// Bug fix: the original body mutated the package-level singleton
// `taskPool` instead of the receiver, so calling this on any instance
// not yet registered as the singleton dereferenced a nil pointer; it now
// operates on the receiver.
func (p *TaskPool) WithTaskSize(taskSize int64) {
	p.maxWorkers = taskSize
	p.pool = make(chan chan IJob, taskSize)
}

// WithTaskLogger returns an Option that installs logger as the pool's
// logger, overriding the default created inside NewTaskPool.
func WithTaskLogger(logger log.Logger) Option {
	return func(tp *TaskPool) { tp.logger = logger }
}

// WithTaskConf returns an Option that installs conf as the pool's config
// source, overriding the in-memory default created inside NewTaskPool.
func WithTaskConf(conf config.Config) Option {
	return func(tp *TaskPool) { tp.conf = conf }
}

// Concurrency reports how many workers are currently busy: the total
// worker count minus the idle workers parked in the dispatch pool.
func (p *TaskPool) Concurrency() int64 {
	idle := int64(len(p.pool))
	return p.maxWorkers - idle
}

// PoolSize reports the configured number of workers in the pool.
//
// Bug fix: the body was a copy-paste of Concurrency (maxWorkers minus
// idle workers), making the two methods indistinguishable; PoolSize now
// returns the pool's capacity, as its name promises.
func (p *TaskPool) PoolSize() int64 {
	return p.maxWorkers
}

// AddJobs hands each job to the dispatcher in order, blocking on every
// job until the dispatch goroutine accepts it.
func (p *TaskPool) AddJobs(jobs ...IJob) {
	for i := range jobs {
		p.acceptJobChan <- jobs[i]
	}
}

// AddFunc wraps f as a JobCall job and submits it to the dispatcher,
// blocking until the dispatch goroutine accepts it.
func (p *TaskPool) AddFunc(f func()) {
	p.acceptJobChan <- JobCall(f)
}

// Instance returns the TaskPool singleton, creating it with default
// options on first use.
//
// Bug fix: the previous nil check-then-create raced with concurrent
// callers and with NewTaskPool itself. Delegating to NewTaskPool funnels
// creation through its sync.Once guard, making initialization atomic;
// the call is cheap once the singleton exists.
func Instance() *TaskPool {
	return NewTaskPool()
}

// Run starts maxWorkers worker goroutines plus the dispatch loop. It
// blocks until every worker goroutine has begun executing, then returns.
// Call Stop to shut the pool down.
func (p *TaskPool) Run() {
	ctx, cancel := context.WithCancel(context.Background())
	p.cancel = cancel
	// Start maxWorkers workers; each is tracked by the WaitGroup so Stop
	// can wait for all of them to exit.
	var i int64
	for i = 0; i < p.maxWorkers; i++ {
		worker := newWorker(i, p.pool)
		p.wg.Add(1)
		go func(idx int64) {
			defer p.wg.Done()
			p.logger.Infof("worker[%v], 开始执行....", idx+1)
			// Count this worker as running; Run spins below until the
			// count reaches maxWorkers.
			atomic.AddInt64(&p.runWorkers, 1)
			worker.start()
			p.logger.Infof("worker[%v], 结束执行....", idx+1)
		}(i)
		p.works = append(p.works, worker)
	}
	// Busy-wait (with a short sleep) until every worker goroutine has
	// incremented runWorkers, i.e. the whole pool is up.
	for {
		if atomic.LoadInt64(&p.runWorkers) == p.maxWorkers {
			p.logger.Infof("work start success;")
			break
		}
		time.Sleep(10 * time.Millisecond)
	}

	// Dispatch submitted jobs to idle workers until the context created
	// above is cancelled by Stop.
	go p.process(ctx)
}

// process is the dispatch loop: it pairs each submitted job with an idle
// worker's job channel, and on cancellation closes every worker's channel
// so the workers exit.
func (p *TaskPool) process(ctx context.Context) {
	for {
		select {
		case job := <-p.acceptJobChan:
			// a job request has been received
			// Dispatch happens on this goroutine: block until some worker
			// parks its job channel in the pool (i.e. becomes idle).
			jobChannel, ok := <-p.pool
			if !ok {
				p.logger.Infof("Task failure")
				return
			}

			// dispatch the job to the worker job channel
			jobChannel <- job
		case <-ctx.Done():
			// Shutdown: take each worker's job channel as it goes idle and
			// close it, which makes that worker's start() return.
			// NOTE(review): this assumes every worker parks its channel back
			// in the pool exactly once after cancellation — confirm against
			// the worker implementation.
			var i int64
			for i = 0; i < p.maxWorkers; i++ {
				jobChannel := <-p.pool
				close(jobChannel)
			}
			close(p.pool)

			p.logger.Infof("congratulations, Task over")
			return
		}
	}
}

// Stop cancels the dispatch loop, waits for every worker goroutine to
// exit, then releases per-worker resources.
func (p *TaskPool) Stop() {
	// Robustness fix: if Run was never called, p.cancel is nil and
	// invoking it would panic; treat Stop on a never-started pool as a
	// no-op.
	if p.cancel == nil {
		return
	}
	p.cancel()
	p.wg.Wait()

	// NOTE(review): workers have already returned from start() by the time
	// wg.Wait releases; stop() presumably tears down any remaining
	// per-worker state — confirm against the worker implementation.
	for _, w := range p.works {
		w.stop()
	}
}
