package utils

import (
	"errors"
	"fmt"
	"os"
	"runtime"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

/***
 * Employs multiple workers: N workers serve the task groups; when a worker
 * finishes its group it looks for other groups with pending work.
 *   1. When posting a task, a group id is supplied; tasks with the same id land in the same group.
 *   2. By default a group is executed by at most one goroutine at a time, in order,
 *      though not necessarily always the same goroutine.
 *   3. ConfigChannelMaxWorkNum can allow several workers to run one group concurrently.

Signal tasks
    1. One signal is effectively one group.
    2. One signal can be bound to multiple ids.
    3. One id maps to a single signal; to bind an id to several signals, append a suffix to the id.
    4. Triggering a signal asynchronously runs all callbacks registered under it (in sequence).

Coding rules
    1. Keep dependencies minimal; this is a core utility.
       1> Do not use the utils logger here (so the utils logger itself may use this
          library); if logging is needed, use the SDK's built-in log.


*/

// MultiGroupTask schedules tasks grouped by an id; each group ("channel") is
// normally drained by one worker goroutine at a time.
type MultiGroupTask struct {
	isTerminated int32
	worker_num   int32 // current number of worker goroutines
	taskcnt      int32 // current number of pending tasks
	//popcnt            int32
	pushfailcnt       int32
	free_channel_flag int32
	channel_size      int32 // number of groups
	started           int32
	channel_obj_cnt   int32 // number of live channel objects (finalizer-tracked)

	taskPool *TaskPool

	Id string

	workType byte // 0: default; 1: single-thread dispatch (set before Start; changing mid-run needs restart); 2: single-thread dispatch, executed via taskPool

	FixedWorker  byte
	Monitor_push *MonitorSpeedNTimeRec
	Monitor_Pop  *MonitorSpeedNTimeRec

	// Signals
	//    signal id -> registered record
	signMap sync.Map
	//    signal -> group object
	signGroupMap sync.Map

	// 0: remove immediately (may mistakenly remove a group that is still in use)
	// 1: manual cleanup
	config_remove_flag int

	config_grow_work_if_task_num int32    // grow workers when backlog exceeds this; <=0 disables growth
	config_max_work_num          int32    // maximum worker goroutines (they poll groups for tasks)
	config_min_work_num          int32    // minimum worker goroutines kept alive
	config_channel_max_work_num  sync.Map // groupid:max-work-num; consulted when a channel is created
	closeWg                      sync.WaitGroup
	channelMap                   sync.Map  // groups
	channelPool                  sync.Pool // Pool is usable for reuse, but not as a queue
	postTaskChan                 chan *multi_group_task_channel
	latestcleanupmsg             string
	channel_max_queue_size       int32
	free_channel_timeout_secs    float64 // 0 disables cleanup
	last_free_channel_t          time.Time
	warnmsg                      string

	// OnFatalCallBack receives every task drained from a channel that is being
	// discarded while it still holds queued tasks (see cleanup/doFatalCb).
	OnFatalCallBack func(sender *MultiGroupTask, rec *MultiGroupTaskRec, err error)
}

var (
	// defaultWorkersCreateFlag guards one-time creation of defaultWorkers (CAS 0->1).
	defaultWorkersCreateFlag int32           = 0
	// defaultWorkers is the lazily created, process-wide MultiGroupTask instance.
	defaultWorkers           *MultiGroupTask = nil
)

// CheckInitialDefaultWorkers initializes the process-wide default workers
// exactly once (guarded by a CAS on defaultWorkersCreateFlag). Returns true
// for the caller that actually performed initialization, false otherwise.
func CheckInitialDefaultWorkers(worktype byte, minwork, maxwork int32, quemax int32) bool {
	if !atomic.CompareAndSwapInt32(&defaultWorkersCreateFlag, 0, 1) {
		return false
	}
	defaultWorkers = NewMultiGroupTask()
	w := defaultWorkers
	w.ConfigMaxWork(maxwork).ConfigMinWork(minwork)
	w.ConfigChannelMaxQueueSize(quemax)
	w.workType = worktype
	w.Start()
	return true
}

// checkDefaultWorkers lazily creates the default MultiGroupTask on first use.
// The CAS winner builds and starts the instance; losers wait (bounded) until
// it is published. Returns true once defaultWorkers is available.
func checkDefaultWorkers() bool {
	if defaultWorkers == nil {
		if atomic.CompareAndSwapInt32(&defaultWorkersCreateFlag, 0, 1) {
			defaultWorkers = NewMultiGroupTask()
			defaultWorkers.ConfigMaxWork(128).ConfigMinWork(20)
			defaultWorkers.ConfigChannelMaxQueueSize(8192)
			defaultWorkers.workType = 1
			defaultWorkers.Start()
		} else {
			// Fix: another goroutine is initializing; poll until the instance
			// is published instead of sleeping exactly once — a single 100ms
			// sleep could still observe nil and make DefaultWorkers panic.
			for i := 0; i < 100 && defaultWorkers == nil; i++ {
				time.Sleep(time.Millisecond * 10)
			}
		}
	}
	return defaultWorkers != nil
}

// DefaultWorkers returns the shared default MultiGroupTask, creating it on
// first use. Panics if the instance could not be created.
func DefaultWorkers() *MultiGroupTask {
	if checkDefaultWorkers() {
		return defaultWorkers
	}
	panic("DefaultWorkers创建失败!!!")
}

// NewMultiGroupTask builds a MultiGroupTask with default limits (128 max / 10
// min workers, per-group queue of 4096) and wires the channel pool and the
// push/pop monitors.
func NewMultiGroupTask() *MultiGroupTask {
	task := &MultiGroupTask{
		worker_num:                   0,
		taskcnt:                      0,
		config_min_work_num:          10,
		config_max_work_num:          128,
		config_grow_work_if_task_num: 5,
		config_remove_flag:           1,
		channel_max_queue_size:       4096,
		free_channel_timeout_secs:    120, // cleanup interval/timeout: a channel idle for N seconds is reclaimed
	}
	task.Monitor_push = NewMonitorSpeedNTimeRec()
	task.Monitor_Pop = NewMonitorSpeedNTimeRec()
	task.channelPool.New = task.onNewChannel
	return task
}

// IsTerminated reports whether the task manager has been stopped.
// Fix: atomic load — isTerminated is written concurrently by Terminate/Stop
// while workers read it; a plain read is a data race under -race.
func (this *MultiGroupTask) IsTerminated() bool {
	return atomic.LoadInt32(&this.isTerminated) == 1
}

// onNewChannel is the sync.Pool New func: it builds a fresh task channel
// (group) allowing a single worker, with the configured queue bound.
// channel_obj_cnt counts live channel objects; a finalizer decrements it when
// the GC collects one, so the counter reflects actual survivors.
func (this *MultiGroupTask) onNewChannel() interface{} {
	val := &multi_group_task_channel{
		task_current_work_cnt: 0,
		work_max_cnt:          1,
		max_queue_size:        this.channel_max_queue_size,
		dataQueue:             NewSyncQueue(),
		state:                 0,
	}
	if DEBUG_MODE == 1 {
		// Debug builds keep a small ring of recent entries for diagnosis.
		val.entryInfo = NewSyncCycleList()
		val.entryInfo.ConfigMaxSize(20)
	}
	atomic.AddInt32(&this.channel_obj_cnt, 1)
	runtime.SetFinalizer(val, func(obj interface{}) {
		atomic.AddInt32(&this.channel_obj_cnt, -1)
	})
	return val
}

// GetWorkType returns the configured scheduling mode (see the workType field).
func (m *MultiGroupTask) GetWorkType() byte {
	return m.workType
}

// ConfigWorkType switches the scheduling mode. When it differs from the
// current one, the manager is stopped first; the caller must Start it again.
func (m *MultiGroupTask) ConfigWorkType(workType byte) {
	if m.workType == workType {
		return
	}
	m.Stop()
	m.workType = workType
}

// releaseChannel returns a drained channel to the pool. A channel must be
// idle (no active worker, empty queue) when released; otherwise a BUG line is
// written to stderr, because queued tasks would be silently resurrected when
// the pooled object is reused.
func (this *MultiGroupTask) releaseChannel(val *multi_group_task_channel) {
	quesize := atomic.LoadInt32(&val.queue_size)
	if val.task_current_work_cnt != 0 || quesize != 0 {
		// Fix: trailing newline added — the old format string had none, so
		// consecutive BUG reports ran together on one stderr line.
		fmt.Fprintf(os.Stderr, "[BUG][%s]releaseChannel[%s] err: task_current_work_cnt(%d), task-size:%d\n", NowString(), val.IDText(), val.task_current_work_cnt, quesize)
	}
	this.channelPool.Put(val)
}

// checkGrowWork spawns one more worker goroutine unless the configured
// maximum would be exceeded. worker_num is incremented optimistically and
// rolled back when over the limit.
func (m *MultiGroupTask) checkGrowWork() {
	if atomic.AddInt32(&m.worker_num, 1) > m.config_max_work_num {
		atomic.AddInt32(&m.worker_num, -1)
		return
	}
	go m.doWorker()
}

// innerDoWork runs one locked channel on a fresh goroutine (workType 1 path):
// registers with closeWg, records debug info, then drains the channel via
// doChannelWork. endWork releases the channel's work lock.
// NOTE(review): worker_num was incremented by the caller (checkStartType1)
// before this goroutine started; only the decrement happens here.
func (this *MultiGroupTask) innerDoWork(ch *multi_group_task_channel) {
	if GoFunCatchException {
		defer PanicHandler()
	}
	defer ch.endWork()
	goid := uint64(0)
	if DEBUG_MODE == 1 {
		goid = GetCurrentGoRoutineID()
	}
	this.closeWg.Add(1)
	defer func() {
		this.closeWg.Done()
		atomic.AddInt32(&this.worker_num, -1)
	}()
	ch.last_goroute_id = goid
	ch.last_exec_start_T = time.Now()
	this.doChannelWork(ch)
}

/*
*
workType:2

	single-goroutine dispatcher: channels that received a push are fed through
	taskChan; each lockable channel is handed to the shared taskPool to drain.
	(The original header said workType:1; this path serves workType 2.)
*/
func (this *MultiGroupTask) checkStartType2() {
	if atomic.CompareAndSwapInt32(&this.started, 0, 1) {
		this.closeWg.Add(1)
		go func() {
			if GoFunCatchException {
				defer PanicHandler()
			}
			defer func() {
				atomic.StoreInt32(&this.started, 0)
				this.closeWg.Done()
			}()
			taskChan := make(chan *multi_group_task_channel, this.channel_max_queue_size<<1)
			this.postTaskChan = taskChan

			taskPool := NewTaskPool(int(this.channel_max_queue_size))
			taskPool.WorkMin = this.config_min_work_num
			taskPool.WorkMax = this.config_max_work_num
			taskPool.RunMinWorkers()
			this.taskPool = taskPool
			defer taskPool.Close()

			// runfn hands a locked channel to the pool; endWork releases the
			// work lock when the pool task finishes.
			runfn := func(ch *multi_group_task_channel) {
				taskPool.PostTask(func(worker *TaskWorker, args ...interface{}) {
					defer ch.endWork()
					if GoFunCatchException {
						defer PanicHandler()
					}
					atomic.AddInt32(&this.worker_num, 1)
					this.closeWg.Add(1)
					defer func() {
						this.closeWg.Done()
						atomic.AddInt32(&this.worker_num, -1)
					}()

					ch := args[0].(*multi_group_task_channel)
					ch.last_exec_start_T = time.Now()
					ch.last_goroute_id = worker.GoID
					this.doChannelWork(ch)
				}, ch)
			}

			idleTick := time.NewTicker(time.Second * 60)
			// Fix: stop the ticker when the dispatcher exits; it previously
			// leaked after Stop().
			defer idleTick.Stop()
		break_for:
			for {
				select {
				case ch := <-taskChan:
					if ch == nil { // nil is the Stop() sentinel
						break break_for
					}

					if ch.tryBeginWork() { // try to lock the group
						runfn(ch)
					}
					// else: already being drained by a running worker.
				case <-idleTick.C:
					this.doIdle()
				}
			}
		}()
	}
}

/*
*
workType:1

	single-goroutine dispatch mode: one scheduler goroutine scans the group
	map and hands each lockable group to a fresh worker goroutine.
*/
func (this *MultiGroupTask) checkStartType1() {
	if atomic.CompareAndSwapInt32(&this.started, 0, 1) {
		this.closeWg.Add(1)
		go func() {
			if GoFunCatchException {
				defer PanicHandler()
			}
			defer func() {
				atomic.StoreInt32(&this.started, 0)
				this.closeWg.Done()
			}()

			var runfn func(ch *multi_group_task_channel) bool
			runfn = func(ch *multi_group_task_channel) bool {
				go this.innerDoWork(ch)
				return true
			}
			idleCnt := 0
			for this.isTerminated == 0 {
				n := 0
				this.channelMap.Range(func(key, value interface{}) bool {
					ch := value.(*multi_group_task_channel)
					if ch.tryBeginWork() { // try to lock the group
						if atomic.AddInt32(&this.worker_num, 1) >= this.config_max_work_num { // no worker budget left
							atomic.AddInt32(&this.worker_num, -1)
							ch.endWork()
							return false
						}

						if runfn(ch) {
							n++
						} else {
							// dispatch failed: roll back the counter and the lock
							atomic.AddInt32(&this.worker_num, -1)
							ch.endWork()
							return false
						}
					}

					return true
				})

				if n == 0 {
					// Nothing dispatched: back off and run housekeeping ~1/s.
					idleCnt++
					if idleCnt%10 == 0 {
						this.doIdle()
					}

					time.Sleep(time.Millisecond * 100)
				} else {
					time.Sleep(0)
				}
			}
		}()
	}
}

/**
 * Start launches the workers for the configured workType.
 * Safe to call again after Stop.
 */
func (this *MultiGroupTask) Start() {
	Monitor.AddMonitor(this.Monitor_push)
	Monitor.AddMonitor(this.Monitor_Pop)
	switch this.workType {
	case 0:
		DefaultPatrolTask().CheckStart()
		this.isTerminated = 0
		for this.worker_num < this.config_min_work_num {
			this.checkGrowWork()
		}
	case 2:
		this.isTerminated = 0
		this.checkStartType2()
	default:
		this.isTerminated = 0
		this.checkStartType1()
	}
}

// CleanUseless drops signal groups that no longer contain any registered ids.
func (m *MultiGroupTask) CleanUseless() {
	m.signGroupMap.Range(func(key, value interface{}) bool {
		if group := value.(*MultiGroupTaskSignGroup); group.Count() == 0 {
			m.signGroupMap.Delete(key)
		}
		return true
	})
}

// DetailsEx2 renders a status report of the task groups.
//   - all: include every group (otherwise only busy/backlogged/matched ones)
//   - max: maximum number of groups listed; <=0 means unlimited
//   - searchval: when non-empty, only groups whose id contains it are listed
//   - exclusive: when non-empty, groups whose id contains it are skipped
//
// Groups are ordered longest-busy first, then by backlog and push-failure
// indicators, finally by id.
func (this *MultiGroupTask) DetailsEx2(all bool, max int, searchval, exclusive string) string {
	var sb BytesBuilder
	lst := make([]*multi_group_task_channel, 0)

	this.channelMap.Range(func(key, value interface{}) bool {
		itm := value.(*multi_group_task_channel)

		if len(exclusive) > 0 && strings.Contains(itm.IDText(), exclusive) { // excluded
			return true
		}

		matched := false
		if len(searchval) > 0 {
			if !strings.Contains(itm.IDText(), searchval) {
				return true // no match: skip
			}
			matched = true
		}

		if all || itm.queue_size > 0 || matched || itm.task_current_busy_cnt > 0 {
			lst = append(lst, itm)
		}
		return true
	})

	sort.Slice(lst, func(i, j int) bool {
		itmi := lst[i]
		itmj := lst[j]

		// Longest-busy first.
		dura0 := itmi.GetBusyDuration()
		dura1 := itmj.GetBusyDuration()
		if dura0 != dura1 {
			return dura0 > dura1
		}

		if (itmi.queue_size > 0) != (itmj.queue_size > 0) {
			return itmi.queue_size > 0
		}

		if (itmi.push_err_n > 0) != (itmj.push_err_n > 0) {
			return itmi.push_err_n > 0
		}

		if (itmi.last_push_fail_task != nil) != (itmj.last_push_fail_task != nil) {
			return itmi.last_push_fail_task != nil
		}

		if itmi.queue_size != itmj.queue_size {
			return itmi.queue_size > itmj.queue_size
		}

		return itmi.IDText() < itmj.IDText()
	})

	for i := 0; i < len(lst); i++ {
		// Fix: bound check moved before output — the old post-print check
		// emitted max+1 entries.
		if max > 0 && i >= max {
			break
		}
		itm := lst[i]
		if sb.Len() > 0 {
			sb.WriteString("\r\n")
		}
		sb.Appendf("%d\r\n", i+1)
		sb.WriteString(itm.StatusString())
	}

	return sb.String()
}

// CheckAbnormal scans the groups and returns an error describing the first
// group considered abnormal (busy for too long); nil when all are healthy.
func (m *MultiGroupTask) CheckAbnormal() (err error) {
	m.channelMap.Range(func(key, value interface{}) bool {
		channel := value.(*multi_group_task_channel)
		if channel.IsAbnormal() {
			err = fmt.Errorf("[%s]执行超时(%d)ms", channel.IDText(), channel.GetBusyDuration().Milliseconds())
			return false
		}
		return true
	})
	return err
}

// DetailsEx renders the group report without search/exclude filters.
func (m *MultiGroupTask) DetailsEx(all bool, max int) string {
	return m.DetailsEx2(all, max, "", "")
}

// Details renders the group report with no entry limit.
func (m *MultiGroupTask) Details(all bool) string {
	return m.DetailsEx(all, 0)
}

/*
*

	ConfigChannelMaxWorkNum sets the maximum number of worker goroutines for
	one group. A value greater than 1 (or 0) allows the group's tasks to run
	on several goroutines concurrently. Applied immediately to an existing
	channel and remembered for channels created later.
*/
func (m *MultiGroupTask) ConfigChannelMaxWorkNum(groupid interface{}, max_work_num int32) *MultiGroupTask {
	m.config_channel_max_work_num.Store(groupid, max_work_num)
	if obj, ok := m.channelMap.Load(groupid); ok {
		obj.(*multi_group_task_channel).work_max_cnt = max_work_num
	}
	return m
}

// ConfigMinWork sets the minimum number of worker goroutines kept alive.
func (m *MultiGroupTask) ConfigMinWork(num int32) *MultiGroupTask {
	m.config_min_work_num = num
	return m
}

// ConfigGrowIfTaskPiled sets the backlog threshold above which extra workers
// are spawned; <=0 disables growth.
func (m *MultiGroupTask) ConfigGrowIfTaskPiled(num int32) *MultiGroupTask {
	m.config_grow_work_if_task_num = num
	return m
}

// ConfigMaxWork sets the maximum number of worker goroutines.
func (m *MultiGroupTask) ConfigMaxWork(num int32) *MultiGroupTask {
	m.config_max_work_num = num
	return m
}

// ConfigChannelMaxQueueSize sets the per-group queue capacity.
func (m *MultiGroupTask) ConfigChannelMaxQueueSize(queue_size int32) *MultiGroupTask {
	m.channel_max_queue_size = queue_size
	return m
}

// GetChannelMaxQueueSize returns the per-group queue capacity.
func (m *MultiGroupTask) GetChannelMaxQueueSize() int32 {
	return m.channel_max_queue_size
}

// checkGetSignGroup returns the signal group registered under sign. When
// newIfNotfound is true a missing group is created atomically via
// LoadOrStore; otherwise nil is returned.
func (this *MultiGroupTask) checkGetSignGroup(sign interface{}, newIfNotfound bool) *MultiGroupTaskSignGroup {
	if v, loaded := this.signGroupMap.Load(sign); loaded {
		return v.(*MultiGroupTaskSignGroup)
	}

	if !newIfNotfound {
		return nil
	}

	// Fix: fully initialize the group BEFORE publishing it via LoadOrStore.
	// Previously groupid was assigned after the store, so a concurrent Load
	// could observe a group with a zero groupid.
	obj := &MultiGroupTaskSignGroup{groupid: sign}
	if v, loaded := this.signGroupMap.LoadOrStore(sign, obj); loaded {
		return v.(*MultiGroupTaskSignGroup)
	}
	return obj
}

// SignUnRegister removes the callback registered under id and detaches it
// from its signal group. Returns false when the id is unknown or its group
// has already been removed.
func (this *MultiGroupTask) SignUnRegister(id interface{}) bool {
	if v, loaded := this.signMap.Load(id); loaded {
		rec := v.(*MultiGroupTaskSignRec)
		this.signMap.Delete(id)
		groupRec := this.checkGetSignGroup(rec.groupid, false)
		if groupRec != nil { // must re-check: the group may have been removed
			groupRec.Remove(id)
			if this.config_remove_flag == 0 {
				if groupRec.Count() == 0 {
					// Under high concurrency this may delete a group that just
					// gained a new signal: the Count() check and the Delete
					// below are not one atomic operation.
					this.signGroupMap.Delete(rec.groupid)
				}
			}

		} else {
			return false
		}
		return true
	} else {
		return false
	}
}

// SignCount returns the number of registered signal ids.
func (m *MultiGroupTask) SignCount() int {
	cnt := 0
	m.signMap.Range(func(_, _ interface{}) bool {
		cnt++
		return true
	})
	return cnt
}

// SignGroupCount returns the number of signal groups currently registered.
func (m *MultiGroupTask) SignGroupCount() int {
	cnt := 0
	m.signGroupMap.Range(func(_, _ interface{}) bool {
		cnt++
		return true
	})
	return cnt
}

/*
*
  - SignRegister binds a callback to a signal.
  - One signal may carry several callbacks (a signal is effectively a group id).
  - id must be unique; an id can register only one signal. If the id is
    already registered, registration fails.
  - Returns false on registration failure.
*/
func (this *MultiGroupTask) SignRegister(id, sign interface{}, cb func(args ...interface{})) bool {

	if _, loaded := this.signMap.Load(id); loaded {
		return false
	}
	// Fix: populate the record BEFORE publishing it via LoadOrStore. The old
	// code stored first and assigned id/groupid/cb afterwards, so concurrent
	// readers (SignUnRegister / group execution) could observe a
	// half-initialized record.
	newRec := getSignRec()
	newRec.id = id
	newRec.groupid = sign
	newRec.cb = cb
	if _, loaded := this.signMap.LoadOrStore(id, newRec); loaded {
		newRec.Release()
		return false
	}

	// group registration
	signGroup := this.checkGetSignGroup(sign, true)
	signGroup.Add(newRec)
	return true
}

// onSignTask runs all callbacks bound to the triggering signal and, when a
// completion callback was supplied to SignTrigger, reports how many ran.
func (m *MultiGroupTask) onSignTask(sender *MultiGroupTaskRec) {
	group := m.checkGetSignGroup(sender.groupID, false)
	if group == nil {
		return
	}
	cnt := group.Execute(sender)
	if sender.paramFunc == nil {
		return
	}
	if doneCb := sender.paramFunc.(func(sign interface{}, cnt int)); doneCb != nil {
		doneCb(group.groupid, cnt)
	}
}

/*
**
  - SignTrigger fires a signal; execution is asynchronous.
  - doneCb is the completion callback; cnt is the number of callbacks that ran.
  - Returns false when the signal has no registered group, or when posting the
    task fails. (Fix: the old code ignored the PostTask error and reported
    true even when the task was never queued — in which case doneCb would
    never be invoked.)
*/
func (this *MultiGroupTask) SignTrigger(sign interface{}, doneCb func(sign interface{}, cnt int), args ...interface{}) bool {
	signGroup := this.checkGetSignGroup(sign, false)
	if signGroup == nil {
		return false
	}
	rec := &MultiGroupTaskRec{
		Args:      args,
		Cb:        this.onSignTask,
		paramFunc: doneCb,
	}
	return this.PostTask(sign, rec) == nil
}

// TaskCnt returns the number of queued-but-unfinished tasks.
// Fix: atomic load — taskcnt is updated with atomic.AddInt32 by
// PostTask/doChannelWork, so a plain read is a data race.
func (this *MultiGroupTask) TaskCnt() int32 {
	return atomic.LoadInt32(&this.taskcnt)
}

// Terminate flags the manager as stopped without waiting for workers.
// Fix: atomic store — the flag is read concurrently by worker goroutines.
func (this *MultiGroupTask) Terminate() {
	atomic.StoreInt32(&this.isTerminated, 1)
}

// Stop terminates the manager, blocks until every worker goroutine has
// exited, then detaches the push/pop monitors.
func (this *MultiGroupTask) Stop() {
	// Fix: atomic store — the flag is read concurrently by worker goroutines.
	atomic.StoreInt32(&this.isTerminated, 1)

	// workType 2: wake the dispatcher with the nil sentinel so it exits.
	taskChan := this.postTaskChan
	if taskChan != nil {
		taskChan <- nil
	}
	this.closeWg.Wait()

	Monitor.RemoveMonitorRec(this.Monitor_push)
	Monitor.RemoveMonitorRec(this.Monitor_Pop)
}

// GroupStatus returns the detailed status string of one group, or "NULL" when
// the group does not exist.
func (m *MultiGroupTask) GroupStatus(groupid interface{}) string {
	obj, ok := m.channelMap.Load(groupid)
	if !ok {
		return "NULL"
	}
	return obj.(*multi_group_task_channel).StatusString()
}

// GroupSimpleStatus returns a one-line status of one group, or "NULL" when
// the group does not exist.
func (m *MultiGroupTask) GroupSimpleStatus(groupid interface{}) string {
	obj, ok := m.channelMap.Load(groupid)
	if !ok {
		return "NULL"
	}
	ch := obj.(*multi_group_task_channel)
	return fmt.Sprintf("work: %d/%d(max), task(remain/fail/total):%d/%d/%d, goid:%d(%d ms)",
		ch.task_current_work_cnt, ch.work_max_cnt, ch.queue_size, ch.push_err_n, ch.push_cnt, ch.last_goroute_id, ch.GetBusyDuration().Milliseconds())
}

// StatusSimpleString returns a one-line summary: group count, worker usage,
// pending tasks and push/pop counters.
func (m *MultiGroupTask) StatusSimpleString() string {
	var b strings.Builder
	fmt.Fprintf(&b, "group:%d", m.channel_size)
	if m.workType == 2 {
		fmt.Fprintf(&b, ", workers[2]:%s", m.taskPool.Status())
		fmt.Fprintf(&b, ", task:%d", m.taskcnt)
		fmt.Fprintf(&b, ", cmdpost-chan-size:%d/%d", len(m.postTaskChan), cap(m.postTaskChan))
	} else {
		fmt.Fprintf(&b, ", workers[%d]:%d (%d->%d)", m.workType, m.worker_num, m.config_min_work_num, m.config_max_work_num)
		fmt.Fprintf(&b, ", task:%d", m.taskcnt)
	}

	fmt.Fprintf(&b, ", push:%s, pop:%s, pushfail:%d", m.Monitor_push.Info(), m.Monitor_Pop.Info(), m.pushfailcnt)

	return b.String()
}

// StatusString renders a multi-line, human-readable status report: run state,
// group/signal counts, worker pool, task counters and the groups that still
// have unfinished work.
func (m *MultiGroupTask) StatusString() string {
	var b strings.Builder
	b.WriteString("状态:")
	if m.isTerminated == 0 {
		b.WriteString("开启\r\n")
	} else {
		b.WriteString("停止\r\n")
	}
	fmt.Fprintf(&b, "组数量:%d\r\n", m.channel_size)
	fmt.Fprintf(&b, "信号组数量:%d, 信号数量:%d\r\n", m.SignGroupCount(), m.SignCount())
	if m.workType == 2 {
		if pool := m.taskPool; pool != nil {
			fmt.Fprintf(&b, "线程池: %s\r\n", pool.Status())
		}
		fmt.Fprintf(&b, "任务投递队列: %d/%d\r\n", len(m.postTaskChan), cap(m.postTaskChan))
	} else {
		fmt.Fprintf(&b, "线程池:%d (%d->%d), type:%d\r\n", m.worker_num, m.config_min_work_num, m.config_max_work_num, m.workType)
	}

	fmt.Fprintf(&b, "当前任务:%d\r\n", m.taskcnt)
	fmt.Fprintf(&b, "任务计数:push:%s, pop:%s, pushfail:%d\r\n", m.Monitor_push.Info(), m.Monitor_Pop.Info(), m.pushfailcnt)
	fmt.Fprintf(&b, "通道对象存活数:%d %s\r\n", m.channel_obj_cnt, m.latestcleanupmsg)

	if len(m.warnmsg) > 0 {
		b.WriteString(m.warnmsg)
		b.WriteString("\r\n")
	}

	b.WriteString("尚未执行完成任务组(最多罗列10组):\r\n")
	b.WriteString(m.DetailsEx(false, 10))
	return b.String()
}

// cleanup reclaims channels idle for more than secs seconds: the channel is
// unmapped, channel_size is decremented and the object is returned to the
// pool. If a reclaimed channel still holds tasks (a bug), the tasks are
// drained through doFatalCb and a warning is recorded.
func (this *MultiGroupTask) cleanup(secs float64) {
	n := 0
	this.channelMap.Range(func(key, value interface{}) bool {
		channel := value.(*multi_group_task_channel)
		if channel.tryMarkCleanup(secs) {
			this.channelMap.Delete(channel.id)
			atomic.AddInt32(&this.channel_size, -1)
			if channel.queue_size > 0 {
				// Should never happen: tryMarkCleanup accepted a channel that
				// still holds queued tasks.
				strMsg := fmt.Sprintf("[BUG][%s][%s]清理完后,队列中居然还有任务:%d\n", NowString(), ObjectHexAddr(channel.id), channel.queue_size)
				this.warnmsg = strMsg
				fmt.Fprint(os.Stderr, strMsg)
				this.doFatalCb(channel, errors.New(strMsg))
			}
			this.releaseChannel(channel)
			n++
		}
		return true
	})
	if n > 0 {
		this.latestcleanupmsg = fmt.Sprintf("[%s]清理[%d]没有使用的通道", NowString(), n)
	}
}

// onTaskFuncArgs adapts a stored func(args ...interface{}) callback to the
// MultiGroupTaskRec callback signature (used by PostTaskFuncArgs).
func (m *MultiGroupTask) onTaskFuncArgs(sender *MultiGroupTaskRec) {
	if fn := sender.paramFunc.(func(args ...interface{})); fn != nil {
		fn(sender.Args...)
	}
}

// onTaskFuncNoArgs adapts a stored func() callback to the MultiGroupTaskRec
// callback signature (used by PostTaskFunc).
func (m *MultiGroupTask) onTaskFuncNoArgs(sender *MultiGroupTaskRec) {
	if fn := sender.paramFunc.(func()); fn != nil {
		fn()
	}
}

/***
 * PostTaskFuncArgs posts cb together with args to the given group; useful for
 * serializing work per group.
 */
func (m *MultiGroupTask) PostTaskFuncArgs(groupid interface{}, cb func(args ...interface{}), args ...interface{}) error {
	return m.PostTask(groupid, &MultiGroupTaskRec{
		Args:      args,
		paramFunc: cb,
		Cb:        m.onTaskFuncArgs,
	})
}

// waitrec couples PostTaskFuncAndWait with WaitExecFunc.
type waitrec struct {
	// doflag: 0 = pending, 1 = claimed for execution, 2 = abandoned (timeout).
	doflag   int32
	// complete is closed once the callback has finished running.
	complete chan byte
}

// WaitExecFunc is the task callback used by PostTaskFuncAndWait: whichever
// side wins the CAS on doflag decides the outcome. If this worker wins (0->1)
// the user callback runs and complete is closed; if the waiter already timed
// out (doflag set to 2) nothing is executed.
func (this *MultiGroupTask) WaitExecFunc(sender *MultiGroupTaskRec) {
	cbFun := sender.paramFunc.(func())
	waitRec := sender.arg.(*waitrec)
	if atomic.CompareAndSwapInt32(&waitRec.doflag, 0, 1) { // we execute
		if cbFun != nil {
			cbFun()
		}
		close(waitRec.complete) // closing (vs sending) never blocks
		//waitRec.complete <- 1 // only the CAS winner would need to push the done marker
	}
}

/*
PostTaskFuncAndWait posts cb to the group and waits for it to run.

Parameters:

	timeout: how long to wait for execution to start; if the timeout fires
	before the callback starts, the callback will not run at all; if the
	callback is already running when the timeout fires, the call waits for
	it to finish.

Returns:

	ok: true when the callback executed, false on wait timeout
	err: non-nil when posting the task failed
*/
func (this *MultiGroupTask) PostTaskFuncAndWait(groupid interface{}, timeout time.Duration, cb func()) (ok bool, err error) {
	waitRec := &waitrec{doflag: 0, complete: make(chan byte)}
	rec := &MultiGroupTaskRec{
		arg:       waitRec,
		paramFunc: cb,
		Cb:        this.WaitExecFunc,
	}
	err = this.PostTask(groupid, rec)

	if err != nil {
		return
	}

	timer := time.NewTimer(timeout)
	select {
	case <-waitRec.complete:
		if !timer.Stop() {
			<-timer.C // drain a timer that fired concurrently
		}
		return true, nil
	case <-timer.C:
		if atomic.CompareAndSwapInt32(&waitRec.doflag, 0, 2) { // mark as abandoned
			//return false, fmt.Errorf("task post wait timeout!")
			return false, nil
		} else { // CAS lost: the worker is already executing; wait for completion
			<-waitRec.complete
			return true, nil
		}
	}
}

// PostTaskFunc posts a no-argument callback to the given group.
func (m *MultiGroupTask) PostTaskFunc(groupid interface{}, cb func()) error {
	return m.PostTask(groupid, &MultiGroupTaskRec{
		Args:      nil,
		paramFunc: cb,
		Cb:        m.onTaskFuncNoArgs,
	})
}

// PostTaskArgs posts a record-style callback together with its args.
func (m *MultiGroupTask) PostTaskArgs(groupid interface{}, cb func(sender *MultiGroupTaskRec), args ...interface{}) error {
	return m.PostTask(groupid, &MultiGroupTaskRec{Args: args, Cb: cb})
}

// PostTaskArgsEx posts a variadic callback; the record's Args are expanded
// back into the callback when the task runs.
func (m *MultiGroupTask) PostTaskArgsEx(groupid interface{}, cb func(args ...interface{}), args ...interface{}) error {
	rec := &MultiGroupTaskRec{Args: args}
	rec.Cb = func(sender *MultiGroupTaskRec) {
		cb(sender.Args...)
	}
	return m.PostTask(groupid, rec)
}

// PostTask queues rec into the channel of groupid, creating the channel on
// first use (via the channel pool + LoadOrStore). Returns an error when the
// manager is stopped or the channel queue rejects the push. For workType 2
// the dispatcher is notified through postTaskChan; for workType 0 worker
// growth is considered when the backlog exceeds the configured threshold.
func (this *MultiGroupTask) PostTask(groupid interface{}, rec *MultiGroupTaskRec) error {
	if this.isTerminated == 1 {
		return errors.New("[停止工作]不能压入任务")
	}
	if DEBUG_MODE == 1 {
		rec.CallStack = fmt.Sprintf("[%s]%s", NowString(), GetCallStack(1))
	}
	rec.groupID = groupid
	// Prepare a candidate channel for the group.
	newChannel := this.channelPool.Get().(*multi_group_task_channel)
	newChannel.reset()
	newChannel.id = groupid
	newChannel.last_activity = time.Now()
	if cnt, loaded := this.config_channel_max_work_num.Load(groupid); loaded {
		newChannel.work_max_cnt = cnt.(int32)
		if newChannel.work_max_cnt < 0 {
			newChannel.work_max_cnt = 1
		}
	}

	loadObj, old := this.channelMap.LoadOrStore(groupid, newChannel)
	if old {
		// Lost the race: an existing channel is in place; recycle ours.
		this.releaseChannel(newChannel)
	} else { // the new channel won
		atomic.AddInt32(&this.channel_size, 1)
	}
	realChannel := loadObj.(*multi_group_task_channel)

	if this.workType == 2 {
		taskChan := this.postTaskChan
		if this.started == 0 || taskChan == nil {
			return errors.New("[停止工作]不能压入任务")
		}

		this.Monitor_push.Delta(1)
		atomic.AddInt32(&this.taskcnt, 1)
		err := realChannel.Push(rec)
		if err != nil {
			// push failed: roll back the counter
			atomic.AddInt32(&this.taskcnt, -1)
			atomic.AddInt32(&this.pushfailcnt, 1)
			return err
		}
		taskChan <- realChannel
		return nil
	} else {
		this.Monitor_push.Delta(1)
		atomic.AddInt32(&this.taskcnt, 1)
		err := realChannel.Push(rec)
		if err != nil {
			// push failed: roll back the counter
			atomic.AddInt32(&this.taskcnt, -1)
			atomic.AddInt32(&this.pushfailcnt, 1)
		} else {
			if this.FixedWorker == 0 && this.workType == 0 {
				if this.config_grow_work_if_task_num > 0 && this.taskcnt > this.config_grow_work_if_task_num { // backlog above threshold
					go func() { // run async: the caller may be inside a DefaultPatrolTask() callback, which would deadlock
						DefaultPatrolTask().AddTaskOnceA1(realChannel, func(arg interface{}) {
							ch := arg.(*multi_group_task_channel)
							if ch.needWorker() {
								this.checkGrowWork() // grow workers
							}
						})
					}()
				}
			}
		}
		return err
	}
}

// PickChannel scans the groups and returns the first one it manages to lock
// for work, or nil when no group currently needs a worker.
func (m *MultiGroupTask) PickChannel() *multi_group_task_channel {
	var picked *multi_group_task_channel
	m.channelMap.Range(func(key, value interface{}) bool {
		ch := value.(*multi_group_task_channel)
		if !ch.tryBeginWork() {
			return true
		}
		picked = ch
		return false // stop the Range loop
	})
	return picked
}

// doIdle opportunistically reclaims idle channels. Only one goroutine may
// run the cleanup at a time (free_channel_flag CAS) and the reclaim itself is
// rate-limited by free_channel_timeout_secs.
func (this *MultiGroupTask) doIdle() {
	if this.free_channel_timeout_secs <= 0 { // cleanup disabled
		return
	}
	if !atomic.CompareAndSwapInt32(&this.free_channel_flag, 0, 1) {
		return
	}
	// Fix: release the flag atomically (it is acquired with CAS, so the plain
	// write was a race) and in a defer, so a panic inside cleanup cannot
	// leave the flag locked forever.
	defer atomic.StoreInt32(&this.free_channel_flag, 0)
	if this.last_free_channel_t.IsZero() || time.Since(this.last_free_channel_t).Seconds() > this.free_channel_timeout_secs {
		this.cleanup(this.free_channel_timeout_secs)
		this.last_free_channel_t = time.Now()
	}
}

// doFatalCb drains every remaining task of a channel that is being discarded
// and reports each one through OnFatalCallBack.
// Fix: the callback is optional — the old code invoked it unconditionally and
// panicked with a nil dereference when no handler was installed; now the
// queue is still drained (counters stay consistent) but the report is skipped.
func (this *MultiGroupTask) doFatalCb(channel *multi_group_task_channel, err error) {
	cb := this.OnFatalCallBack
	for {
		rec := channel.Pop()
		if rec == nil {
			break
		}
		atomic.AddInt32(&this.taskcnt, -1)
		//atomic.AddInt32(&this.popcnt, 1)
		this.Monitor_Pop.Delta(1)
		if cb != nil {
			cb(this, rec, err)
		}
	}
}

/**
 * doChannelWork drains the channel's queue on the calling goroutine. If the
 * channel allows several workers, the same channel may be drained by multiple
 * goroutines concurrently.
 */
func (this *MultiGroupTask) doChannelWork(channel *multi_group_task_channel) {
	atomic.AddInt32(&channel.task_current_busy_cnt, 1)
	defer func() {
		atomic.AddInt32(&channel.task_current_busy_cnt, -1)
	}()
	for {
		rec := channel.Pop()
		if rec == nil {
			break
		}
		atomic.AddInt32(&this.taskcnt, -1)
		this.Monitor_Pop.Delta(1)
		//atomic.AddInt32(&this.popcnt, 1)

		if this.isTerminated == 1 {
			// Stopping: the task is still delivered, but flagged so callbacks
			// avoid long-running work.
			rec.err = errors.New("[工作停止]请勿执行耗时操作")
			//} else if err != nil {
			//	cachelist.err = err
		}
		rec.CallBack()

	}
}

// doWorker is the long-lived worker loop for workType 0: it repeatedly picks
// a lockable group, drains it, and releases it. When idle for ~1s it either
// exits (if above the configured minimum and not FixedWorker) or runs the
// housekeeping pass.
// NOTE(review): worker_num was incremented by checkGrowWork before this
// goroutine started; only the decrement happens here.
func (this *MultiGroupTask) doWorker() {
	if GoFunCatchException {
		defer PanicHandler()
	}
	goid := GetCurrentGoRoutineID()
	this.closeWg.Add(1)
	defer func() {
		this.closeWg.Done()
		atomic.AddInt32(&this.worker_num, -1)
	}()
	idleCnt := 0
	for {
		// pick one group
		channel := this.PickChannel()
		if channel == nil {
			if this.isTerminated == 1 {
				break
			}
			idleCnt++
			if idleCnt%10 == 0 {
				if this.FixedWorker == 0 {
					if this.worker_num > this.config_min_work_num {
						break // idle for ~1s: release this worker
					}
				}
				this.doIdle()
			}
			time.Sleep(time.Millisecond * 100)
			continue // no group needs work
		} else {
			idleCnt = 0
			channel.last_goroute_id = goid
			channel.last_exec_start_T = time.Now()
			this.doChannelWork(channel) // run the channel's tasks
			channel.endWork()
		}
		//time.Sleep(time.Millisecond * 100)

	}

}
