package SheXiang_mq

import (
	"fmt"
	"github.com/panjf2000/ants/v2"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/limit"
	"db2s/ref"
	"math/rand"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// MqProductCustomerSendMsgSum holds the shared counters used to track
// producer/consumer progress for one MQ pipeline stage. All fields are
// pointers so every copy of the enclosing struct observes the same
// counters; access them only through sync/atomic.
type MqProductCustomerSendMsgSum struct {
	ProductMsg               *int64 // count of messages produced
	CustomerMsg              *int64 // count of messages consumed
	ProductObject            *int64 // count of objects produced
	CustomerObject           *int64 // count of objects consumed
	ProductSendMsgFinishMark *int64 // set to 1 once the producer finished sending
	QuitExec                 *int64 // set to 1 to request the monitor loops to quit
}
// SpeedLimit aggregates per-table read/write throughput counters, the
// rolling one-second snapshots used to derive rates, and the token-bucket
// limiter plus callbacks that enforce and report throttling. All pointer
// fields are shared counters; access them via sync/atomic.
type SpeedLimit struct {
	Schema, Table                                                 string
	ChunkSum                                                      int64
	Wg                                                            *sync.WaitGroup
	ReadSum, lastSecondReadSum, WriteSum, lastSecondWriteSum, Rwd *int64
	ReadParallel                                                  *int64
	WriteParallel                                                 *int64
	ReadExecCount, lastSecondExecReadCount, Rc                    *int64
	WriteExecCount, lastSecondExecWriteCount, Wc                  *int64
	readRate, writeRate                                           *uint64
	ReadAvgTime, lastSecondExecReadTime                           *uint64 // cumulative read exec time (ms) and its last-window snapshot
	WriteAvgTime, lastSecondExecWriteTime                         *uint64 // cumulative write exec time (ms) and its last-window snapshot
	SLimit                                                        *limit.RateLimiter
	AvgRowsSize                                                   int64
	speedRestriction, speedRestricted                             atomic.Value // reason for throttling ("RWD"/"WR"/"RR") and whether it is active ("Y"/"N")
	Status                                                        *int64       // 0 = not rate-limited, 1 = rate-limited
	SpeedEndDone                                                  chan struct{}
	DeferFuncQuit                                                 global.EFunc
	SpeedAddFunc                                                  global.EFunc
	SpeedDelFunc                                                  global.EFunc
	SendResFunc                                                   global.EFunc
}
// ChanSendMsg bundles what is needed to pull payloads off a channel and
// publish them to MQ: the message template, the source channel, the
// lifecycle callbacks, and the producer used to send.
type ChanSendMsg struct {
	Table             string         // table name; sent as the message body
	BaseMsg           map[string]any // base message properties
	MsgModifyKey      chan any       // source channel of payloads to send
	MsgData           map[string]any // message template; see sendMsgAction for key substitution
	MsgSuccessfulFunc global.EFunc   // called after a successful send
	MsgSendFunc       global.EFunc   // called just before each send
	MsgFinishFunc     global.EFunc   // called when the source channel closes
	MsgProducer       Producer       // MQ producer used to publish
}
// ObjectMeta identifies one migrated object by schema and table name.
type ObjectMeta struct {
	Schema string
	Table  string
}
// SendCurryLimit bounds the number of in-flight message sends. CurrySum is
// used as a counting semaphore (capacity = max concurrency) and Wg tracks
// outstanding sends so callers can wait for them to drain.
type SendCurryLimit struct {
	Wg         *sync.WaitGroup
	Switch     bool          // enable concurrency limiting
	ManualDone bool          // when true, the caller releases the semaphore itself
	CurrySum   chan struct{} // semaphore channel; a send acquires, a receive releases
}
// MonitorMsgMq coordinates a two-stage produce/consume MQ pipeline for one
// table: shared counters for both stages, lifecycle callbacks, optional
// send-concurrency currying and rate limiting, and the done channels that
// link the monitor goroutines together.
type MonitorMsgMq struct {
	Wg               *sync.WaitGroup
	CurryLimit       SendCurryLimit
	SendMsg          ChanSendMsg
	FirstMsg         MqProductCustomerSendMsgSum
	SecondMsg        MqProductCustomerSendMsgSum
	AbnormalQuit     global.EFunc
	NormalQuit       global.EFunc
	ExecFunc         []global.EFunc
	DeferFuncQuit    global.EFunc
	GeneralSeqLog    bool
	ContinueFunc     global.EFunc
	ErrStatus        *int64
	SpeedLimitSwitch bool // whether rate limiting is enabled
	Speed            SpeedLimit
	done             chan struct{} // signals first-stage completion to MqTableStatusMonitor
	Done             chan struct{}
}

// getFirstMsgFinishStatus reports whether the first-stage producer has set
// its finish mark.
func (mm MonitorMsgMq) getFirstMsgFinishStatus() bool {
	mark := atomic.LoadInt64(mm.FirstMsg.ProductSendMsgFinishMark)
	return mark == 1
}

// getSecondMsgFinishStatus reports whether the second-stage finish mark has
// reached the first-stage produced-message count.
func (mm MonitorMsgMq) getSecondMsgFinishStatus() bool {
	produced := atomic.LoadInt64(mm.FirstMsg.ProductMsg)
	marked := atomic.LoadInt64(mm.SecondMsg.ProductSendMsgFinishMark)
	return produced == marked
}

// getFirstMsgProductCustomerEq reports whether first-stage produced and
// consumed message counts match.
func (mm MonitorMsgMq) getFirstMsgProductCustomerEq() bool {
	produced := atomic.LoadInt64(mm.FirstMsg.ProductMsg)
	consumed := atomic.LoadInt64(mm.FirstMsg.CustomerMsg)
	return produced == consumed
}

// getSecondMsgProductCustomerEq reports whether second-stage produced and
// consumed message counts match.
func (mm MonitorMsgMq) getSecondMsgProductCustomerEq() bool {
	produced := atomic.LoadInt64(mm.SecondMsg.ProductMsg)
	consumed := atomic.LoadInt64(mm.SecondMsg.CustomerMsg)
	return produced == consumed
}

// getFirstObjectProductCustomerEq reports whether first-stage produced and
// consumed object counts match.
func (mm MonitorMsgMq) getFirstObjectProductCustomerEq() bool {
	produced := atomic.LoadInt64(mm.FirstMsg.ProductObject)
	consumed := atomic.LoadInt64(mm.FirstMsg.CustomerObject)
	return produced == consumed
}
// FirstObjectProductAdd marks first-stage object production.
// NOTE(review): despite the "Add" name this sets the counter to 1 rather
// than incrementing — the original used SwapInt64 and discarded the result;
// StoreInt64 is the idiomatic equivalent. Confirm no caller expects an
// increment here.
func (mm MonitorMsgMq) FirstObjectProductAdd() {
	atomic.StoreInt64(mm.FirstMsg.ProductObject, 1)
}

// FirstObjectProductAddSum increments the first-stage produced-object
// counter by sum.
func (mm MonitorMsgMq) FirstObjectProductAddSum(sum int64) {
	atomic.AddInt64(mm.FirstMsg.ProductObject, sum)
}

// FirstObjectCustomerAdd marks first-stage object consumption (sets the
// counter to 1; see the note on FirstObjectProductAdd).
func (mm MonitorMsgMq) FirstObjectCustomerAdd() {
	atomic.StoreInt64(mm.FirstMsg.CustomerObject, 1)
}

// FirstMsgFinishMarkAdd sets the first-stage producer finish mark.
func (mm MonitorMsgMq) FirstMsgFinishMarkAdd() {
	atomic.StoreInt64(mm.FirstMsg.ProductSendMsgFinishMark, 1)
}

// SecondMsgFinishMarkAdd sets the second-stage producer finish mark.
func (mm MonitorMsgMq) SecondMsgFinishMarkAdd() {
	atomic.StoreInt64(mm.SecondMsg.ProductSendMsgFinishMark, 1)
}

// FirstMsgProductAdd marks first-stage message production.
// NOTE(review): despite the "Add" name this sets the counter to 1 — the
// original used SwapInt64 and discarded the result, so StoreInt64 is the
// idiomatic equivalent. Contrast with SecondMsgCustomerAdd, which really
// increments; confirm the asymmetry is intended.
func (mm MonitorMsgMq) FirstMsgProductAdd() {
	atomic.StoreInt64(mm.FirstMsg.ProductMsg, 1)
}

// FirstMsgCustomerAdd marks first-stage message consumption (sets to 1;
// see the note on FirstMsgProductAdd).
func (mm MonitorMsgMq) FirstMsgCustomerAdd() {
	atomic.StoreInt64(mm.FirstMsg.CustomerMsg, 1)
}

// SecondMsgProductAdd marks second-stage message production (sets to 1;
// see the note on FirstMsgProductAdd).
func (mm MonitorMsgMq) SecondMsgProductAdd() {
	atomic.StoreInt64(mm.SecondMsg.ProductMsg, 1)
}

// SecondMsgCustomerAdd increments the second-stage consumed-message counter.
func (mm MonitorMsgMq) SecondMsgCustomerAdd() {
	atomic.AddInt64(mm.SecondMsg.CustomerMsg, 1)
}

// FirstObjectProductSwap sets the first-stage produced-object counter to sum.
func (mm MonitorMsgMq) FirstObjectProductSwap(sum int64) {
	atomic.StoreInt64(mm.FirstMsg.ProductObject, sum)
}

// FirstMsgFinishMarkSwap sets the first-stage finish mark (same effect as
// FirstMsgFinishMarkAdd).
func (mm MonitorMsgMq) FirstMsgFinishMarkSwap() {
	atomic.StoreInt64(mm.FirstMsg.ProductSendMsgFinishMark, 1)
}

// SecondMsgFinishMarkSwap sets the second-stage finish mark (same effect as
// SecondMsgFinishMarkAdd).
func (mm MonitorMsgMq) SecondMsgFinishMarkSwap() {
	atomic.StoreInt64(mm.SecondMsg.ProductSendMsgFinishMark, 1)
}
// ExecSecondMsgCustomerMqSum atomically increments the supplied counter.
func (mm MonitorMsgMq) ExecSecondMsgCustomerMqSum(s *int64) {
	atomic.AddInt64(s, 1)
}

// MsgCurryAdd acquires one slot of the send-concurrency semaphore; blocks
// while the semaphore channel is full.
func (mm MonitorMsgMq) MsgCurryAdd() {
	mm.CurryLimit.CurrySum <- struct{}{}
}

// MsgCurryDone releases one semaphore slot.
func (mm MonitorMsgMq) MsgCurryDone() {
	<-mm.CurryLimit.CurrySum
}

// MsgCurryWgAdd registers one in-flight send with the wait group.
func (mm MonitorMsgMq) MsgCurryWgAdd() {
	mm.CurryLimit.Wg.Add(1)
}

// MsgCurryWgDone marks one in-flight send as finished.
func (mm MonitorMsgMq) MsgCurryWgDone() {
	mm.CurryLimit.Wg.Done()
}

// AbnormalErrorCodeSwap flags the abnormal-error state. FIX(idiom): Store
// replaces the original SwapInt64 whose return value was discarded.
func (mm MonitorMsgMq) AbnormalErrorCodeSwap() {
	atomic.StoreInt64(mm.ErrStatus, -1)
}

// getAbnormalErrorCode reports whether the abnormal-error flag is set.
func (mm MonitorMsgMq) getAbnormalErrorCode() bool {
	return atomic.LoadInt64(mm.ErrStatus) < 0
}

// getQuitExec reports whether a quit has been requested.
func (mm MonitorMsgMq) getQuitExec() bool {
	return atomic.LoadInt64(mm.FirstMsg.QuitExec) == 1
}

// SetQuitExec requests the monitor loops to quit. FIX(idiom): Store
// replaces the original SwapInt64 whose return value was discarded.
func (mm MonitorMsgMq) SetQuitExec() {
	atomic.StoreInt64(mm.FirstMsg.QuitExec, 1)
}
// productCustomerMsgInit returns an MqProductCustomerSendMsgSum whose every
// counter points at a freshly allocated zero value.
// FIX(idiom): new(int64) replaces six local variables whose addresses were
// taken; behavior is identical (all counters start at 0).
func productCustomerMsgInit() MqProductCustomerSendMsgSum {
	return MqProductCustomerSendMsgSum{
		ProductMsg:               new(int64),
		CustomerMsg:              new(int64),
		ProductObject:            new(int64),
		CustomerObject:           new(int64),
		ProductSendMsgFinishMark: new(int64),
		QuitExec:                 new(int64),
	}
}

// NewMonitorMsgMq allocates a MonitorMsgMq with zeroed shared counters,
// empty callbacks, and single-slot buffered done channels.
func NewMonitorMsgMq() *MonitorMsgMq {
	mm := &MonitorMsgMq{
		FirstMsg:      productCustomerMsgInit(),
		SecondMsg:     productCustomerMsgInit(),
		AbnormalQuit:  global.EFunc{},
		NormalQuit:    global.EFunc{},
		DeferFuncQuit: global.EFunc{},
		ErrStatus:     new(int64),
		done:          make(chan struct{}, 1),
		Done:          make(chan struct{}, 1),
	}
	return mm
}
// NewMonitorMsgMq1 is the value-typed variant of NewMonitorMsgMq.
// FIX(consistency): delegate to NewMonitorMsgMq instead of duplicating the
// construction logic line-for-line; the result is identical.
func NewMonitorMsgMq1() MonitorMsgMq {
	return *NewMonitorMsgMq()
}
// SetNormalQuitFunc returns a copy of mm with the normal-quit callback set.
// (The value receiver is already a copy, so mutating it is safe.)
func (mm MonitorMsgMq) SetNormalQuitFunc(funcName global.EFunc) MonitorMsgMq {
	mm.NormalQuit = funcName
	return mm
}

// SetAbnormalQuitFunc returns a copy of mm with the abnormal-quit callback set.
func (mm MonitorMsgMq) SetAbnormalQuitFunc(funcName global.EFunc) MonitorMsgMq {
	mm.AbnormalQuit = funcName
	return mm
}

// SetDeferFuncQuitFunc returns a copy of mm with the deferred-quit callback set.
func (mm MonitorMsgMq) SetDeferFuncQuitFunc(funcName global.EFunc) MonitorMsgMq {
	mm.DeferFuncQuit = funcName
	return mm
}

// SetGeneralSeqLogFunc returns a copy of mm with sequence logging set to r.
func (mm MonitorMsgMq) SetGeneralSeqLogFunc(r bool) MonitorMsgMq {
	mm.GeneralSeqLog = r
	return mm
}

// SetContinueFunc returns a copy of mm with the skip-decision callback set.
func (mm MonitorMsgMq) SetContinueFunc(funcName global.EFunc) MonitorMsgMq {
	mm.ContinueFunc = funcName
	return mm
}

// SetGeneralSeqLog returns a copy of mm with sequence logging enabled.
func (mm MonitorMsgMq) SetGeneralSeqLog() MonitorMsgMq {
	mm.GeneralSeqLog = true
	return mm
}

// SetExecFunc returns a copy of mm with the per-item exec callbacks set.
func (mm MonitorMsgMq) SetExecFunc(funcName []global.EFunc) MonitorMsgMq {
	mm.ExecFunc = funcName
	return mm
}

// SetSendMsg returns a copy of mm reading payloads from the given channel.
func (mm MonitorMsgMq) SetSendMsg(data chan any) MonitorMsgMq {
	mm.SendMsg.MsgModifyKey = data
	return mm
}

// SetCurryLimit returns a copy of mm with the concurrency limit settings set.
func (mm MonitorMsgMq) SetCurryLimit(curryLimit SendCurryLimit) MonitorMsgMq {
	mm.CurryLimit = curryLimit
	return mm
}
// printInfoDebug dumps the current counter state for debugging. object
// selects which monitor's snapshot to print; unknown values print nothing.
// FIX(idiom): fmt.Printf replaces fmt.Println(fmt.Sprintf(...)) (flagged by
// go vet); the extra trailing "\n" on each format string preserves the
// newline Println used to append.
func (mm MonitorMsgMq) printInfoDebug(table, object string) {
	switch object {
	case "ExpensiveOperation":
		fmt.Printf("--------ExpensiveOperation: table:%v FirstProductObject:%v "+
			"FirstCustomerObject:%v "+
			"FirstProductMsg:%v "+
			"FirstCustomerMsg:%v "+
			"ProductSendMsgFinishMark:%v \n"+
			"getFirstObjectProductCustomerEq: FirstMsg.ProductObject == FirstMsg.CustomerObject -->:%v \n"+
			"getFirstMsgProductCustomerEq: FirstMsg.ProductMsg == FirstMsg.CustomerMsg  -->: %v \n"+
			"getFirstMsgFinishStatus: FirstMsg.ProductSendMsgFinishMark == 1 -->: %v \n"+
			"\n",
			table,
			atomic.LoadInt64(mm.FirstMsg.ProductObject),
			atomic.LoadInt64(mm.FirstMsg.CustomerObject),
			atomic.LoadInt64(mm.FirstMsg.ProductMsg),
			atomic.LoadInt64(mm.FirstMsg.CustomerMsg),
			atomic.LoadInt64(mm.FirstMsg.ProductSendMsgFinishMark),
			mm.getFirstObjectProductCustomerEq(),
			mm.getFirstMsgProductCustomerEq(),
			mm.getFirstMsgFinishStatus())
	case "MqTableStatusMonitor":
		fmt.Printf("--------------MqTableStatusMonitor: table:%v "+
			"FirstProductMsg:%v "+
			"SecondProductMsg:%v "+
			"SecondCustomerMsg:%v "+
			"SecondProductSendMsgFinishMark:%v \n"+
			"getSecondMsgProductCustomerEq: SecondMsg.ProductMsg == SecondMsg.CustomerMsg -->: %v \n"+
			"getSecondMsgFinishStatus: FirstMsg.ProductMsg == SecondMsg.ProductSendMsgFinishMark -->: %v \n"+
			"\n",
			table,
			atomic.LoadInt64(mm.FirstMsg.ProductMsg), atomic.LoadInt64(mm.SecondMsg.ProductMsg), atomic.LoadInt64(mm.SecondMsg.CustomerMsg),
			atomic.LoadInt64(mm.SecondMsg.ProductSendMsgFinishMark),
			mm.getSecondMsgProductCustomerEq(), mm.getSecondMsgFinishStatus())
	default:
	}
}
// ExpensiveOperation starts the first-stage completion monitor for table.
// Every global.MonitorInterval seconds it checks whether first-stage
// production and consumption have converged (or an abnormal error was
// flagged); on exit it runs DeferFuncQuit, signals mm.done, and releases
// the wait group.
// FIX: the ticker was never stopped, leaking its runtime timer for the life
// of the process.
func (mm MonitorMsgMq) ExpensiveOperation(table string) {
	mm.Wg.Add(1)
	ticker := time.NewTicker(global.MonitorInterval * time.Second)
	go func() {
		defer func() {
			ticker.Stop()
			if mm.DeferFuncQuit.FuncName != nil {
				mm.DeferFuncQuit.FuncName("", mm.DeferFuncQuit.Params)
			}
			mm.done <- struct{}{}
			mm.Wg.Done()
		}()
		for range ticker.C {
			if mm.getAbnormalErrorCode() {
				return
			}
			// done when object counts match AND message counts match AND the
			// producer has set its finish mark.
			if mm.getFirstObjectProductCustomerEq() {
				if mm.getFirstMsgProductCustomerEq() && mm.getFirstMsgFinishStatus() {
					return
				}
			}
		}
	}()
}

// MqTableStatusMonitor watches the per-table MQ message state and, when the
// pipeline finishes, reports status (e.g. to the progress bar) via the
// configured quit callbacks. The deferred send on Speed.SpeedEndDone also
// stops this table's SpeedMonitor loop.
func (mm MonitorMsgMq) MqTableStatusMonitor(table string) {
	var quit bool
	mm.ExpensiveOperation(table)
	mm.Wg.Add(1)
	go func() {
		defer func() {
			if r := recover(); r != nil {
				// optional: log the recovered panic
			}
			mm.Speed.SpeedEndDone <- struct{}{}
			if mm.DeferFuncQuit.FuncName != nil {
				mm.DeferFuncQuit.FuncName("", mm.DeferFuncQuit.Params)
			}
			mm.Wg.Done()
		}()
		ticker := time.NewTicker(1 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// do nothing until ExpensiveOperation signals first-stage
				// completion via mm.done (sets quit below).
				if !quit {
					continue
				}
				if mm.getAbnormalErrorCode() {
					if mm.AbnormalQuit.FuncName != nil {
						mm.AbnormalQuit.FuncName("", mm.AbnormalQuit.Params)
					}
					return
				}
				//mm.printInfoDebug(table, "MqTableStatusMonitor")
				// normal exit: second-stage consumed everything produced, its
				// finish mark caught up, and a quit was requested.
				if mm.getSecondMsgProductCustomerEq() && mm.getSecondMsgFinishStatus() && mm.getQuitExec() {
					if mm.NormalQuit.FuncName != nil {
						mm.NormalQuit.FuncName("", mm.NormalQuit.Params)
					}
					return
				}
			case <-mm.done:
				quit = true
			}
		}
	}()
}
// NewSpeedMonitor builds a SpeedLimit for schema.table with every counter
// freshly allocated at zero, a one-slot SpeedEndDone channel, and an
// initially generous token bucket (capacity 1e9, refill 1).
// FIX(idiom): new(int64)/new(uint64) replaces twenty address-taken local
// variables; behavior is identical (all counters start at 0).
func NewSpeedMonitor(schema, table string, chunkSize int64) SpeedLimit {
	return SpeedLimit{
		Schema: schema, Table: table, ChunkSum: chunkSize, Status: new(int64),
		ReadSum: new(int64), WriteSum: new(int64), ReadParallel: new(int64), WriteParallel: new(int64),
		ReadAvgTime: new(uint64), WriteAvgTime: new(uint64), ReadExecCount: new(int64), WriteExecCount: new(int64),
		lastSecondReadSum: new(int64), lastSecondWriteSum: new(int64),
		lastSecondExecReadTime: new(uint64), lastSecondExecWriteTime: new(uint64),
		lastSecondExecReadCount: new(int64), lastSecondExecWriteCount: new(int64),
		readRate: new(uint64), writeRate: new(uint64), SpeedEndDone: make(chan struct{}, 1),
		Rwd: new(int64), Rc: new(int64), Wc: new(int64), SLimit: limit.NewRateLimiter(1000000000, 1),
	}
}
// getReadRate derives the read-side rate for the last window: elapsed read
// execution time (ms) divided by the number of reads completed in the
// window. Stores 0 when either delta is zero.
func (sl SpeedLimit) getReadRate() {
	elapsed := atomic.LoadUint64(sl.ReadAvgTime) - atomic.LoadUint64(sl.lastSecondExecReadTime)
	atomic.SwapInt64(sl.Rc, atomic.LoadInt64(sl.ReadExecCount)-atomic.LoadInt64(sl.lastSecondExecReadCount))
	count := atomic.LoadInt64(sl.Rc)
	if elapsed == 0 || count == 0 {
		atomic.SwapUint64(sl.readRate, 0)
		return
	}
	atomic.SwapUint64(sl.readRate, elapsed/uint64(count))
}
// getWriteRate derives the write-side rate for the last window: elapsed
// write execution time (ms) divided by the number of writes completed in
// the window. Stores 0 when either delta is zero.
func (sl SpeedLimit) getWriteRate() {
	elapsed := atomic.LoadUint64(sl.WriteAvgTime) - atomic.LoadUint64(sl.lastSecondExecWriteTime)
	atomic.SwapInt64(sl.Wc, atomic.LoadInt64(sl.WriteExecCount)-atomic.LoadInt64(sl.lastSecondExecWriteCount))
	count := atomic.LoadInt64(sl.Wc)
	if elapsed == 0 || count == 0 {
		atomic.SwapUint64(sl.writeRate, 0)
		return
	}
	atomic.SwapUint64(sl.writeRate, elapsed/uint64(count))
}
// ReadParallelDel decrements the count of concurrently running readers.
func (sl SpeedLimit) ReadParallelDel() {
	atomic.AddInt64(sl.ReadParallel, -1)
}

// ReadParallelAdd increments the count of concurrently running readers.
func (sl SpeedLimit) ReadParallelAdd() {
	atomic.AddInt64(sl.ReadParallel, 1)
}

// WriteParallelDel decrements the count of concurrently running writers.
func (sl SpeedLimit) WriteParallelDel() {
	atomic.AddInt64(sl.WriteParallel, -1)
}

// WriteParallelAdd increments the count of concurrently running writers.
func (sl SpeedLimit) WriteParallelAdd() {
	atomic.AddInt64(sl.WriteParallel, 1)
}

// ReadSumAdd adds sum to the cumulative rows-read counter.
func (sl SpeedLimit) ReadSumAdd(sum int64) {
	atomic.AddInt64(sl.ReadSum, sum)
}

// WriteSumAdd adds sum to the cumulative rows-written counter.
func (sl SpeedLimit) WriteSumAdd(sum int64) {
	atomic.AddInt64(sl.WriteSum, sum)
}

// ReadAvgTimeAdd adds avgTime (ms) to the cumulative read execution time.
func (sl SpeedLimit) ReadAvgTimeAdd(avgTime uint64) {
	atomic.AddUint64(sl.ReadAvgTime, avgTime)
}

// WriteAvgTimeAdd adds avgTime (ms) to the cumulative write execution time.
func (sl SpeedLimit) WriteAvgTimeAdd(avgTime uint64) {
	atomic.AddUint64(sl.WriteAvgTime, avgTime)
}

// ReadExecCountAdd increments the cumulative read-operation counter.
func (sl SpeedLimit) ReadExecCountAdd() {
	atomic.AddInt64(sl.ReadExecCount, 1)
}

// WriteExecCountAdd increments the cumulative write-operation counter.
func (sl SpeedLimit) WriteExecCountAdd() {
	atomic.AddInt64(sl.WriteExecCount, 1)
}

// getCurrySpeedListStatus reports whether rate limiting is currently engaged.
func (sl SpeedLimit) getCurrySpeedListStatus() bool {
	return atomic.LoadInt64(sl.Status) == 1
}

// getSpeedLimitStatus decides whether throttling is warranted based on the
// read/write divergence and per-operation latencies, recording the trigger
// reason ("RWD", "WR" or "RR") in speedRestriction.
// FIX(idiom): atomic.Value.Store replaces Swap whose result was discarded;
// a bare switch replaces the if/else-if chain.
func (sl SpeedLimit) getSpeedLimitStatus() (speedLimitStatus bool) {
	switch {
	case atomic.LoadInt64(sl.Rwd) >= 20:
		// reads are >=20% ahead of writes: the write side is falling behind
		sl.speedRestriction.Store("RWD")
		speedLimitStatus = true
	case atomic.LoadUint64(sl.writeRate) > 1000:
		// write latency per operation is too high
		sl.speedRestriction.Store("WR")
		speedLimitStatus = true
	case atomic.LoadUint64(sl.readRate) > 1000:
		// read latency per operation is too high
		sl.speedRestriction.Store("RR")
		speedLimitStatus = true
	}
	return
}
// getMonitorTerminal snapshots the current speed/limit counters into a
// SpeedLimitMonitor for display.
// FIX(idiom): the NS field wrapped an already-formatted string in a second
// fmt.Sprintf("%v", ...); the redundant wrapper is removed (output is
// unchanged). The read delta is also computed once and reused by RS and NS.
func (sl SpeedLimit) getMonitorTerminal() SpeedLimitMonitor {
	readDelta := atomic.LoadInt64(sl.ReadSum) - atomic.LoadInt64(sl.lastSecondReadSum)
	return SpeedLimitMonitor{
		Schema: sl.Schema,
		Table:  sl.Table,
		SS:     fmt.Sprintf("%v", sl.speedRestricted.Load()),
		SR:     fmt.Sprintf("%v", sl.speedRestriction.Load()),
		RS:     fmt.Sprintf("%v", readDelta),
		WS:     fmt.Sprintf("%v", atomic.LoadInt64(sl.WriteSum)-atomic.LoadInt64(sl.lastSecondWriteSum)),
		RWD:    fmt.Sprintf("%v", atomic.LoadInt64(sl.Rwd)),
		RR:     fmt.Sprintf("%v", atomic.LoadUint64(sl.readRate)),
		WR:     fmt.Sprintf("%v", atomic.LoadUint64(sl.writeRate)),
		RP:     fmt.Sprintf("%v", atomic.LoadInt64(sl.ReadParallel)),
		WP:     fmt.Sprintf("%v", atomic.LoadInt64(sl.WriteParallel)),
		BS:     fmt.Sprintf("%v", sl.SLimit.GetRefill("cap")),
		NS:     fmt.Sprintf("%.2f", float64(readDelta*sl.AvgRowsSize)/1024/1024), // network speed, MiB per window
		RBS:    fmt.Sprintf("%v", sl.ChunkSum),
	}
}
// speedCapAdd engages throttling: records the restricted state and shrinks
// the token-bucket capacity 1000x (never below 1).
// FIX(idiom): Store replaces Swap calls whose return values were discarded.
func (sl SpeedLimit) speedCapAdd() {
	sl.speedRestricted.Store("Y")
	atomic.StoreInt64(sl.Status, 1)
	if sl.SLimit.GetRefill("cap") >= 1000 {
		sl.SLimit.SetCapacity(sl.SLimit.GetRefill("cap") / 1000)
	}
}

// speedCapDel releases throttling: records the unrestricted state and grows
// the token-bucket capacity 10x (up to a ceiling of 1e9).
func (sl SpeedLimit) speedCapDel() {
	sl.speedRestricted.Store("N")
	atomic.StoreInt64(sl.Status, 0)
	if sl.SLimit.GetRefill("cap") <= 100000000 {
		sl.SLimit.SetCapacity(sl.SLimit.GetRefill("cap") * 10)
	}
}
// swapLastValue closes the current measurement window by snapshotting the
// cumulative counters into the "last second" fields.
// FIX(idiom): StoreXxx replaces SwapXxx whose return values were discarded.
func (sl SpeedLimit) swapLastValue() {
	atomic.StoreInt64(sl.lastSecondReadSum, atomic.LoadInt64(sl.ReadSum))
	atomic.StoreInt64(sl.lastSecondWriteSum, atomic.LoadInt64(sl.WriteSum))
	atomic.StoreUint64(sl.lastSecondExecReadTime, atomic.LoadUint64(sl.ReadAvgTime))
	atomic.StoreUint64(sl.lastSecondExecWriteTime, atomic.LoadUint64(sl.WriteAvgTime))
	atomic.StoreInt64(sl.lastSecondExecReadCount, atomic.LoadInt64(sl.ReadExecCount))
	atomic.StoreInt64(sl.lastSecondExecWriteCount, atomic.LoadInt64(sl.WriteExecCount))
}
// speedLimitAdd engages the rate limit and, when either side's per-op
// latency exceeds 1000ms, invokes the registered SpeedAddFunc callback.
func (sl SpeedLimit) speedLimitAdd() {
	sl.speedCapAdd()
	if atomic.LoadUint64(sl.readRate) > 1000 || atomic.LoadUint64(sl.writeRate) > 1000 {
		if sl.SpeedAddFunc.FuncName != nil {
			sl.SpeedAddFunc.FuncName("", sl.SpeedAddFunc.Params)
		}
	}
}

// speedLimitDel releases the rate limit and, when both sides are fast
// enough (<501ms/op) and have made progress, invokes SpeedDelFunc.
func (sl SpeedLimit) speedLimitDel() {
	sl.speedCapDel()
	if atomic.LoadUint64(sl.readRate) < 501 && atomic.LoadInt64(sl.Rc) > 0 && atomic.LoadUint64(sl.writeRate) < 501 && atomic.LoadInt64(sl.Wc) > 0 {
		if sl.SpeedDelFunc.FuncName != nil {
			// FIX: the original passed sl.SpeedAddFunc.Params here (apparent
			// copy-paste); the del callback should receive its own Params.
			sl.SpeedDelFunc.FuncName("", sl.SpeedDelFunc.Params)
		}
	}
}
// readWriteRate stores into Rwd the percentage by which reads lead writes:
// (ReadSum - WriteSum) * 100 / ReadSum. It is a no-op until anything has
// been read (avoids division by zero).
// FIX(idiom): ReadSum is loaded once (the original loaded it three times,
// so the guard, numerator, and divisor could see different values) and
// Store replaces a Swap whose result was discarded.
func (sl SpeedLimit) readWriteRate() {
	readSum := atomic.LoadInt64(sl.ReadSum)
	if readSum > 0 {
		atomic.StoreInt64(sl.Rwd, (readSum-atomic.LoadInt64(sl.WriteSum))*100/readSum)
	}
}
// SpeedMonitor runs the per-table throttling loop: once per second it
// recomputes rate deltas, engages or releases the limiter, publishes a
// snapshot via SendResFunc, and rolls the measurement window. It exits when
// SpeedEndDone is signaled (by MqTableStatusMonitor's deferred send).
// FIX: the ticker was never stopped, leaking its runtime timer.
func (sl SpeedLimit) SpeedMonitor() {
	sl.Wg.Add(1)
	ll := time.NewTicker(time.Duration(1) * time.Second)
	go func() {
		defer func() {
			ll.Stop()
			if sl.DeferFuncQuit.FuncName != nil {
				sl.DeferFuncQuit.FuncName("", sl.DeferFuncQuit.Params)
			}
			sl.Wg.Done()
		}()
		for {
			select {
			case <-ll.C:
				// refresh the read/write divergence and per-side rates
				sl.readWriteRate()
				sl.getReadRate()
				sl.getWriteRate()
				if sl.getSpeedLimitStatus() {
					sl.speedLimitAdd()
				} else {
					sl.speedLimitDel()
				}
				if sl.SendResFunc.FuncName != nil {
					sl.SendResFunc.FuncName("", sl.getMonitorTerminal())
				}
				sl.swapLastValue()
			case <-sl.SpeedEndDone:
				return
			}
		}
	}()
}

// sendMsgAction builds the outgoing message map from template q: any entry
// whose key equals "sendMsgKey" (case-insensitive) is replaced by a new
// entry keyed by that entry's value (must be a string) holding payload c;
// every other entry is copied through unchanged.
func sendMsgAction(q map[string]any, c any) map[string]any {
	out := make(map[string]any, len(q))
	for key, val := range q {
		if !strings.EqualFold(key, "sendMsgKey") {
			out[key] = val
			continue
		}
		out[val.(string)] = c
	}
	return out
}
// SendMsgF publishes msg via the configured producer. The "topic" entry of
// msg (formatted with %v, empty if absent) becomes the topic, the table
// name becomes the body, and msg itself becomes the properties. Returns
// false when the send fails.
func (mm MonitorMsgMq) SendMsgF(msg map[string]any) bool {
	var topic string
	if v, ok := msg["topic"]; ok {
		topic = fmt.Sprintf("%v", v)
	}
	err := mm.SendMsg.MsgProducer.Send(Message{
		Topic:      topic,
		Body:       []byte(mm.SendMsg.Table),
		Properties: msg,
	})
	return err == nil
}

// ChanMonitorSendMsg loops reading payloads from the MsgModifyKey channel
// and publishes one MQ message per payload, honoring the optional rate
// limiter. It returns when the channel is closed (after running
// MsgFinishFunc) or when a send fails.
func (mm MonitorMsgMq) ChanMonitorSendMsg(event string) {
	for {
		if mm.SpeedLimitSwitch {
			// NOTE(review): when throttled, a failed TryAcquire busy-spins
			// this loop at full CPU until a token appears — consider a
			// blocking acquire or a short sleep; confirm the limiter's API
			// before changing.
			if mm.Speed.getCurrySpeedListStatus() && !mm.Speed.SLimit.TryAcquire() {
				continue
			}
		}
		select {
		case c, ok := <-mm.SendMsg.MsgModifyKey:
			if !ok {
				// channel closed: run the finish callback and stop.
				if mm.SendMsg.MsgFinishFunc.FuncName != nil {
					mm.SendMsg.MsgFinishFunc.FuncName("", mm.SendMsg.MsgFinishFunc.Params)
				}
				return
			} else {
				// a message is only sent when a send callback is configured;
				// a failed send aborts the whole loop.
				if mm.SendMsg.MsgSendFunc.FuncName != nil {
					mm.SendMsg.MsgSendFunc.FuncName("", mm.SendMsg.MsgSendFunc.Params)
					if !mm.SendMsgF(sendMsgAction(mm.SendMsg.MsgData, c)) {
						return
					}
				}
				if mm.SendMsg.MsgSuccessfulFunc.FuncName != nil {
					mm.SendMsg.MsgSuccessfulFunc.FuncName("", mm.SendMsg.MsgSuccessfulFunc.Params)
				}
			}
		}
	}
}
// execFunc runs every registered ExecFunc callback for one consumed item c.
// Each callback receives [baseProduceParameter, c, <its own Params...>];
// a scalar Params is appended as-is, a []any is spread. A panic in any
// callback is converted to the returned error via ref.RecoverPanic. No-op
// when baseProduceParameter is nil or no callbacks are registered.
func (mm MonitorMsgMq) execFunc(event string, c any, baseProduceParameter any) (err error) {
	var params []any
	event = fmt.Sprintf("%v -> execFunc ", event)
	defer func() {
		// keep the first error: a recovered panic only wins if err is still nil
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	log.SendMsg().Debug(event, " start run exec Func.")
	if baseProduceParameter == nil || len(mm.ExecFunc) == 0 {
		log.SendMsg().Debug(event, "execFunc is nil. will continue")
		return
	}
	params = append(params, baseProduceParameter)
	for _, ef := range mm.ExecFunc {
		if ef.FuncName == nil {
			log.SendMsg().Debug(event, "sub execFunc is nil. will continue")
			continue
		}
		// NOTE(review): paramsNew shares params' backing array; this appears
		// safe only because params has len == cap == 1 so every append below
		// reallocates — verify before changing how params is built.
		var paramsNew = params
		if ef.Params == nil {
			paramsNew = append(paramsNew, c, nil)
		} else {
			switch ef.Params.(type) {
			case []any:
				paramsNew = append(paramsNew, c)
				paramsNew = append(paramsNew, ef.Params.([]any)...)
			case any:
				paramsNew = append(paramsNew, c, ef.Params)
			}
		}
		ef.FuncName(event, paramsNew)
	}
	return
}
// continueFunc asks the configured ContinueFunc whether item c should be
// skipped; returning true means "skip this item". A nil item is always
// skipped, a panic inside the callback also yields true, and a missing
// callback yields false (process the item).
func (mm MonitorMsgMq) continueFunc(event string, c any, baseProduceParameter any) (rr bool) {
	event = fmt.Sprintf("%v -> %v ", event, " continueFunc")
	defer func() {
		// treat a panicking callback as "skip"
		if ref.RecoverPanic(event, recover()) != nil {
			rr = true
			return
		}
	}()
	if c == nil {
		return true
	}
	log.SendMsg().Debug(event, "start run continue Func.")
	if mm.ContinueFunc.FuncName == nil {
		log.SendMsg().Debug(event, "continueFunc is nil")
		return
	}
	// build the callback arguments: [baseProduceParameter?, c, Params...]
	var paramsNew []any
	if baseProduceParameter != nil {
		paramsNew = append(paramsNew, baseProduceParameter)
	}
	if mm.ContinueFunc.Params == nil {
		paramsNew = append(paramsNew, c)
	} else {
		switch mm.ContinueFunc.Params.(type) {
		case []any:
			paramsNew = append(paramsNew, c)
			paramsNew = append(paramsNew, mm.ContinueFunc.Params.([]any)...)
		case any:
			// NOTE(review): `case any` matches every non-[]any value, so
			// this arm is effectively the default branch.
			paramsNew = append(paramsNew, c, mm.ContinueFunc.Params)
		}
	}
	r := mm.ContinueFunc.FuncName(event, paramsNew)
	// only a bool result is honored; any other return type leaves rr false
	switch r.(type) {
	case bool:
		rr = r.(bool)
	}
	log.SendMsg().Debug(event, "continueFunc end run continue Func. result is ", rr)
	return
}
// quitFunc invokes the NormalQuit callback with arguments assembled from c
// and the callback's configured Params; it is a no-op (with a warning) when
// no callback is registered.
func (mm MonitorMsgMq) quitFunc(event string, c any) {
	event = fmt.Sprintf("%v -> %v ", event, "quitFunc")
	var params []any
	if mm.NormalQuit.FuncName == nil {
		log.MainLog().Warn(event, "quitFunc is nil. not exec quit func.")
		return
	}
	if mm.NormalQuit.Params == nil {
		// no configured params: pass c alone (possibly nil)
		params = []any{c}
	} else {
		switch mm.NormalQuit.Params.(type) {
		case []any:
			if c != nil {
				params = append(params, c)
			}
			if len(mm.NormalQuit.Params.([]any)) > 0 {
				params = append(params, mm.NormalQuit.Params.([]any)...)
			}
		case any:
			// NOTE(review): `case any` matches every non-[]any value, so this
			// arm is effectively the default; the nil check below is always
			// true here (Params == nil was handled above).
			if c != nil {
				params = append(params, c)
			}
			if mm.NormalQuit.Params != nil {
				params = append(params, mm.NormalQuit.Params)
			}
		}
	}
	log.MainLog().Debug(event, "start run quit func.")
	mm.NormalQuit.FuncName(event, params)
	return
}
// NormalSendMsg runs the exec callbacks for item c. With currying disabled
// it executes synchronously; otherwise it acquires a semaphore slot and a
// wait-group token and submits the work to the ants pool. The returned
// error reflects synchronous execution, panic recovery, or a failed pool
// submission — errors inside the pooled task are only logged.
// FIX: the pooled closure previously assigned the named return `err`,
// racing with the caller's return of the same variable; it now uses a
// task-local error.
func (mm MonitorMsgMq) NormalSendMsg(role, event string, pool *ants.Pool, c, baseProduceParameter any) (err error) {
	event = fmt.Sprintf("%v -> NormalSendMsg", event)
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	if !mm.CurryLimit.Switch {
		err = mm.execFunc(event, c, baseProduceParameter)
		return
	}
	log.SendMsg().Debug(event, " start run NormalSendMsg.")
	mm.MsgCurryAdd()
	mm.MsgCurryWgAdd()
	if err = pool.Submit(func() {
		log.SendMsg().Debug(event, " thread run exec Func.", c, " ", baseProduceParameter)
		defer func() {
			if mm.CurryLimit.Switch && !mm.CurryLimit.ManualDone {
				mm.MsgCurryDone()
				mm.MsgCurryWgDone()
			}
		}()
		if taskErr := mm.execFunc(event, c, baseProduceParameter); taskErr != nil {
			log.ErrorLog().Error(event, taskErr)
		}
	}); err != nil {
		log.ErrorLog().Error(event, err)
	}
	return
}
// initPool creates the ants goroutine pool used for concurrent sends. The
// pool size follows the capacity of the currying semaphore channel, falling
// back to 10 when no capacity is configured.
func (mm MonitorMsgMq) initPool() (*ants.Pool, error) {
	size := cap(mm.CurryLimit.CurrySum)
	if size <= 0 {
		size = 10 // default pool size
	}
	return ants.NewPool(size, ants.WithNonblocking(false))
}
// generalSeqLog returns per-message log-sequencing metadata ("seq" plus a
// pseudo-random "logSeq"), or nil when sequence logging is disabled.
// FIX: the original allocated `res = make(map[string]any)` and then
// discarded it on every return path; the dead allocation is removed.
func (mm MonitorMsgMq) generalSeqLog(seq int) map[string]any {
	if !mm.GeneralSeqLog {
		return nil
	}
	return map[string]any{"seq": seq, "logSeq": rand.NewSource(time.Now().UnixNano()).Int63()}
}
// NormalChanSendMsg drains the MsgModifyKey channel, dispatching each item
// through continueFunc/NormalSendMsg, then waits for the currying wait
// group and the ants pool to go idle before invoking quitFunc.
// FIX 1: the deferred recover previously did `err = ref.RecoverPanic(...)`
// unconditionally, erasing any real error already being returned with nil
// when no panic occurred; it now only overwrites a nil err (matching the
// pattern used by execFunc and NormalSendMsg).
// FIX 2: the final pool-drain loop busy-spun at full CPU, emitting a warn
// log per iteration; a short sleep now paces it.
func (mm MonitorMsgMq) NormalChanSendMsg(role string) (err error) {
	var seq int
	var pool *ants.Pool
	var event = fmt.Sprintf(" %v -> NormalChanSendMsg ", role)
	log.SendMsg().Info(event, "Starting to send messages to the MQ...")
	defer func() {
		log.SendMsg().Info(event, "send messages to the MQ is completed.")
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
		mm.quitFunc(event, nil)
	}()
	if pool, err = mm.initPool(); err != nil {
		log.ErrorLog().Error(event, err)
		return
	}
	defer pool.Release() // ensure the pool is released
	for c := range mm.SendMsg.MsgModifyKey {
		seq++
		if mm.continueFunc(event, c, mm.generalSeqLog(seq)) {
			log.SendMsg().Info(event, "continue to send messages to the MQ...")
			continue
		}
		if err = mm.NormalSendMsg(role, event, pool, c, mm.generalSeqLog(seq)); err != nil {
			log.ErrorLog().Error(event, err)
			return
		}
	}
	log.SendMsg().Info(event, "send messages to the MQ is completed.")
	if mm.CurryLimit.Wg != nil {
		mm.CurryLimit.Wg.Wait()
	}
	log.SendMsg().Info(event, "Wait for the running thread to end...")
	for pool.Waiting() != 0 || pool.Running() != 0 {
		log.SendMsg().Warn(event, fmt.Sprintf("Wait for the running thread to end. Current status: waiting:%v Running:%v", pool.Waiting(), pool.Running()))
		time.Sleep(100 * time.Millisecond)
	}
	return
}
