package ckR

import (
	"bytes"
	"database/sql"
	"db2s/Fp"
	ea "db2s/encryptionAlgorithm"
	"db2s/full"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/ref"
	"db2s/table"
	mq "db2s/topic-mq"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// countDirectWay acknowledges the first-level MQ bookkeeping for a count task
// and returns per-side count metadata. The direct row-count shortcut below is
// disabled (commented out); both sides are returned with exec=true so the
// actual COUNT queries are always performed later by countExec. The named err
// result is always nil while the shortcut stays disabled.
func countDirectWay(message mq.Message) (s, d countRowsMeta, err error) {
	var plan = message.Properties["plan"].(*SchedulePlan2)
	//var sum int64
	//var r []countRowsMeta
	//for drive, db := range map[string]map[string]*sql.DB{GlobalPConfigs.dSns.SrcDBName: GlobalPConfigs.SDB, GlobalPConfigs.dSns.DestDBName: GlobalPConfigs.DDB} {
	//if sum, err = table.Rows(drive, plan.Object, db); err != nil {
	//	return
	//}
	//rr := countRowsMeta{rows: sum}
	//if sum < 10000000 {
	//	rr.exec = true
	//}
	//	rr.exec = true
	//	r = append(r, rr)
	//}
	//s, d = r[0], r[1]
	// First-level message accounting: produced, consumed, and finish-marked.
	plan.MQ.FirstObjectProductAdd()
	plan.MQ.FirstObjectCustomerAdd()
	plan.MQ.FirstMsgFinishMarkAdd()
	// NOTE(review): FirstMsg.ProductMsg and SecondMsg.ProductMsg are each
	// summed twice — presumably once per side (source and dest); confirm this
	// is intentional and not a copy-paste duplicate.
	plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.FirstMsg.ProductMsg)
	plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.FirstMsg.ProductMsg)
	plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.SecondMsg.ProductMsg)
	plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.SecondMsg.ProductMsg)
	return countRowsMeta{exec: true}, countRowsMeta{exec: true}, nil
}
// srcCountInput assembles the table.SumS descriptor used to run a row-count
// query against a single (non-sharded) schema/table on the given connection
// pool. The plan supplies the row limit and any user-configured WHERE filters.
func srcCountInput(drive, schemaName, tableName string, plan *SchedulePlan2, db map[string]*sql.DB) table.SumS {
	// User-configured row filters travel with the count request.
	filter := global.SqlFilter{
		WhereSql: plan.TableAttributes.WhereSql,
		WhereAdd: plan.TableAttributes.WhereAdd,
	}
	// Snapshot/SCN configuration comes from the global sync rules.
	startInput := global.StartPartConfigInputP{
		Scn: GlobalPConfigs.rules.Sync.Scn,
	}
	sum := table.SumS{
		DBObject:         drive,
		DB:               db,
		Schema:           schemaName,
		Table:            tableName,
		BackendTableName: tableName,
		ShardName:        "single",
		RowsLimit:        plan.Object.RowsLimit,
		SqlFilter:        filter,
		Input:            startInput,
	}
	return sum
}
// countExec launches a goroutine that runs the row count for the plan's table
// on one side of the comparison ("source" or "dest"), accumulates the result
// into the task bar and the pod counters, and advances the second-level MQ
// consumer accounting. Any other value of object is a no-op.
//
// On a count failure the error is wrapped, reflected on the task bar, logged,
// and the MQ accounting is NOT advanced (same as the original behavior).
// The goroutine is fire-and-forget; completion is observed through the MQ
// counters, not through this function.
func countExec(message mq.Message, object string) {
	var (
		event = "[countExec]"
		plan  = message.Properties["plan"].(*SchedulePlan2)
	)
	go func() {
		// Pick the DB side; bail out on an unknown object, matching the
		// original switch which simply fell through and did nothing.
		var dbName string
		switch object {
		case "source":
			dbName = GlobalPConfigs.dSns.SrcDBName
		case "dest":
			dbName = GlobalPConfigs.dSns.DestDBName
		default:
			return
		}
		s := full.NewFull([]string{dbName}, "left", plan.Object.Role)
		s.Parameter1 = append(s.Parameter1, plan.SrcParameter)
		countResult, err := full.GetFullCountST(s)
		if err != nil {
			err = ref.ErrAddPrintf(event, err)
			PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, errorActive(plan, fmt.Sprintf("%v", err)))
			log.ErrorLog().Error(fmt.Sprintf("exec count for table %v fail. error is {%v}", getTableName(plan.Object.Schema, plan.Object.Table), ref.ErrAddPrintf(event, err)))
			return
		}
		sum := countResult.(int64)
		// Side-specific accounting: the task bar "select" column tracks the
		// source count and "insert" tracks the dest count.
		switch object {
		case "source":
			PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": sum, "insert": int64(0)})
			atomic.AddInt64(plan.TPod.Rows.SourceNumber, sum)
		case "dest":
			PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": int64(0), "insert": sum})
			atomic.AddInt64(plan.TPod.Rows.DestNumber, sum)
		}
		// Advance the second-level MQ consumer counters (identical sequence
		// for both sides).
		plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.FirstMsg.CustomerMsg)
		plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.SecondMsg.CustomerObject)
		plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.SecondMsg.CustomerMsg)
		plan.MQ.ExecSecondMsgCustomerMqSum(plan.MQ.SecondMsg.ProductSendMsgFinishMark)
	}()
}

// countRowsMeta describes one side's (source or dest) row-count work item.
type countRowsMeta struct {
	rows int64 // pre-computed row total; unused while the direct-count path in countDirectWay is disabled
	exec bool  // when true, a COUNT query must still be executed for this side
}
// CountRunTransfer carries the source-side and dest-side count metadata from
// countWayListeners to countPointExecListeners via MQ message properties.
type CountRunTransfer struct {
	sRowsM, dRowsM countRowsMeta // source-side and dest-side metadata
}

// errorActive normalizes an error string for task-bar display. For the
// special "column length abnormal" error (table structures differ) it tries
// to render the added/dropped column lists; any other error string is passed
// through unchanged. The plan parameter is currently unused.
func errorActive(plan *SchedulePlan2, err string) (errStr string) {
	if strings.EqualFold(err, "column length abnormal") { // table structures differ
		// NOTE(review): srcColumn and dstColumn are never populated, so
		// Arrcmp always compares two empty slices and errStr ends up empty
		// for this branch — the column diff below can never be reported.
		// Confirm where the column lists were supposed to come from
		// (plan.Object?) and wire them in.
		var srcColumn, dstColumn []string
		var returnError bytes.Buffer
		aa := &ea.CheckSumTypeStruct{}
		add, del := aa.Arrcmp(srcColumn, dstColumn)
		for _, v := range add {
			returnError.WriteString(fmt.Sprintf("add column %v ,", v))
		}
		for _, v := range del {
			returnError.WriteString(fmt.Sprintf("drop column %v,", v))
		}
		errStr = returnError.String()
		returnError.Reset()
	} else {
		errStr = err
	}
	return
}
// initErrorAction records a task-level failure: it decrements the plan's
// error counter, swaps the task-bar sub-task into an error state, marks the
// terminal pod as errored, stores podInfo as the pod result, and persists the
// pod via newTer.
func initErrorAction(plan *SchedulePlan2, err error, podInfo string) {
	atomic.AddInt64(plan.Status.ErrEnd, -1)
	PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, errorActive(plan, fmt.Sprintf("%v", err)))
	// -2 presumably marks a terminal sub-task status that must not be
	// overwritten with "error" — TODO confirm against PlanContext semantics.
	if PlanContext.TaskBarSubsStatusGet(plan.subTaskInfo) != -2 {
		PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "error")
	}
	PlanContext.TPodTableStatusSwap(plan.TPod.Status, "error")
	plan.TPod.Result = podInfo
	newTer.Write(plan.TPod)
}
// runErrorAction classifies a runtime error for a count task. If the error
// text contains any error code from the configured ignore list, the error is
// swallowed and true is returned. Otherwise the optional taskObject's error
// flag is set, initErrorAction records the failure on the plan, and false is
// returned.
func runErrorAction(message mq.Message, err string) bool {
	plan := message.Properties["plan"].(*SchedulePlan2)
	// Errors whose numeric code appears in the ignore list are not treated
	// as failures. (err is already a string; no re-formatting needed.)
	for _, code := range GlobalPConfigs.rules.Sync.IgError {
		if strings.Contains(err, strconv.Itoa(code)) {
			return true
		}
	}
	if taskObject, ok := message.Properties["taskObject"]; ok && taskObject != nil {
		atomic.SwapInt64(taskObject.(Fp.TaskDistribution).ErrEnd, -1)
	}
	initErrorAction(plan, errors.New(err), "errors")
	return false
}

// countWayListeners is the CountWay listener/executor. It wires the plan's MQ
// monitoring (wait group, abnormal/normal quit hooks, per-table status
// monitor), computes the source/dest count metadata via countDirectWay, and
// forwards a CountPoint message carrying that transfer state. It blocks on
// the shared wait group before acknowledging, and requeues the message on
// count or send failure.
func countWayListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	var (
		tableName = string(message.Body)
		topic     = CountPoint
		event     = "[countWay]"
		logSeq    int64
		err       error
		transfer  = &CountRunTransfer{}
	)
	var plan = message.Properties["plan"].(*SchedulePlan2)
	var wg sync.WaitGroup
	plan.Status.SubTaskBeginTime = time.Now().UnixNano()
	// In "bar" output mode, reflect the running state on the terminal task bar.
	if GlobalPConfigs.result.Teletypewriter == "bar" {
		PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "running")
		infoWriteInfo(fmt.Sprintf("(%d) %s Add task Bar Run object %v successfully", logSeq, event, tableName))
	}
	// Share one wait group between the MQ monitor and its speed tracker, and
	// install the quit hooks before starting the per-table status monitor.
	plan.MQ.Wg, plan.MQ.Speed.Wg = &wg, &wg
	plan.MQ.AbnormalQuit, plan.MQ.NormalQuit = global.EFunc{FuncName: mqStatusMonitorAbnormalQuit, Params: plan}, global.EFunc{FuncName: mqStatusMonitorNormalQuit, Params: plan}
	plan.MQ.MqTableStatusMonitor(getTableName(plan.Object.Schema, plan.Object.Table))
	if transfer.sRowsM, transfer.dRowsM, err = countDirectWay(message); err != nil {
		runErrorAction(message, fmt.Sprintf("%v", err))
		return mq.ReconsumeLater
	}
	plan.MQ.FirstObjectProductSwap(int64(1))
	// Forward the count work to the CountPoint topic; requeue on send failure.
	if !sendMsg(getCheckMod(), topic, tableName, map[string]any{"logSeq": logSeq, "plan": message.Properties["plan"],
		"transfer": transfer, "err": plan.Status.ErrEnd,
		"subTask": plan.subTaskInfo, "topic": string(topic), "outFay": GlobalPConfigs.result.Teletypewriter}) {
		return mq.ReconsumeLater
	}
	wg.Wait()
	return mq.ConsumeSuccess
}

// countPointExecListeners is the CountPoint listener/executor. It consumes
// the transfer state produced by countWayListeners and, for each side whose
// exec flag is set, launches the asynchronous row count via countExec. The
// topic is always deleted on exit; the message is requeued when the plan is
// already in an abnormal-error state.
func countPointExecListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	var (
		plan   = message.Properties["plan"].(*SchedulePlan2)
		dTopic = CountPoint
	)
	defer deleteTopic(map[string]any{"dTopic": dTopic, "subTask": plan.subTaskInfo, "topic": string(dTopic), "outFay": GlobalPConfigs.result.Teletypewriter})
	if abnormalErrorQuit(plan) {
		return mq.ReconsumeLater
	}
	// Panics if "transfer" is missing or of the wrong type — upstream always
	// sets it in countWayListeners.
	transfer := message.Properties["transfer"].(*CountRunTransfer)
	if transfer.sRowsM.exec {
		countExec(message, "source")
	}
	if transfer.dRowsM.exec {
		countExec(message, "dest")
	}
	return mq.ConsumeSuccess
}
// countPartitionExecListeners is the partition-level count listener. The
// entire implementation is currently commented out, so the function simply
// acknowledges every message; the disabled code sketches the intended flow
// (task-bar updates and countDirectWay per partition).
func countPartitionExecListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	//var (
	//	tableName = string(message.Body)
	//	plan      = message.Properties["plan"].(*SchedulePlan2)
	//	dTopic    = CountWay
	//	event     = "[countWay]"
	//	logSeq    int64
	//)
	//defer deleteTopic(map[string]any{"dTopic": dTopic, "subTask": plan.subTaskInfo, "topic": string(dTopic), "outFay": GlobalPConfigs.result.Teletypewriter})
	//plan.Status.SubTaskBeginTime = time.Now().UnixNano()
	//if GlobalPConfigs.result.Teletypewriter == "bar" {
	//	PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "running")
	//	infoWriteInfo(fmt.Sprintf("(%d) %s Add task Bar Run object %v successfully", logSeq, event, tableName))
	//}
	//plan.countDirectWay(message, &mqTaskSum, logSeq)
	return mq.ConsumeSuccess
}
// countIndexExecListeners handles a CountIndex message. Its only effect is to
// stamp the sub-task begin time onto the schedule plan before acknowledging
// the message.
func countIndexExecListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	schedulePlan := message.Properties["plan"].(*SchedulePlan2)
	schedulePlan.Status.SubTaskBeginTime = time.Now().UnixNano()
	return mq.ConsumeSuccess
}

//	func (sp *SchedulePlan2) queryPartitionNameGarth(object string) (instanceStr []string) {
//		switch object {
//		case "source":
//			instanceStr = sp.Object.PartitionData.Name.Source
//		case "dest":
//			instanceStr = sp.Object.PartitionData.Name.Dest
//		}
//		return
//	}
//
//	func (sp *SchedulePlan2) queryPartitionNameShard(object string, partitionName string) (instanceStr string) {
//		switch object {
//		case "source":
//			instanceStr = sp.Object.PartitionData.Shard.Source[partitionName]
//		case "dest":
//			instanceStr = sp.Object.PartitionData.Shard.Dest[partitionName]
//		}
//		return
//	}
//
//	func (sp *SchedulePlan2) queryPartitionNameId(object string, partitionName string) (instanceStr string) {
//		switch object {
//		case "source":
//			instanceStr = sp.Object.PartitionData.ShardId.Source[partitionName]
//		case "dest":
//			instanceStr = sp.Object.PartitionData.ShardId.Dest[partitionName]
//		}
//		return
//	}
//
//	func (sp *SchedulePlan2) queryTableSumActive(object string, sum int64) {
//		switch object {
//		case "source":
//			PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": sp.subTaskInfo, "select": sum})
//			//atomic.AddInt64(terminalPods[getTableName(sps.Object.Schema, sps.Object.Table)].Count.SourceNumber, sum)
//			atomic.AddInt64(sp.TPod.Count.SourceNumber, sum)
//		case "dest":
//			PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": sp.subTaskInfo, "insert": sum})
//			//atomic.AddInt64(terminalPods[getTableName(sps.Object.Schema, sps.Object.Table)].Count.DestNumber, sum)
//			atomic.AddInt64(sp.TPod.Count.DestNumber, sum)
//		}
//	}
//
//	func (sp SchedulePlan2) missCountActive(message mq.Message) {
//		var (
//			logSeq      = message.Properties["logSeq"].(int64)
//			QuerySource = message.Properties["QueryObject"].(string)
//			wg          sync.WaitGroup
//			curry       = make(chan struct{}, GlobalPConfigs.rules.Mtc)
//		)
//		for _, v := range sp.queryPartitionNameGarth(QuerySource) {
//			atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelProductSend, 1)
//			atomic.AddInt64(sp.mqProductCustomerMonitor.MqStartTiming, 1)
//			wg.Add(1)
//			curry <- struct{}{}
//			go func(partitionName, QuerySource1 string) {
//				defer func() {
//					<-curry
//					atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelCustomerReceive, 1)
//					wg.Done()
//				}()
//				if p, err := tableEr.Sum(tableEr.TableSum{DBType: queryObject(QuerySource1)}).Count(global.TableSumInput{
//					TableInfo: global.TableInfo{
//						Schema: func(g string) (y string) {
//							switch g {
//							case "source":
//								y = sp.Object.Schema
//							case "dest":
//								y = sp.Object.MSchema
//							}
//							return
//						}(QuerySource1),
//						Table: func(g string) (y string) {
//							switch g {
//							case "source":
//								y = sp.Object.Table
//							case "dest":
//								y = sp.Object.MTable
//							}
//							return
//						}(QuerySource1),
//						PartitionName: partitionName,
//						PartitionId:   sp.queryPartitionNameId(QuerySource1, partitionName),
//						ShardName:     sp.queryPartitionNameShard(QuerySource1, partitionName),
//					},
//					Input: global.StartPartConfigInputP{
//						Scn: GlobalPConfigs.rules.RowsR.Scn,
//					},
//					SqlFilter: global.SqlFilter{
//						//WhereSql: whereSql,
//					},
//					Db:     queryDb(QuerySource1),
//					LogSeq: logSeq,
//				}); err != nil {
//					fmt.Println("----error:", err)
//					return
//				} else {
//					sp.queryTableSumActive(QuerySource1, int64(p))
//				}
//			}(v, QuerySource)
//		}
//		wg.Wait()
//	}
// indexCountActive is the index-driven counting path. The entire
// implementation is currently commented out, so calling it is a no-op; the
// disabled code sketches building a scheduleTask.TaskDistribution per source
// partition and fanning segment SQL out through chanMonitorSendMsg. The
// message parameter is unused while the body stays disabled.
func (sp *SchedulePlan2) indexCountActive(message mq.Message) {
	//var (
	//	logSeq            = message.Properties["logSeq"].(int64)
	//	QuerySource       = message.Properties["QueryObject"].(string)
	//	taskObject1       = scheduleTask.TaskDistribution{}
	//	tableName         = string(message.Body)
	//	topic             = CountIndexSecondQuerySql
	//	errEnd      int64 = 0
	//)
	//taskObject1 = scheduleTask.TaskDistribution{
	//	LogSeq:     logSeq,
	//	DataSource: QuerySource,
	//	TableObject: scheduleTask.TableObject{
	//		Schema: sp.Object.Schema,
	//		Table:  sp.Object.Table,
	//	},
	//	Db: scheduleTask.ConnDb{
	//		SourceObject: GlobalPConfigs.dSns.SrcDBName,
	//		DestObject:   GlobalPConfigs.dSns.DestDBName,
	//		Sdb:          GlobalPConfigs.SDB,
	//		Ddb:          GlobalPConfigs.DDB,
	//	},
	//	TableInfo: scheduleTask.TableInfo{
	//		TableColumn: sp.Object.TableColData.DColumnInfo,
	//		//IndexColumn: sps.ReadOptimizer.IndexColumn,
	//		IndexColumn: []string{"_hidden_pk_"},
	//		IndexName:   sp.ReadOptimizer.IndexName,
	//	},
	//	StaticP: scheduleTask.StaticParameter{
	//		CpLength:    GlobalPConfigs.rules.QueueSize,
	//		MulFactor:   sp.ReadOptimizer.Thread,
	//		ChunkNumber: sp.ReadOptimizer.ChunkSum,
	//		Scn:         GlobalPConfigs.rules.Sync.Scn,
	//	},
	//	ErrEnd: &errEnd,
	//}
	//for _, v := range sp.queryPartitionNameGarth(QuerySource) {
	//	var labelChecklist = make(map[string][]string)
	//	taskObject1.TableObject.PartitionName = v
	//	_, taskObject1.TableObject.ShardName, taskObject1.TableObject.PartitionId = sp.partitionMap(v, "source")
	//	var tableWhereAdd string
	//	if w1, ok1 := GlobalPConfigs.schema.WhereAdd[getSchemaToTableMap(sp.Object.Schema, sp.Object.Table)]; ok1 {
	//		tableWhereAdd = w1
	//	}
	//	sqlWhere := taskObject1.IsSegmentValue(tableWhereAdd, sp.ReadOptimizer.IndexColumn)
	//	//send message
	//	l := chanSendMsg{
	//		Object:   "first",
	//		SendData: sqlWhere,
	//		SendMsgInfo: SendMsgInfo{
	//			CheckMod:  getCheckMod(),
	//			Topic:     topic,
	//			TableName: tableName,
	//		},
	//		SendMsg:          sendMsg,
	//		SpeedLimit:       sp.TableSpeedLimit.SpeedLimit,
	//		SpeedLimitSwitch: true,
	//		Status:           sp.TableSpeedLimit.Status,
	//		FatherSon: FatherSon{
	//			FatherTags: map[string]any{"logSeq": logSeq, "plan": message.Properties["plan"],
	//				"taskObject": taskObject1,
	//				"sendMsgKey": "segmentSql", "label": map[string]any{"no": []string{}},
	//				"err": sp.Status.ErrEnd, "subTask": sp.subTaskInfo, "topic": string(topic),
	//				"outFay": GlobalPConfigs.result.Teletypewriter},
	//			FatherSonTags: labelChecklist,
	//		},
	//		SendMsgEnd: func() {
	//		},
	//	}
	//	if !chanMonitorSendMsg(&l) {
	//		return
	//	}
	//}
}

// countIndexSegmentQueryListeners is the CountIndexSegmentQuery listener/
// executor. The implementation is currently commented out, so every message
// is acknowledged without work; the disabled code routed messages to either
// missCountActive or indexCountActive depending on the "countDirect" flag.
func countIndexSegmentQueryListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	//var (
	//	//topic          = CountIndexSecondQuerySql
	//	dTopic = CountIndexSegmentQuery
	//	event  = "[isSegmentQueryListeners]"
	//	//tableName = string(message.Body)
	//	plan   = message.Properties["plan"].(*SchedulePlan2)
	//	logSeq = message.Properties["logSeq"].(int64)
	//)
	//defer deleteTopic(map[string]any{"logSeq": logSeq, "message": message, "event": event, "dTopic": dTopic, "subTask": plan.subTaskInfo, "topic": string(dTopic)})
	//QueryDirect := message.Properties["countDirect"].(bool)
	//if QueryDirect {
	//	plan.missCountActive(message)
	//} else {
	//	plan.indexCountActive(message)
	//}
	return mq.ConsumeSuccess
}

// countIndexSecondQuerySqlListeners is the CountIndexSecondQuerySql listener/
// executor. Most of the pipeline is commented out: the function currently
// only schedules topic deletion on exit and unpacks the segment SQL from the
// message before acknowledging.
func countIndexSecondQuerySqlListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	var (
		//	topic               = CountIndexQueryData
		//tableName = string(message.Body)
		dTopic = CountIndexSecondQuerySql
		logSeq = message.Properties["logSeq"].(int64)
		// event is an empty placeholder kept for the deleteTopic payload shape.
		event = ""
		//	fatherSonTags       = make(map[string][]string)
		plan = message.Properties["plan"].(*SchedulePlan2)
		//	shardTable          = message.Properties["shardTable"].(string)
		//	shardName           = message.Properties["shardName"].(string)
		//	segmentSql          = message.Properties["segmentSql"].(map[string]string)
		deleteMapInfo = map[string]any{"logseq": logSeq, "message": message, "event": event, "dtopic": dTopic, "subTask": plan.subTaskInfo, "topic": string(dTopic)}
	)
	defer deleteTopic(deleteMapInfo)
	// Panics if "segmentSql" is missing or not a global.StartPart.
	segmentSql := message.Properties["segmentSql"].(global.StartPart)
	// NOTE(review): leftover debug print — remove or route through the
	// project logger before release.
	fmt.Println("---ll:", segmentSql.Limit.Pagination)
	//QuerySource := message.Properties["QuerySource"].(string)
	//indexColumn := indexColumnDispos(message)
	//schema, table := schemaTableDispos(message)
	//tableMeta := tableMetaDispos(message)
	//ll := idxcDispos(message)
	//ll.TableColumn = tableMeta
	//ll.Schema = schema
	//ll.Table = table
	//ll.ColumnName = indexColumn
	//ll.ShardName = shardName
	//if !chanMonitorSendMsg(scheduleTask.GetTaskDistributionO(map[string]any{
	//	"cpLength":         GlobalPConfigs.rules.QueueSize,
	//	"whereColumn":      indexColumn,
	//	"schema":           schema,
	//	"table":            table,
	//	"logSeq":           logThreadSeq,
	//	"tableLimit":       plan.tableLimit,
	//	"chunkStartSource": QuerySource,
	//	"sdb":              GlobalPConfigs.SDB,
	//	"ddb":              GlobalPConfigs.DDB,
	//	"chunkStartFs":     ll,
	//	"chunkStartFd":     ll,
	//	"chunkNumber":      GlobalPConfigs.rules.ChanRowCount,
	//}).IndexConcurrentQuery(segmentSql),
	//	getCheckMod(), topic, tableName, sendMsg, map[string]any{"logSeq": logThreadSeq,
	//		"sendMsgKey": "sqlwhere", "shardTable": shardTable, "shardName": shardName, "QuerySource": QuerySource,
	//		"label":   message.Properties["label"],
	//		"subTask": plan.subTaskInfo, "plan": message.Properties["plan"], "topic": string(topic),
	//		"outFay": GlobalPConfigs.result.Teletypewriter,
	//	}, fatherSonTags, func() {
	//	}) {
	//	return mq.ReconsumeLater
	//}
	//if atomic.LoadInt64(plan.errEnd) > -1 {
	//	plan.mqProductCustomerMonitor.ProductMessage <- fatherSonTags
	//	atomic.AddInt64(plan.mqProductCustomerMonitor.mqLevelCustomerReceive, 1)
	//}
	return mq.ConsumeSuccess
}

// countIndexQueryDataListeners is the CountIndexQueryData listener/executor.
// The implementation is currently commented out, so every message is
// acknowledged without work; the disabled code ran countTableQuery and
// forwarded the result to the CountIndexResult topic.
func countIndexQueryDataListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	//var (
	//	dTopic              = CountIndexQueryData
	//	topic               = CountIndexResult
	//	tableName           = string(message.Body)
	//	plan                = message.Properties["plan"].(map[string]*SchedulePlan2)[tableName]
	//	querySource         = message.Properties["QuerySource"].(string)
	//	logThreadSeq        = message.Properties["logSeq"].(int64)
	//	event               = ""
	//	logseq        int64 = 1
	//	deleteMapInfo       = map[string]any{"logseq": logseq, "message": message, "event": event, "dtopic": dTopic, "subTask": plan.subTaskInfo, "topic": string(dTopic)}
	//)
	//defer deleteTopic(deleteMapInfo)
	//if d1, err := countTableQuery(message); err != nil {
	//	fmt.Println(err)
	//} else {
	//	if !sendMsg(getCheckMod(), topic, tableName, map[string]any{"logSeq": logThreadSeq, "QuerySource": querySource,
	//		"tableCount": d1, "subTask": plan.subTaskInfo, "plan": message.Properties["plan"], "topic": string(topic),
	//		"outFay": GlobalPConfigs.result.Teletypewriter, "label": message.Properties["label"],
	//	}) {
	//		fmt.Println("------send message error")
	//	}
	//}
	return mq.ConsumeSuccess
}

// countIndexResultListeners is the CountIndexResult listener/executor. The
// implementation is currently commented out, so every message is acknowledged
// without work; the disabled code accumulated per-side counts into the
// terminal pods and the task bar.
func countIndexResultListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	//var (
	//	tableName           = string(message.Body)
	//	plan                = message.Properties["plan"].(map[string]*SchedulePlan2)[tableName]
	//	dTopic              = CountIndexResult
	//	event               = ""
	//	logseq        int64 = 1
	//	deleteMapInfo       = map[string]any{"logseq": logseq, "message": message, "event": event, "dtopic": dTopic, "subTask": plan.subTaskInfo, "topic": string(dTopic)}
	//)
	//defer deleteTopic(deleteMapInfo)
	//QuerySource := message.Properties["QuerySource"].(string)
	//d1 := message.Properties["tableCount"].(int64)
	//switch QuerySource {
	//case "src":
	//	atomic.AddInt64(terminalPods[getTableName(plan.schema, plan.table)].Count.SourceNumber, d1)
	//	if GlobalPConfigs.result.Teletypewriter == "bar" {
	//		PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": d1, "insert": int64(0)})
	//	}
	//case "dst":
	//	atomic.AddInt64(terminalPods[getTableName(plan.schema, plan.table)].Count.DestNumber, d1)
	//	if GlobalPConfigs.result.Teletypewriter == "bar" {
	//		PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": int64(0), "insert": d1})
	//	}
	//}
	//if GlobalPConfigs.result.Teletypewriter == "bar" {
	//	PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "consumption": int64(1)})
	//}
	//plan.TotalCount = int64(math.Max(float64(*plan.cssum), float64(*plan.cdsum)))
	//atomic.AddInt64(plan.mqProductCustomerMonitor.mqCustomerSeq, 1)
	return mq.ConsumeSuccess
}
