package ckR

//
//import (
//	"database/sql"
//	"fmt"
//	"db2s/dbExec"
//	ea "db2s/encryptionAlgorithm"
//	"db2s/global"
//	"db2s/outPut"
//	"db2s/tableEr"
//	"math/rand"
//	"os"
//	"reflect"
//	"runtime"
//	"strconv"
//	"strings"
//	"sync"
//	"sync/atomic"
//	"time"
//)
//
//// sendDatafixMqMsgStatus reports whether a data-fix MQ message for differential data should be sent.
////func (sps *SchedulePlan2) sendDatafixMqMsgStatus() bool {
////	var event string
////	if atomic.LoadInt64(sps.tableAbnormalStatus) == 1 && strings.EqualFold(GlobalPConfigs.repair.DataFix, "no") {
////		global.Wlog.Warn(fmt.Sprintf("(%d) %s The current table data is in an \"abnormal\" state, the repair parameter is \"no\", and it will exit..", sps.logThreadSeq, event))
////		return false
////	}
////	return true
////}
//
//// Centralized handling when an error occurs.
////func (sps SchedulePlan2) errorAction(message mq.Message, logseq int64, err any) {
////	taskObject, ok := message.Properties["taskObject"]
////	taskObjectNeglect, ok1 := message.Properties["taskObjectNeglect"]
////	if ok && ok1 {
////		if !taskObjectNeglect.(bool) && taskObject != nil {
////			atomic.SwapInt64(taskObject.(scheduleTask.TaskDistributionO).ErrEnd, -1)
////		}
////	}
////	atomic.AddInt64(sps.errEnd, -1)
////	PlanContext.TaskBarSubsErrorSwap(sps.subTaskInfo, errorDispos(logseq, &sps, fmt.Sprintf("%v", err)))
////	if PlanContext.TaskBarSubsStatusGet(sps.subTaskInfo) != -2 {
////		PlanContext.TaskBarSubsStatusSwap(sps.subTaskInfo, "error")
////	}
////	PlanContext.TpodTableStatusSwap(terminalPods[getTableName(sps.schema, sps.table)].Rows.Status, "error")
////}
////func (sps *SchedulePlan2) modifyDatafixMqMsgStatus() {
////	atomic.SwapInt64(sps.tableAbnormalStatus, 1)
////}
//
////func errorDispos(_ int64, plan *SchedulePlan2, err string) (errstr string) {
////	if strings.EqualFold(err, "column length abnormal") { //表结构不一致
////		var srcColumn, dstColumn []string
////		var returnerror bytes.Buffer
////		for _, v := range plan.tableColData.SColumnInfo {
////			srcColumn = append(srcColumn, v["columnName"])
////		}
////		for _, v := range plan.tableColData.DColumnInfo {
////			dstColumn = append(dstColumn, v["columnName"])
////		}
////		aa := &ea.CheckSumTypeStruct{}
////		add, del := aa.Arrcmp(srcColumn, dstColumn)
////		for _, v := range add {
////			returnerror.WriteString(fmt.Sprintf("add column %v ,", v))
////		}
////		for _, v := range del {
////			returnerror.WriteString(fmt.Sprintf("drop column %v,", v))
////		}
////		errstr = returnerror.String()
////		returnerror.Reset()
////	} else {
////		errstr = err
////	}
////	return
////}
//
//// startWhereInit implements the where-add feature: filters by schema, table, and index-column
//// information to select the where-add SQL clause and the leading index column.
////func startWhereInit(schema, table string, indexColumn []string) (s, i string) {
////	if v, ok := GlobalPConfigs.schema.WhereAdd[fmt.Sprintf("%s%s%s", schema, global.SchemaTableSplist, table)]; ok && v != "" {
////		var idxcColumn string
////		var whereAdd = make(map[string]int)
////		for _, v1 := range strings.Split(strings.ToLower(v), " and ") {
////			if len(strings.Split(strings.TrimSpace(v1), " ")) > 0 {
////				whereAdd[strings.Split(strings.TrimSpace(v1), " ")[0]]++
////			}
////		}
////		if len(indexColumn) > 1 {
////			var ff []string
////			for _, v1 := range indexColumn {
////				if _, ok1 := whereAdd[strings.ToLower(v1)]; !ok1 {
////					ff = append(ff, v1)
////				}
////			}
////			if len(ff) == 0 {
////				return v, ""
////			}
////			idxcColumn = ff[0]
////		} else {
////			idxcColumn = indexColumn[0]
////		}
////		s, i = v, strings.ToUpper(idxcColumn)
////	} else {
////		i = indexColumn[0]
////	}
////	return
////}
//
//func (sps *SchedulePlan2) quit() {
//	atomic.AddInt64(sps.mqProductCustomerMonitor.mqCustomerSeq, 1)
//}
//
//// Queries the total row count for source-side tables that have no index.
//func sampMissIndexCount(plan *SchedulePlan2, logseq int64) bool {
//	var (
//		event                        = "[sampMissIndexCount]"
//		vlog                         string
//		sourceTableSum, destTableSum uint64 = 0, 0
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	var (
//		partitionNameMap = map[string][]string{
//			"source": plan.samp.PartitionData.SourceName,
//			"dest":   plan.samp.PartitionData.DestName,
//		}
//		dbMap = map[string]map[string]*sql.DB{
//			"source": GlobalPConfigs.SDB,
//			"dest":   GlobalPConfigs.DDB,
//		}
//		shardMap = map[string]map[string]string{
//			"source": plan.samp.PartitionData.SourcePartitionShardMap,
//			"dest":   plan.samp.PartitionData.DestPartitionShardMap,
//		}
//		partitionIdMap = map[string]map[string]string{
//			"source": plan.samp.PartitionData.SourcePartitionShardIdMap,
//			"dest":   plan.samp.PartitionData.DestPartitionShardIdMap,
//		}
//		sumTable = map[string]*uint64{
//			"source": &sourceTableSum,
//			"dest":   &destTableSum,
//		}
//		wg sync.WaitGroup
//	)
//	for k, v := range partitionNameMap {
//		for _, partitionName1 := range v {
//			wg.Add(1)
//			go func(tableObject string, partitionName string) {
//				defer func() {
//					wg.Done()
//				}()
//				l := global.TableSumInput{
//					TableInfo: global.TableInfo{
//						Schema:        plan.schema,
//						Table:         plan.table,
//						PartitionName: partitionName,
//					},
//					Db:     dbMap[tableObject],
//					LogSeq: logseq,
//				}
//				if !strings.EqualFold(partitionName, "single") && len(partitionName) > 0 {
//					l.TableInfo.PartitionId = partitionIdMap[tableObject][partitionName]
//					l.TableInfo.ShardName = shardMap[tableObject][partitionName]
//				}
//				if t3, err := tableEr.Sum(tableEr.TableSum{DBType: k}).Count(l); err != nil {
//					vlog = fmt.Sprintf("(%d) %v %v Failed to obtain table data from source database count. error info is {%v}", logseq, callFuncInfo, event, err)
//					global.Wlog.Error(vlog)
//					PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, errorDispos(logseq, plan, fmt.Sprintf("%v", err)))
//					return
//				} else {
//					atomic.AddUint64(sumTable[tableObject], t3)
//				}
//			}(k, partitionName1)
//		}
//	}
//	wg.Wait()
//	atomic.AddUint64(plan.samp.CheckRows.SrcCount, *sumTable["source"])
//	atomic.AddUint64(plan.samp.CheckRows.DstCount, *sumTable["dest"])
//	atomic.SwapInt64(plan.subTaskInfo.Samp.SourceSum, int64(*sumTable["source"]))
//	atomic.SwapInt64(plan.subTaskInfo.Samp.DestSum, int64(*sumTable["dest"]))
//
//	atomic.AddInt64(terminalPods[getTableName(plan.schema, plan.table)].Samp.SrcSum, int64(*sumTable["source"]))
//	atomic.AddInt64(terminalPods[getTableName(plan.schema, plan.table)].Samp.DstSum, int64(*sumTable["dest"]))
//	if atomic.LoadUint64(sumTable["source"]) != atomic.LoadUint64(sumTable["dest"]) {
//		return false
//	}
//	vlog = fmt.Sprintf("(%d) %v %v Source database count obtains table data successfully. result is {%v}", logseq, callFuncInfo, event)
//	global.Wlog.Debug(vlog)
//	return true
//}
//
//// Computes the number of rows to sample according to the configured sampling ratio.
//func (sps *SchedulePlan2) sampRatioRows() {
//	s := atomic.LoadUint64(sps.samp.CheckRows.SrcCount)
//	if s <= 0 {
//		return
//	}
//	if GlobalPConfigs.rules.SampR.Proportion <= 0 {
//		return
//	}
//	sampRows := s * uint64(GlobalPConfigs.rules.SampR.Proportion) / 100
//	if sampRows >= s {
//		atomic.SwapUint64(sps.samp.CheckRows.SampCount, s)
//		return
//	}
//	atomic.SwapUint64(sps.samp.CheckRows.SampCount, sampRows)
//}
//
//// Computes the number of chunks to extract from the calculated number of sampled rows.
//func (sps *SchedulePlan2) sampChunkSum() {
//	s := atomic.LoadUint64(sps.samp.CheckRows.SampCount)
//	if s <= 0 {
//		return
//	}
//	sampChunk := s / uint64(GlobalPConfigs.rules.ChanRowCount)
//	if sampChunk <= 0 {
//		sampChunk = 1
//	}
//	atomic.SwapInt64(sps.subTaskInfo.Samp.SampProportionSum, int64(sampChunk))
//	atomic.SwapInt64(sps.subTaskInfo.Samp.SampProportionChunk, int64(sampChunk))
//	atomic.AddUint64(sps.samp.CheckRows.TokenBucketRich, atomic.LoadUint64(sps.samp.CheckRows.SrcCount)/uint64(GlobalPConfigs.rules.ChanRowCount))
//	atomic.SwapUint64(sps.samp.CheckRows.SampChunk, sampChunk)
//
//}
//
//// Builds the token bucket and picks the random chunk numbers used for sampling.
//// (The original comment here was a copy-paste of sampChunkSum's description.)
//func (sps *SchedulePlan2) generalTokenBucket() {
//	var (
//		tokenBucketC = make(chan int64, atomic.LoadUint64(sps.samp.CheckRows.TokenBucketRich))
//	)
//	rand.Seed(time.Now().UnixNano())
//
//	uniqueNums := make(map[int64]bool)
//	for uint64(len(uniqueNums)) < atomic.LoadUint64(sps.samp.CheckRows.SampChunk) {
//		randomNum := rand.Int63n(int64(*sps.samp.CheckRows.TokenBucketRich)) + 1
//		if !uniqueNums[randomNum] {
//			uniqueNums[randomNum] = true
//		}
//	}
//	sps.samp.Bucket.RandomValue = uniqueNums
//	// 创建令牌桶
//	go func() {
//		for i := int64(0); i < int64(atomic.LoadUint64(sps.samp.CheckRows.TokenBucketRich)); i++ {
//			tokenBucketC <- i + int64(1)
//		}
//		close(tokenBucketC)
//	}()
//	sps.samp.Bucket.TokenBucket = tokenBucketC
//}
//
//// sampQueryTableSql processes the data types of all columns of the table, converting
//// column types (e.g. time types) to strings when generating the query SQL.
//func (sps *SchedulePlan2) sampQueryTableSql(s global.IndexColumnStartPartValueP, partitionName string) (sqlStr map[string]*string, err error) {
//	var (
//		vlog  string
//		event = "[syncQueryTableSql]"
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	vlog = fmt.Sprintf("(%v) %v %s sync data source query chunk where is %v", s.LogSeq, callFuncInfo, event, s)
//	global.Wlog.Debug(vlog)
//	sqlStr = make(map[string]*string)
//	//查询该表的列名和列信息
//	for k, v := range map[string]dbExec.IndexColumnStruct{"source": sps.idxc, "dest": sps.dstIdxc} {
//		var ssql string
//		v.PartitionName = partitionName
//		if ssql, err = dbExec.TQuery(v).TmpGeneratingQuerySql(s); err != nil {
//			return nil, err
//		} else if ssql != "" {
//			sqlStr[k] = &ssql
//		}
//	}
//	return
//}
//
//// Performs row counting and data splitting on the returned data.
//func (sps *SchedulePlan2) sampDataCountSplist(st string, drive string) (stt string, sum int64) {
//	var (
//		vlog string
//		err  error
//		aa   = &ea.CheckSumTypeStruct{}
//	)
//	if !strings.EqualFold(GlobalPConfigs.rules.RowsR.Options, "data") {
//		if strings.EqualFold(drive, "cluster") && strings.Contains(sps.sshardName, "partition") {
//			var ss = make(map[string]int)
//			var sf []string
//			for _, v := range strings.Split(st, string(RowsDataSplict)) {
//				stt1 := strings.Split(v, string(ColumnDataSplict))
//				if sum, err = strconv.ParseInt(stt1[1], 10, 64); err != nil {
//					vlog = fmt.Sprintf("(%d) Table row count conversion failed. data is %s, error is %s", sps.logThreadSeq, st, err)
//					global.Wlog.Error(vlog)
//					sum = 0
//					ss[stt1[0]]++
//				} else {
//					sum += sum
//					ss[stt1[0]]++
//				}
//			}
//			for k := range ss {
//				sf = append(sf, k)
//			}
//			stt = aa.CheckMd5(strings.Join(sf, ""))
//		} else {
//			if st != "" {
//				stt1 := strings.Split(st, string(ColumnDataSplict))
//				if sum, err = strconv.ParseInt(stt1[1], 10, 64); err != nil {
//					vlog = fmt.Sprintf("(%d) Table row count conversion failed. data is %s, error is %s", sps.logThreadSeq, st, err)
//					global.Wlog.Error(vlog)
//					sum = 0
//				}
//				stt = stt1[0]
//			}
//		}
//	} else {
//		if st != "" {
//			sum = int64(strings.Count(st, string(RowsDataSplict))) + 1
//			stt = st
//		}
//	}
//	return stt, sum
//}
//
//func (sps *SchedulePlan2) sampTableChunkDispos(db map[string]*sql.DB, idxc dbExec.IndexColumnStruct, sqlstr string, logseq int64) (any, error) {
//	var (
//		data any
//		err  error
//	)
//	if data, err = dbExec.TQuery(idxc).GeneratingQueryCriteria(db, sqlstr, logseq); err != nil {
//		return nil, err
//	} else if data == nil {
//		data = []*string{}
//	}
//	return data, nil
//}
//
//// sampQueryTableData processes the data types of all columns of the table, converting
//// column types (e.g. time types) to strings, and executes the SQL statements.
//func (sps *SchedulePlan2) sampQueryTableData(selectSql map[string]*string, logSeq int64) (any, any, error) {
//	var (
//		vlog         string
//		stt, dtt     any
//		ssum, dsum   int64
//		err          error
//		QuryRowDataE string
//	)
//	vlog = fmt.Sprintf("(%d) %v Query row data of table %s.%s", logSeq, QuryRowDataE, sps.schema, sps.table)
//	global.Wlog.Debug(vlog)
//	if stt, err = sps.sampTableChunkDispos(GlobalPConfigs.SDB, sps.idxc, *selectSql["source"], logSeq); err != nil {
//		return nil, nil, err
//	}
//	if dtt, err = sps.sampTableChunkDispos(GlobalPConfigs.DDB, sps.dstIdxc, *selectSql["dest"], logSeq); err != nil {
//		return nil, nil, err
//	}
//	for k, v := range map[*int64]any{
//		&ssum: stt,
//		&dsum: dtt,
//	} {
//		switch fmt.Sprintf("%v", reflect.TypeOf(v)) {
//		case "[]*string":
//			*k = int64(len(v.([]*string)))
//		}
//	}
//	atomic.AddInt64(sps.subTaskInfo.Samp.SAccumulate, ssum)
//	atomic.AddInt64(sps.subTaskInfo.Samp.DAccumulate, dsum)
//	atomic.AddInt64(terminalPods[getTableName(sps.schema, sps.table)].Samp.SrcSamp, ssum)
//	atomic.AddInt64(terminalPods[getTableName(sps.schema, sps.table)].Samp.DstSamp, dsum)
//	return stt, dtt, nil
//}
//
//// TmpMd5Rows performs an md5 comparison of source and destination row data and returns
//// the differing rows. (The original comment was a copy-paste of the query-SQL description.)
//func (sps *SchedulePlan2) TmpMd5Rows(stt, dtt any, logThreadSeq int64) (any, any, bool) {
//	var (
//		vlog           string
//		aa             = &ea.CheckSumTypeStruct{}
//		event          string
//		sttSum, dttSum int
//	)
//	global.Wlog.Info(fmt.Sprintf("(%d) %v Perform md5 check on the row data of table %s.%s.", logThreadSeq, event, sps.schema, sps.table))
//	for k, v := range map[*int]any{
//		&sttSum: stt,
//		&dttSum: dtt,
//	} {
//		switch fmt.Sprintf("%v", reflect.TypeOf(v)) {
//		case "[]*string":
//			*k = len(v.([]*string))
//		case "*string":
//			*k = 1
//		}
//	}
//	if sttSum != dttSum {
//		return stt, dtt, true
//	}
//	switch fmt.Sprintf("%v", reflect.TypeOf(stt)) {
//	case "[]*string":
//		var s1, d1 []*string
//		for k, v := range stt.([]*string) {
//			if aa.CheckMd5(*v) != aa.CheckMd5(*dtt.([]*string)[k]) {
//				s1 = append(s1, v)
//				d1 = append(d1, dtt.([]*string)[k])
//			} else {
//				runtime.SetFinalizer(v, nil)
//				runtime.SetFinalizer(dtt.([]*string)[k], nil)
//			}
//		}
//		if len(s1) > 0 || len(d1) > 0 {
//			return s1, d1, true
//		}
//	case "*string":
//		if aa.CheckMd5(*stt.(*string)) != aa.CheckMd5(*dtt.(*string)) {
//			global.Wlog.Info(fmt.Sprintf("(%d) %v There is a discrepancy in the md5 of the row data of table %s.%s.", logThreadSeq, event, sps.schema, sps.table))
//			return stt, dtt, true
//		} else {
//			global.Wlog.Debug(fmt.Sprintf("(%d) %v The row data md5 of table %s.%s is the same.", logThreadSeq, event, sps.schema, sps.table))
//			runtime.SetFinalizer(stt, nil)
//			runtime.SetFinalizer(dtt, nil)
//			vlog = fmt.Sprintf("(%d) %v Clean up the row data memory of table %s.%s.", logThreadSeq, event, sps.schema, sps.table)
//			global.Wlog.Debug(vlog)
//		}
//	}
//	vlog = fmt.Sprintf("(%d) %v The md5 verification of the row data of table %s.%s is completed.", logThreadSeq, event, sps.schema, sps.table)
//	global.Wlog.Debug(vlog)
//	return nil, nil, false
//}
//
//// sampArrcmp compares the source and destination row-data arrays and collects the
//// differences. (The original comment was a copy-paste of the query-SQL description.)
//func (sps *SchedulePlan2) sampArrcmp(stt, dtt any, logThreadSeq int64) map[string][]*string {
//	var (
//		aa    = &ea.CheckSumTypeStruct{}
//		event string
//		A     = make(map[string][]*string)
//	)
//	global.Wlog.Info(fmt.Sprintf("(%d) [%s] Perform rows data arrcmp of table %s.%s.", logThreadSeq, event, sps.schema, sps.table))
//
//	add, del := aa.PointerArrcmp(stt.([]*string), dtt.([]*string))
//	if len(del) > 0 {
//		A[string(Ldelete)] = del
//		atomic.AddInt64(sps.subTaskInfo.Samp.OutlierDetection, int64(len(del)))
//		atomic.AddInt64(terminalPods[getTableName(sps.schema, sps.table)].Samp.AbnormalSum, int64(len(del)))
//	}
//	if len(add) > 0 {
//		A[string(Linsert)] = add
//		atomic.AddInt64(sps.subTaskInfo.Samp.OutlierDetection, int64(len(add)))
//		atomic.AddInt64(terminalPods[getTableName(sps.schema, sps.table)].Samp.AbnormalSum, int64(len(add)))
//	}
//	for _, v := range [][]*string{
//		stt.([]*string), dtt.([]*string),
//	} {
//		for _, v1 := range v {
//			runtime.SetFinalizer(v1, nil)
//		}
//	}
//	global.Wlog.Debug(fmt.Sprintf("(%d) [%s] The rows data arrcmp of table %s.%s is completed.", logThreadSeq, event, sps.schema, sps.table))
//	return A
//}
//func (sps *SchedulePlan2) sampToDataFixSql(l int64, sqls []*string, t Logo) ([]*string, error) {
//	var (
//		sqlstr               any
//		event                string
//		err                  error
//		insertColumn         []string
//		insertSql, deleteSql []string
//		applySql             []*string
//	)
//	vlog := fmt.Sprintf("(%d) %v Start generating table %s.%s delete or insert statement", l, event, sps.schema, sps.table)
//	global.Wlog.Debug(vlog)
//	for _, v := range sps.tableColData.SColumnInfo {
//		insertColumn = append(insertColumn, v["columnName"])
//	}
//	if strings.HasPrefix(sps.indexColumnType, string(Ipri)) {
//		sps.samp.GeneralSql.IndexType = string(Ipri)
//	} else if strings.HasPrefix(sps.indexColumnType, string(Iuni)) {
//		sps.samp.GeneralSql.IndexType = string(Iuni)
//	} else {
//		sps.samp.GeneralSql.IndexType = string(Imui)
//	}
//
//	s2 := fmt.Sprintf("INSERT INTO `%v`.`%v` (`%s`) VALUES ", sps.mschema, sps.mtable, strings.Join(insertColumn, "`,`"))
//	generalSqlInput := global.GeneralSqlInput{
//		LogSeq:  sps.logThreadSeq,
//		ColData: sps.tableColData.SColumnInfo,
//	}
//	for _, i := range sqls {
//		switch t {
//		case Ldelete:
//			//atomic.AddInt64(terminalPods[getTableName(sps.schema, sps.table)].Rows.DeleteSum, 1)
//			generalSqlInput.RowData = *i
//			if sqlstr, err = dbExec.General(sps.samp.GeneralSql).Delete(generalSqlInput); err != nil {
//				return nil, err
//			}
//			str := sqlstr.(string)
//			if len(str) > 0 {
//				deleteSql = append(deleteSql, str)
//			}
//		case Linsert:
//			//			atomic.AddInt64(terminalPods[getTableName(sps.schema, sps.table)].Rows.InsertSum, 1)
//			generalSqlInput.RowData = *i
//			if sqlstr, err = dbExec.General(sps.samp.GeneralSql).Insert(generalSqlInput); err != nil {
//				return nil, err
//			}
//			str := sqlstr.(string)
//			str = strings.ReplaceAll(str, global.DataColumnsSplist, ",")
//			if len(str) > 0 {
//				insertSql = append(insertSql, str)
//			}
//		}
//	}
//	if len(insertSql) > 0 {
//		str := fmt.Sprintf("%v (%v);", s2, strings.Join(insertSql, "),("))
//		applySql = []*string{&str}
//	}
//	if len(deleteSql) > 0 {
//		var ll = make([]*string, len(deleteSql))
//		for k, v := range deleteSql {
//			v1 := v
//			ll[k] = &v1
//		}
//		applySql = ll
//	}
//	vlog = fmt.Sprintf("(%d) %v The repair statement for table %s.%s is generated.", l, event, sps.schema, sps.table)
//	global.Wlog.Debug(vlog)
//	return applySql, nil
//}
//
//// sampAbnormalDataDiff performs a second verification of differential data and generates repair statements.
//func (sps *SchedulePlan2) sampAbnormalDataDiff(diffQueryData map[string][]*string, logThreadSeq int64) (map[Logo][]*string, error) {
//	var (
//		vlog       string
//		adds, dels string
//		fixSql     = make(map[Logo][]*string)
//		err        error
//	)
//	vlog = fmt.Sprintf("(%d) Check table %s.%s to start differential data processing and generate repair statements ...", logThreadSeq, sps.schema, sps.table)
//	global.Wlog.Info(vlog)
//	for _, v := range []Logo{
//		Linsert,
//		Ldelete,
//	} {
//		if add, ok := diffQueryData[string(v)]; ok && len(add) > 0 {
//			global.Wlog.Debug(fmt.Sprintf("(%d) Check table %s.%s abnormal data insert is %v ", logThreadSeq, sps.schema, sps.table, diffQueryData[string(Linsert)]))
//			var ss []*string
//			if ss, err = sps.sampToDataFixSql(logThreadSeq, add, v); err != nil {
//				return nil, err
//			}
//			fixSql[v] = ss
//		}
//	}
//	global.Wlog.Debug(fmt.Sprintf("(%d) Check table %s.%s abnormal sql delete is %v and insert is %v ", logThreadSeq, sps.schema, sps.table, dels, adds))
//	vlog = fmt.Sprintf("(%d) Check table %s.%s to complete differential data processing and generate repair statements. !!!", logThreadSeq, sps.schema, sps.table)
//	global.Wlog.Info(vlog)
//	return fixSql, nil
//}
//
//// sampDataFixDiff repairs the differential data.
//func (sps *SchedulePlan2) sampDataFixDiff(fixSQL map[Logo][]*string, logThreadSeq int64) error {
//	var (
//		//vlog  string
//		wfile *os.File
//	)
//	for _, v := range []Logo{
//		Ldelete,
//		Linsert,
//	} {
//		if v == Ldelete {
//			wfile = sps.repair.FixFileDeleteF
//		}
//		if v == Linsert {
//			wfile = sps.repair.FixFileInsertF
//		}
//		if v1, ok := fixSQL[v]; ok {
//			sps.samp.ApplySql.WriteTextI = outPut.FileOut("text", map[string]any{"filen": wfile, "logSeq": logThreadSeq,
//				"bufSize": 1024 * 1024 * 4, "sqlType": "sql", "rowsS": "\n",
//			})
//			applySqlInput := global.ApplySqlInput{
//				LogSeq:         sps.logThreadSeq,
//				ApplySqlGather: v1,
//			}
//			if _, err := dbExec.Apply(sps.samp.ApplySql).DmlSql(applySqlInput); err != nil {
//				return err
//			}
//		}
//	}
//	return nil
//}
