package tkM

import (
	"errors"
	"fmt"
	"db2s/Er"
	"db2s/Fp"
	"db2s/Meta"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/parDef"
	"db2s/ref"
	mq "db2s/topic-mq"
	"math"
	"reflect"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// getTableName joins a schema and a table name into the dotted
// "schema.table" form used throughout this package.
func getTableName(schema, table string) (s string) {
	s = schema + "." + table
	return
}

// error info: source/target columns mismatch

// errorAction performs the shared error handling when a task of this
// schedule plan fails: it flags the distributed task object and the plan
// status as errored, records the abnormal exit code on the MQ, and flips
// the sub-task and pod statuses to "error". It always returns true.
//
// NOTE(review): logSeq is currently unused, and the plan is re-read from
// message.Properties even though the method already has receiver sp —
// confirm both refer to the same plan instance.
func (sp SchedulePlan2) errorAction(message mq.Message, logSeq int64, err any) bool {
	fmt.Println("-----------err:", message.Topic, err)
	var plan = message.Properties["plan"].(*SchedulePlan2)
	// Mark the distributed task object (when one is attached) as errored.
	taskObject, ok := message.Properties["taskObject"]
	if ok {
		if taskObject != nil {
			atomic.SwapInt64(taskObject.(Fp.TaskDistribution).ErrEnd, -1)
		}
	}
	atomic.AddInt64(sp.Status.ErrEnd, -1)
	plan.MQ.AbnormalErrorCodeSwap()
	PlanContext.TaskBarSubsErrorSwap(sp.subTaskInfo, errorActive(&sp, fmt.Sprintf("%v", err)))
	// Only overwrite the sub-task status when it is not -2 (presumably a
	// terminal state that must be preserved — confirm).
	if PlanContext.TaskBarSubsStatusGet(sp.subTaskInfo) != -2 {
		PlanContext.TaskBarSubsStatusSwap(sp.subTaskInfo, "error")
	}
	PlanContext.TPodTableStatusSwap(sp.TPod.Status, "error")
	sp.TPod.Result = "error"
	return true
}

// speedMonitor wakes once per second to sample read/write throughput for
// the table. When throttling is flagged it shrinks the token-bucket
// capacity and the reader/writer thread counts, and re-grows them when the
// flag clears. When the output mode is "bar" it pushes a per-second
// snapshot to the task progress bar. The loop exits when a value arrives
// on done, at which point wg.Done is called exactly once.
//
// NOTE(review): the heuristics that set speedLimitStatus and readRate are
// commented out below, so the restricted branch is currently unreachable,
// readRate stays 0, and the reader thread count grows every tick without
// an upper bound — confirm this is intentional.
func (sp SchedulePlan2) speedMonitor(done chan struct{}, wg *sync.WaitGroup) {
	var (
		//lastSecondExecReadTime                uint64
		//lastSecondExecReadCount               int64
		lastSecondReadSum, lastSecondWriteSum int64
		speedRestricted                       = "N"
		speedRestriction                      string
		readRate                              uint64
	)
	ll := time.NewTicker(time.Duration(1) * time.Second)
	// Fix: release the ticker's resources when the monitor exits; the
	// ticker was previously never stopped.
	defer ll.Stop()
	for {
		select {
		case <-ll.C:
			//if *sps.TableSpeedLimit.ReadSum <= 0 {
			//	continue
			//}
			var speedLimitStatus bool
			// (disabled) difference between the read side and write side:
			//l := (*sps.TableSpeedLimit.ReadSum - *sps.TableSpeedLimit.WriteSum) * 100
			//p := l / (*sps.TableSpeedLimit.ReadSum)
			// (disabled) per-second rate of the read side:
			//rq := *sps.TableSpeedLimit.ReadAvgTime - lastSecondExecReadTime
			//rc := *sps.TableSpeedLimit.ReadExecCount - lastSecondExecReadCount
			//if rq == 0 || rc == 0 {
			//	readRate = 0
			//} else {
			//	readRate = rq / uint64(rc)
			//}
			// (disabled) read/write divergence judgment:
			//if p >= 20 { //src data rate > dst data rate   ==> write rate low
			//	speedRestriction = "RWD"
			//	speedLimitStatus = true
			//} else if readRate > 1000 { // read efficiency too poor
			//	speedRestriction = "RR"
			//	speedLimitStatus = true
			//}
			if speedLimitStatus {
				speedRestricted = "Y"
				atomic.SwapInt64(sp.TableSpeedLimit.Status, 1)
				// Throttle: shrink the bucket capacity by 1000x, keeping a
				// floor of 1000 on the pre-shrink value.
				if sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") >= 1000 {
					sp.TableSpeedLimit.SpeedLimit.SetCapacity(sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") / 1000)
				}
				if readRate > 1000 {
					if atomic.LoadInt64(sp.ReadOptimizer.Thread) > 5 {
						atomic.SwapInt64(sp.ReadOptimizer.Thread, atomic.LoadInt64(sp.ReadOptimizer.Thread)-1)
					}
					if atomic.LoadInt64(sp.writeOptimizer.Thread) > 1 {
						atomic.SwapInt64(sp.writeOptimizer.Thread, atomic.LoadInt64(sp.writeOptimizer.Thread)-1)
					}
				}
			} else {
				speedRestricted = "N"
				atomic.SwapInt64(sp.TableSpeedLimit.Status, 0)
				// Recover: grow the bucket capacity by 10x up to 1e9.
				if sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") <= 100000000 {
					sp.TableSpeedLimit.SpeedLimit.SetCapacity(sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") * 10)
				}
				if readRate < 501 {
					atomic.SwapInt64(sp.ReadOptimizer.Thread, atomic.LoadInt64(sp.ReadOptimizer.Thread)+1)
				}
			}
			if GlobalPConfigs.result.Teletypewriter == "bar" {
				if PlanContext.TaskBarSubsStatusGet(sp.subTaskInfo) != -2 {
					PlanContext.SpeedTaskBarAccumulate(mq.SpeedLimitMonitor{
						Schema: sp.Object.Schema,
						Table:  sp.Object.Table,
						SS:     speedRestricted,
						SR:     speedRestriction,
						RS:     fmt.Sprintf("%v", *sp.TableSpeedLimit.ReadSum-lastSecondReadSum),
						WS:     fmt.Sprintf("%v", *sp.TableSpeedLimit.WriteSum-lastSecondWriteSum),
						//RWD:    fmt.Sprintf("%v", p),
						RR: fmt.Sprintf("%v", readRate),
						RP: fmt.Sprintf("%v", *sp.TableSpeedLimit.ReadParallel),
						WP: fmt.Sprintf("%v", *sp.TableSpeedLimit.WriteParallel),
						BS: fmt.Sprintf("%v", sp.TableSpeedLimit.SpeedLimit.GetRefill("cap")),
						//NS: fmt.Sprintf("%v", atomi),
						RBS: fmt.Sprintf("%v", sp.ReadOptimizer.ChunkSum),
					})
				}
			}
			// Remember this second's totals so the next tick can report deltas.
			lastSecondReadSum = *sp.TableSpeedLimit.ReadSum
			lastSecondWriteSum = *sp.TableSpeedLimit.WriteSum
			//lastSecondExecReadTime = *sps.TableSpeedLimit.ReadAvgTime
			//lastSecondExecReadCount = *sps.TableSpeedLimit.ReadExecCount
		case <-done:
			wg.Done()
			return
		}
	}
}

// destObjectExistIf reports whether the given object already exists on the
// destination database, using a left-side metadata lookup keyed by the
// object's role.
func destObjectExistIf(Object global.Object, parameter parDef.Parameter) (res bool, err error) {
	meta := Meta.NewMeta([]string{GlobalPConfigs.dSns.DestDBName}, "left", Object.Role)
	meta.Parameter1 = append(meta.Parameter1, parameter)
	res, err = meta.GetObjectExistence()
	return
}
// tableMetaArr fetches source/target column metadata for the plan and
// reports whether the source-side column metadata is present (res=true).
//
// NOTE(review): the column-by-column comparison below is commented out, so
// groupBy is never incremented and the final check forces res=false
// whenever IgColumn is enabled — confirm this is the intended behavior.
func tableMetaArr(plan *SchedulePlan2, join string) (res bool, err error) {
	var (
		//event   = "[tableMetaArr]"
		groupBy int
	)
	var result any
	if result, err = tableColumnMeta(plan, join); err != nil || result == nil {
		return
	}
	// result is expected to be a non-empty []any whose first element holds
	// the source-side column metadata; a nil element means nothing to check.
	var s = result.([]any)[0]
	if s == nil {
		return
	}
	res = true
	//for _, source := range s.(global.Return).Result.([]map[string]any) {
	//	if !GlobalPConfigs.rules.Task.IgColumn {
	//		var status bool
	//		for _, target := range t.([]map[string]any) {
	//			if strings.EqualFold(fmt.Sprintf("%v", source["columnName"]), fmt.Sprintf("%v", target["columnName"])) {
	//				status = true
	//				break
	//			}
	//		}
	//		if !status {
	//			log.ErrorLog().Error(fmt.Sprintf("%v There are differences in the structural data of table %v.%v on the original target side. src data is {%v}", event, plan.Object.Schema, plan.Object.Table, source["columnName"]))
	//			res = false
	//			return
	//		}
	//		continue
	//	}
	//	for _, target := range t.(global.Return).Result.([]map[string]any) {
	//		if strings.EqualFold(fmt.Sprintf("%v", source["columnName"]), fmt.Sprintf("%v", target["columnName"])) {
	//			groupBy++
	//			break
	//		}
	//	}
	//}
	if groupBy == 0 && GlobalPConfigs.rules.Task.IgColumn {
		res = false
	}
	return
}
// tableObjectRoleRowsCheck runs the row-mode pre-checks for the plan
// attached to the message. Only the LOB / big-varchar column detection is
// currently active; the source/target column-consistency comparison is
// commented out.
func tableObjectRoleRowsCheck(message mq.Message) (err error) {
	plan := message.Properties["plan"].(*SchedulePlan2)
	//var event = "[tableObjectRoleRowsCheck]"
	// (disabled) compare source/target column consistency:
	//var exist bool
	//if exist, err = tableMetaArr(plan, "left"); err != nil || !exist {
	//	warnWriteInfo(fmt.Sprintf("%v %v The original target table structure consistency comparison of table %v.%v failed. The table will exit!!!", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	//	plan.errorAction(message, plan.Object.ObjectLogSeq, errors.New(fmt.Sprintf("Column length mismatch")))
	//	return
	//}
	//if exist, err = destObjectExistIf(plan.Object, plan.DstParameter); err != nil || !exist {
	//	fmt.Println("-------err:", err)
	//	return
	//}
	// Detect LOB / big varchar columns on the source table.
	return BigVarchar(plan)
}
// tableObjectRoleMetaCheck decides the DDL option for the plan's table:
// "Create" when the object is absent on the destination, "Alter" (plus
// TableTargetStatus=true) when it already exists.
func tableObjectRoleMetaCheck(message mq.Message) (err error) {
	plan := message.Properties["plan"].(*SchedulePlan2)
	plan.TPod.Options = "Create"
	var exists bool
	if exists, err = destObjectExistIf(plan.Object, plan.DstParameter); err != nil {
		return
	}
	if exists {
		plan.TableTargetStatus = true
		plan.TPod.Options = "Alter"
	}
	return
}
// tableObjectRoleActiveCheck dispatches the configured active modes to the
// matching pre-check, stopping at the first failure. Unknown modes are
// silently skipped.
func tableObjectRoleActiveCheck(message mq.Message) (err error) {
	for _, mode := range GlobalPConfigs.rules.Task.ActiveMode {
		switch mode {
		case "sync", "rows":
			err = tableObjectRoleRowsCheck(message)
		case "struct", "object":
			err = tableObjectRoleMetaCheck(message)
		}
		if err != nil {
			return
		}
	}
	return
}

// tableColumnMeta fetches column metadata for the plan's table from both
// the source and destination databases using the given join side.
func tableColumnMeta(plan *SchedulePlan2, join string) (result any, err error) {
	meta := Meta.NewMeta([]string{GlobalPConfigs.dSns.SrcDBName, GlobalPConfigs.dSns.DestDBName}, join, "table")
	meta.Parameter1 = append(meta.Parameter1, plan.SrcParameter, plan.DstParameter)
	result, err = meta.GetColumnMetaST()
	return
}
// BigVarchar detects LOB columns on the source table (falling back to a
// big-varchar check when no LOB columns exist) and records the detected
// column names and types on the plan's task pod, one per line.
// The name and type lists stay index-aligned; note that map iteration
// makes their relative order nondeterministic across runs.
func BigVarchar(plan *SchedulePlan2) (err error) {
	var res any
	var event = "[BigVarchar]"
	var columnName, columnType []string
	c := Meta.NewMeta([]string{GlobalPConfigs.dSns.SrcDBName}, "left", "table")
	c.Parameter1 = append(c.Parameter1, plan.SrcParameter)
	if res, err = c.GetLobColumnMetaS(); err != nil {
		return
	}
	if res == nil {
		// No LOB columns found; check for oversized varchar columns instead.
		if res, err = c.GetBigVarcharColumnMetaS(); err != nil || res == nil {
			return
		}
	}
	for name, typ := range res.(map[string]string) {
		columnName = append(columnName, name)
		// typ is already a string; the previous fmt.Sprintf("%v", ...) was
		// a redundant allocation.
		columnType = append(columnType, typ)
	}
	log.MainLog().Info(fmt.Sprintf("(%v) %v It is detected that the lob field exists in table %v.%v", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	plan.TPod.Task.TextCN = strings.Join(columnName, "\n")
	plan.TPod.Task.TextCT = strings.Join(columnType, "\n")
	return
}
// partType returns the source table's partition type as a string, or the
// empty string when no partition metadata is available.
func partType(plan *SchedulePlan2) (pType string, err error) {
	meta := Meta.NewMeta([]string{GlobalPConfigs.dSns.SrcDBName}, "left", "table")
	meta.Parameter1 = append(meta.Parameter1, plan.SrcParameter)
	var res any
	if res, err = meta.GetPartTypeMetaS(); err != nil || res == nil {
		return
	}
	pType = fmt.Sprintf("%v", res)
	return
}
// partSum returns the source table's partition count, or 0 when no
// partition metadata is available.
// NOTE(review): assumes GetPartSumMetaS returns an int64 — the type
// assertion panics otherwise.
func partSum(plan *SchedulePlan2) (sum int64, err error) {
	meta := Meta.NewMeta([]string{GlobalPConfigs.dSns.SrcDBName}, "left", "table")
	meta.Parameter1 = append(meta.Parameter1, plan.SrcParameter)
	var res any
	if res, err = meta.GetPartSumMetaS(); err != nil || res == nil {
		return
	}
	sum = res.(int64)
	return
}
// indexKeyChoose queries the source database for the preferred index key
// of the plan's table and stores the raw result on taskMetaResult. On a
// query error it logs, routes the message through handleError, and returns
// the error. A nil result is silently ignored.
func indexKeyChoose(message mq.Message) (err error) {
	const event = "[indexKeyChoose]"
	plan := message.Properties["plan"].(*SchedulePlan2)
	metaResult := message.Properties["taskMetaResult"].(*taskModeResultS)
	meta := Meta.NewMeta([]string{GlobalPConfigs.dSns.SrcDBName}, "left", "table")
	plan.SrcParameter.Con = GlobalPConfigs.db.Source.GetDB("single")
	meta.Parameter1 = append(meta.Parameter1, plan.SrcParameter)
	var res any
	if res, err = meta.GetKeyChooseS(); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v get source table %v.%v index key choose fail. error is %v ", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, err))
		handleError(message, event, err)
		return
	}
	if res != nil {
		metaResult.index.result = res
	}
	return
}

// IndexOptimizerTrace resolves the source table's index columns and runs
// the optimizer-trace analysis on the first index-column group. It returns
// (nil, nil) when the table has no index columns.
func (sp *SchedulePlan2) IndexOptimizerTrace() (result any, err error) {
	const event = "[IndexOptimizerTrace]"
	var metaEr Er.TablesMetaInfoEr
	if metaEr, err = Er.MetaInfo(Er.TableInfoMeta{DBType: GlobalPConfigs.dSns.SrcDBName}); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v get source table %v.%v index optimizer trace fail. error is %v ", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return
	}
	// First resolve the table's index columns.
	input := global.TablesMetaInfoInput{
		LogSeq: sp.Object.ObjectLogSeq,
		TableInfo: global.TableInfo{
			Schema: sp.Object.Schema,
			Table:  sp.Object.Table,
		},
	}
	if result, err = metaEr.IndexColumn(input); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v get source table %v.%v index column . error is %v ", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return
	}
	if result == nil {
		return
	}
	// Then trace only the first index-column group.
	if result, err = metaEr.IndexOptimizerTrace(global.TablesMetaInfoInput{
		LogSeq:      sp.Object.ObjectLogSeq,
		IndexColumn: result.([]global.IndexColumn)[:1],
	}); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v get source table %v.%v index optimizer trace fail. error is %v ", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return nil, err
	}
	return
}
// columnHistogram flattens an IndexColumnSumGroup into an ordered slice of
// SingleIndexResult: single-column indexes before multi-column ones, and
// within each group primary, then unique, then plain keys. Every entry is
// annotated with the deduplicated, sorted union of all index columns.
// Finally, indexes with at most two columns are hoisted to the front,
// ordered by descending cardinality; wider indexes keep their order.
func (sp *SchedulePlan2) columnHistogram(indexResult any) (result []global.SingleIndexResult, err error) {
	const event = "[columnHistogram]"
	if indexResult == nil {
		return
	}
	if fmt.Sprintf("%v", reflect.TypeOf(indexResult)) != "global.IndexColumnSumGroup" {
		err = errors.New(fmt.Sprintf("result type mismatch. current type:%v expected type:%v", reflect.TypeOf(indexResult), "global.IndexColumnSumGroup"))
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v get table %v.%v column Histogram meta data fail!!! error is %v", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return
	}
	group := indexResult.(global.IndexColumnSumGroup)
	typeNames := []string{"primary", "unique", "key"}
	var allIndexColumn []string
	for _, priority := range []global.IndexPrioritySort{group.Single, group.Multiple} {
		for k, bucket := range [][]global.SingleIndexResult{priority.Primary, priority.UniqueKey, priority.Key} {
			if len(bucket) == 0 {
				continue
			}
			for _, item := range bucket {
				item.Type = typeNames[k]
				result = append(result, item)
				allIndexColumn = append(allIndexColumn, item.Column...)
			}
		}
	}
	allIndexColumn = removeDuplicates(allIndexColumn)
	sort.Strings(allIndexColumn)
	for i := range result {
		result[i].AllIndexColumn = allIndexColumn
	}
	var narrow, wide []global.SingleIndexResult
	for _, item := range result {
		if len(item.Column) <= 2 {
			narrow = append(narrow, item)
		} else {
			wide = append(wide, item)
		}
	}
	sort.Slice(narrow, func(i, j int) bool {
		return narrow[i].Cardinality > narrow[j].Cardinality
	})
	result = append(append([]global.SingleIndexResult{}, narrow...), wide...)
	return
}
// columnSql builds the SQL statement that reads the index-column data for
// one chunk (StartPart) of the table, selecting allIndexColumn and tagging
// the statement with the chosen index name and output column type.
func (sp *SchedulePlan2) columnSql(indexName string, s global.StartPart, OutputColumnType string, allIndexColumn []string) (strSql string, err error) {
	const event = "[columnSql]"
	log.MainLog().Debug(fmt.Sprintf("(%v) %v Start to generating sql for query table %v.%v data.", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table))
	var builder Er.RowsDataEr
	if builder, err = Er.RowsData(Er.TableRows{DBType: GlobalPConfigs.dSns.SrcDBName}); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v failed to generating sql for query table %v.%v data. error is %v", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return "", err
	}
	// Attach the column-selection parameters for this chunk.
	s.SelectColumn.IndexName = indexName
	s.SelectColumn.OutputColumnType = OutputColumnType
	s.SelectColumn.SelectColumn = allIndexColumn
	if strSql, err = builder.Sql(s); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v failed to generating sql for query table %v.%v data. error is %v", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return
	}
	log.MainLog().Debug(fmt.Sprintf("(%v) %v Finish to generating sql for query table %v.%v data.", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table))
	return
}

// indexColumnData executes the chunk query selectSql against the source
// table and returns the raw rows (expected [][]*string). It also folds the
// execution time and row count into the table's read-side speed counters.
// Returns an error when selectSql is empty or the query fails.
func (sp SchedulePlan2) indexColumnData(selectSql string) (result any, err error) {
	const event = "[indexColumnData]"
	log.MainLog().Debug(fmt.Sprintf("(%d) %v start Query row data of table %s.%s", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table))
	var reader Er.RowsDataEr
	if reader, err = Er.RowsData(Er.TableRows{DBType: GlobalPConfigs.dSns.SrcDBName}); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%d) %v The data query of table %s.%s fails. error is {%v}", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return "", err
	}
	if len(selectSql) == 0 {
		return nil, errors.New(fmt.Sprintf("%v query index column data sql is entry. Execution process:{sql:%v}", event, selectSql))
	}
	input := global.RowsDataInput{
		Sql:       selectSql,
		TableInfo: global.TableInfo{Schema: sp.Object.Schema, Table: sp.Object.Table, BackendTableName: sp.Object.Table},
		Input: global.StartPartConfigInputP{
			SqlExecStopTime: time.Duration(GlobalPConfigs.rules.Task.TimeOut) * time.Second,
		},
	}
	input.TableInfo.ShardName = "single"
	var execResult global.ExecSqlResult
	if execResult, err = reader.Data(input); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("(%d) %v The data query of table %s.%s fails. error is {%v}", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return nil, err
	}
	var timeout int64
	switch fmt.Sprintf("%v", reflect.TypeOf(execResult)) {
	case "global.ExecSqlResult":
		timeout = execResult.Timeout
		result = execResult.Result
	default:
		err = errors.New(fmt.Sprintf("result type mismatch. current type:%v expected type:%v", reflect.TypeOf(execResult), "global.ExecSqlResult"))
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v get table %v.%v index column data fail!!! error is %v", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err))
		return
	}
	// Accumulate read-side statistics consumed by speedMonitor.
	if sp.TableSpeedLimit.ReadAvgTime != nil {
		atomic.AddUint64(sp.TableSpeedLimit.ReadAvgTime, uint64(timeout))
	}
	if sp.TableSpeedLimit.ReadExecCount != nil {
		atomic.AddInt64(sp.TableSpeedLimit.ReadExecCount, 1)
	}
	if sp.TableSpeedLimit.ReadSum != nil {
		atomic.AddInt64(sp.TableSpeedLimit.ReadSum, int64(len(result.([][]*string))))
	}
	log.MainLog().Debug(fmt.Sprintf("(%d) %v Let the data query of table %v.%v be completed.", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table))
	return result, nil
}
// columnDataHistogramActive folds one batch of query rows into the running
// per-column uniqueness histogram ch. result must be [][]*string (rows of
// nullable column values); any other type is rejected with an error. Each
// distinct non-nil value increments the column's unique counter exactly
// once; values recognized as Chinese/rare characters only reset the
// "Chinese" bucket. The row count is added to ch.Total, ch.Segment is
// incremented, and the sub-task progress gauges are advanced when present.
func (sp SchedulePlan2) columnDataHistogramActive(result any, uniqCountMap []map[string]int, ch *columnHistogram) error {
	var (
		event = "[columnDataHistogramActive]"
	)
	if result == nil {
		return errors.New(fmt.Sprintf("%v The input index column data is empty.", event))
	}
	switch fmt.Sprintf("%v", reflect.TypeOf(result)) {
	case "[][]*string":
		rows := result.([][]*string)
		for _, row := range rows {
			for col, cell := range row {
				// Fix: a SQL NULL arrives as a nil *string; dereferencing
				// it panicked before. NULLs carry no cardinality
				// information, so skip them.
				if cell == nil {
					continue
				}
				if _, ok := uniqCountMap[col][*cell]; !ok {
					if !global.IsChineseOrRare(*cell) {
						uniqCountMap[col][*cell]++
						atomic.AddInt64(ch.UniqueSum[col], 1)
					} else {
						uniqCountMap[col]["Chinese"] = 0
					}
				}
			}
		}
		atomic.AddInt64(ch.Total, int64(len(rows)))
		if sp.subTaskInfo != nil {
			atomic.AddInt64(sp.subTaskInfo.SAccumulate, int64(len(rows)))
			atomic.AddInt64(sp.subTaskInfo.DAccumulate, int64(len(rows)))
		}
		atomic.AddInt64(ch.Segment, 1)
	default:
		log.ErrorLog().Error(fmt.Sprintf("%v The input index column data type does not comply with the specification. Execution process:{indexType:%v}", event, reflect.TypeOf(result)))
		return errors.New(fmt.Sprintf("result type mismatch."))
	}
	return nil
}
// removeDuplicates returns the input values with duplicates removed,
// preserving the order of first occurrence. A nil or empty input yields a
// nil result. (The parameter was renamed from "strings", which shadowed
// the standard-library strings package.)
func removeDuplicates(values []string) []string {
	// Track strings that have already been seen.
	seen := make(map[string]struct{}, len(values))
	var result []string
	for _, v := range values {
		if _, ok := seen[v]; ok {
			continue
		}
		seen[v] = struct{}{}
		result = append(result, v)
	}
	return result
}
// columnSegmentation measures the per-column dispersion histogram for one
// candidate index: it splits the table into chunks (via IsSegmentValue),
// queries each chunk's index-column data, and accumulates unique-value
// counters into the returned columnHistogram. A background speedMonitor
// goroutine runs for the duration of the call and is stopped via the
// deferred signal on done. Aborts with an error on query failure, on
// chunking errors, or when Chinese data is detected (ch.Break == -1); it
// may also stop early once the configured HistogramScale fraction of the
// table has been sampled.
func (sp *SchedulePlan2) columnSegmentation(OutputColumnType string, uniqCountMap []map[string]int, seq global.SingleIndexResult) (ch columnHistogram, err error) {
	var (
		taskObject1       = Fp.TaskDistribution{}
		errEnd      int64 = 0
		wg          sync.WaitGroup
		done        = make(chan struct{}, 1)
		total       int64
		segment     int64
		result      any
		breakStatus int64
	)
	// Stop the speed monitor and wait for it on every exit path.
	defer func() {
		done <- struct{}{}
		wg.Wait()
	}()
	ch = columnHistogram{
		Total:   &total,
		Segment: &segment,
		Break:   &breakStatus,
		// One unique-value counter per tracked column: the index's own
		// columns for "index" mode, the full column union for "specify".
		UniqueSum: func() (s []*int64) {
			switch OutputColumnType {
			case "index":
				for k := 0; k < len(seq.Column); k++ {
					var uniqueSum int64
					s = append(s, &uniqueSum)
				}
				seq.AllIndexColumn = seq.Column
			case "specify":
				for k := 0; k < len(seq.AllIndexColumn); k++ {
					var uniqueSum int64
					s = append(s, &uniqueSum)
				}
			}
			return
		}(),
	}
	// Distribution descriptor that drives the chunking of the table.
	taskObject1 = Fp.TaskDistribution{
		LogSeq:     sp.Object.ObjectLogSeq,
		DataSource: sp.ReadOptimizer.ChunkStartSource,
		TableObject: Fp.TableObject{
			Schema:           sp.Object.Schema,
			Table:            sp.Object.Table,
			BackendTableName: sp.Object.Table,
			ShardName:        "single",
			RowsLimit:        sp.Object.RowsLimit,
		},
		//Db: Fp.ConnDb{
		//	SourceObject: GlobalPConfigs.dSns.SrcDBName,
		//	DestObject:   GlobalPConfigs.dSns.DestDBName,
		//	//Sdb:          GlobalPConfigs.SDB,
		//	//Ddb:          GlobalPConfigs.DDB,
		//},
		//TableInfo: Fp.TableInfo{
		//	//TableColumn: sp.Object.TableColData.SColumnInfo,
		//	IndexColumn: seq.Column,
		//	IndexName:   seq.Name,
		//},
		StaticP: Fp.StaticParameter{
			CpLength:        GlobalPConfigs.rules.QueueSize,
			MulFactor:       sp.ReadOptimizer.Thread,
			ChunkNumber:     sp.ReadOptimizer.ChunkSum,
			Scn:             GlobalPConfigs.rules.Task.Scn,
			QueryFilter:     "", //tableWhereAdd
			SqlExecStopTime: time.Duration(GlobalPConfigs.rules.Task.TimeOut) * time.Second,
		},
		//Result: make(chan any, GlobalPConfigs.rules.QueueSize),
		ErrEnd: &errEnd,
	}
	wg.Add(1)
	go sp.speedMonitor(done, &wg)
	taskObject1.First.Result = make(chan any, GlobalPConfigs.rules.QueueSize)
	taskObject1.IsSegmentValue()
	// Consume chunk descriptors until the producer closes the channel.
	for {
		select {
		case c, ok := <-taskObject1.First.Result:
			if !ok {
				return ch, nil
			}
			if len(c.(global.StartPart).Error) > 0 {
				sp.TPod.Result = "error"
				return ch, errors.New(fmt.Sprintf("%v", c.(global.StartPart).Error))
			}
			// Break == -1 means Chinese data was detected downstream.
			if atomic.LoadInt64(ch.Break) == -1 {
				atomic.SwapInt64(taskObject1.ErrEnd, -1)
				err = errors.New(fmt.Sprintf("Chinese data appears"))
				sp.TPod.Result = "error"
				return
			}
			// Early exit once the configured sampling fraction is reached.
			switch {
			case GlobalPConfigs.rules.Task.HistogramScale > 0 && GlobalPConfigs.rules.Task.HistogramScale < 100:
				if atomic.LoadInt64(sp.TPod.Task.Sum) > 0 {
					if atomic.LoadInt64(sp.subTaskInfo.SAccumulate) >= int64(float64(atomic.LoadInt64(sp.TPod.Task.Sum))*(float64(GlobalPConfigs.rules.Task.HistogramScale)/100)) {
						return ch, nil
					}
				}
			}
			var strSql string
			if strSql, err = sp.columnSql(seq.Name, c.(global.StartPart), OutputColumnType, seq.AllIndexColumn); err != nil {
				return
			}
			if result, err = sp.indexColumnData(strSql); err != nil {
				sp.TPod.Result = "error"
				return
			}
			if err = sp.columnDataHistogramActive(result, uniqCountMap, &ch); err != nil {
				sp.TPod.Result = "error"
				return
			}
			// Publish the running per-column cardinality to the progress bar.
			if sp.subTaskInfo != nil {
				PlanContext.TaskBarSubsValueSwap(sp.subTaskInfo, "cardinality", func() string {
					var s []string
					for k, v := range ch.UniqueSum {
						s = append(s, fmt.Sprintf("%v:%v", seq.AllIndexColumn[k], *v))
					}
					return strings.Join(s, ",")
				}())
			}
			result = nil
		}
	}
}
// getColumnSegmentation collects a per-column dispersion histogram for the
// candidate indexes: it tries each index in order and stops after the
// first one whose histogram is gathered successfully, then projects the
// collected column counters onto every candidate index. Candidates whose
// columns are incompletely matched or contain a zero counter are dropped.
// Returns a []columnHistogramMerge (possibly empty) as any.
func (sp *SchedulePlan2) getColumnSegmentation(indexSort []global.SingleIndexResult) any {
	var (
		OutputColumnType = "specify"
		event            = "[getColumnSegmentation]"
		result           columnHistogram
		histogram        []columnHistogramMerge
		err              error
	)
	for _, seq := range indexSort {
		if sp.subTaskInfo != nil {
			PlanContext.TaskBarSubsValueSwap(sp.subTaskInfo, "indexName", seq.Name)
			PlanContext.TaskBarSubsValueSwap(sp.subTaskInfo, "indexObject", "I")
			if sp.subTaskInfo.SAccumulate != nil {
				atomic.SwapInt64(sp.subTaskInfo.SAccumulate, 0)
			}
			if sp.subTaskInfo.DAccumulate != nil {
				atomic.SwapInt64(sp.subTaskInfo.DAccumulate, 0)
			}
		}
		var ch columnHistogram
		var uniqCountMap = make([]map[string]int, len(seq.AllIndexColumn))
		for k := range seq.AllIndexColumn {
			uniqCountMap[k] = make(map[string]int)
		}
		if ch, err = sp.columnSegmentation(OutputColumnType, uniqCountMap, seq); err != nil {
			// Fix: the arguments were previously passed in the wrong order
			// (seq.Name/seq.Column filled the schema/table placeholders).
			log.MainLog().Warn(fmt.Sprintf("(%v) %v Failed to query column data histogram. Execution process:{schema:%v,table:%v,indexName:%v,indexColumn:%v,err:%v}", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, seq.Name, seq.Column, err))
			continue
		}
		ch.Name = seq.Name
		ch.Column = seq.Column
		ch.AllColumn = seq.AllIndexColumn
		result = ch
		// One successful histogram is enough.
		break
	}
	for _, seq := range indexSort { // project counters onto each candidate index
		var (
			total int64
			chr   = columnHistogramMerge{
				Name:   seq.Name,
				Column: seq.Column,
				Type:   seq.Type,
				Total:  &total,
			}
		)
		for _, col := range seq.Column { // match each index column against the collected union
			for k2, all := range result.AllColumn {
				if col == all {
					chr.UniqueSum = append(chr.UniqueSum, result.UniqueSum[k2])
					break
				}
			}
		}
		if chr.Total != nil && result.Total != nil {
			atomic.AddInt64(chr.Total, atomic.LoadInt64(result.Total))
		}
		// Keep the candidate only when every column found a counter and
		// none of the counters is zero.
		if len(chr.Column) == len(chr.UniqueSum) {
			var appendSwitch bool
			for _, uniq := range chr.UniqueSum {
				if atomic.LoadInt64(uniq) == 0 {
					appendSwitch = true
					break
				}
			}
			if !appendSwitch {
				histogram = append(histogram, chr)
			}
		}
	}
	return histogram
}

// trendOptimizer classifies the movement between consecutive values:
// "up" for an increase, "down" for a decrease, "eq" for equality. The
// returned slice has len(data)-1 entries, one per adjacent pair.
func (sp *SchedulePlan2) trendOptimizer(data []*int64) (result []string) {
	previous := atomic.LoadInt64(data[0])
	for _, current := range data[1:] {
		value := atomic.LoadInt64(current)
		switch {
		case value > previous:
			result = append(result, "up")
		case value < previous:
			result = append(result, "down")
		default:
			result = append(result, "eq")
		}
		// Fix: advance the comparison baseline. The original never updated
		// previous, so every element was compared against data[0] rather
		// than its predecessor, contradicting the trend semantics above.
		previous = value
	}
	return
}
// histogramIndexChoose picks the chunking index for the table from the
// per-index dispersion histogram built earlier. columnHistogram must be
// []columnHistogramMerge; any other type is rejected. Candidates are tried
// in order; an index is accepted when the ratio of total rows to the
// column's dispersion is small enough (the rounded Total/1000/dispersion/100
// quotient is <= 2). The chosen index name/columns are recorded on the
// task pod, and a rate evaluation label is set from the final dispersion.
//
// NOTE(review): histogram holds a raw dispersion count here, yet it is
// rendered as a percentage and compared against the 30/50 thresholds —
// confirm the intended units.
func (sp *SchedulePlan2) histogramIndexChoose(columnHistogram any) (err error) {
	var (
		event       = "[indexChoose]"
		histogram   float64
		indexName   string
		indexColumn []string
	)
	log.MainLog().Info(fmt.Sprintf("(%v) %v Start index selection for table %v.%v ...", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table))
	switch {
	case !strings.EqualFold(fmt.Sprintf("%v", reflect.TypeOf(columnHistogram)), "[]taskMode.columnHistogramMerge"):
		err = errors.New(fmt.Sprintf("result type mismatch."))
		log.ErrorLog().Error(fmt.Sprintf("(%v) %v table %v.%v index choose fail!!!  error is %v current type:%v expected type:%v", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, err, reflect.TypeOf(columnHistogram), "[]columnHistogramMerge"))
		return
	case len(columnHistogram.([]columnHistogramMerge)) == 0:
		return
	}
outerLoop:
	for _, v := range columnHistogram.([]columnHistogramMerge) {
		indexName = v.Name
		indexColumn = []string{}
		sp.TPod.IndexName = indexName
		sp.TPod.IndexType = v.Type
		sp.TPod.IndexCol = strings.Join(v.Column, ",")
		// Only the first two columns of very wide indexes are considered.
		if len(v.Column) > 3 {
			v.Column = v.Column[:2]
			v.UniqueSum = v.UniqueSum[:2]
		}
		log.MainLog().Info(fmt.Sprintf("(%v) %v The index column of index %v of table %v.%v and the dispersion of the column are %v", sp.Object.ObjectLogSeq, event, v.Name, sp.Object.Schema, sp.Object.Table, func() string {
			var p []string
			for k1, v1 := range v.UniqueSum {
				p = append(p, fmt.Sprintf("%v:%v", v.Column[k1], atomic.LoadInt64(v1)))
			}
			return fmt.Sprintf("%v:[%v]", v.Name, strings.Join(p, ","))
		}()))
		switch {
		case len(v.Column) == 1:
			var dispersion int64
			for _, v1 := range v.UniqueSum {
				dispersion += atomic.LoadInt64(v1)
			}
			histogram = float64(dispersion)
			// Guard: a zero dispersion would divide by zero below.
			if dispersion > 0 && math.Round(float64(atomic.LoadInt64(v.Total)/1000/dispersion)/100) <= 2 {
				break outerLoop
			}
		case len(v.Column) <= 3:
			// The original switch on the trend ("up"/"down"/"eq") executed
			// an identical body in all three branches, so the branches are
			// collapsed here; the trend slice still drives the iteration
			// count (one pass per adjacent column pair).
			for k := range sp.trendOptimizer(v.UniqueSum) {
				// Evaluate the column preceding the pair (the first column
				// for the first pair).
				var seq int
				if k > 0 {
					seq = k - 1
				}
				var dispersion = atomic.LoadInt64(v.UniqueSum[seq])
				histogram = float64(dispersion)
				if dispersion > 0 && math.Round(float64(atomic.LoadInt64(v.Total)/1000/dispersion)/100) <= 2 {
					indexColumn = append(indexColumn, v.Column[:seq+1]...)
					indexName = fmt.Sprintf("%v(%v)", indexName, strings.Join(indexColumn, ","))
					sp.TPod.IndexName = indexName
					break outerLoop
				}
			}
			indexName = ""
		default:
			indexName = ""
			return
		}
	}
	sp.TPod.CardinalityRate = fmt.Sprintf("%v%%", histogram)
	switch {
	case histogram > 50:
		sp.TPod.Task.RateEvaluation = "优"
	case histogram > 30:
		sp.TPod.Task.RateEvaluation = "良"
	default:
		sp.TPod.Task.RateEvaluation = "差"
	}
	if histogram >= 30 {
		log.MainLog().Info(fmt.Sprintf("(%v) %v The selected index of table %v.%v is %v, and the dispersion degree of the column is %v.", sp.Object.ObjectLogSeq, event, sp.Object.Schema, sp.Object.Table, indexName, histogram))
	} else {
		indexName = ""
	}
	return
}
// normalIndexChoose records the pre-chosen index metadata on the plan's
// task pod and grades the index by the cardinality-to-rowcount percentage:
// "优" (excellent) above 50%, "良" (good) above 30% (or above 20% for
// tables up to 10M rows, or when the row count is 0), "差" (poor)
// otherwise. Panics are recovered and, like errors, routed through
// handleError. A nil columnHistogram is a no-op.
func normalIndexChoose(message mq.Message, columnHistogram any) (err error) {
	const event = "[normalIndexChoose]"
	var histogram float64
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
		if err != nil {
			handleError(message, event, err)
		}
	}()
	plan := message.Properties["plan"].(*SchedulePlan2)
	log.MainLog().Info(fmt.Sprintf("(%v) %v Start index selection for table %v.%v ...", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	if columnHistogram == nil {
		return
	}
	meta := columnHistogram.(parDef.IndexColumnMe)
	plan.TPod.IndexName = fmt.Sprintf("%v", meta.IndexName)
	plan.TPod.IndexType = fmt.Sprintf("%v", meta.ColumnKey)
	// LOB-typed columns disqualify the index.
	if strings.Contains(strings.ToLower(plan.TPod.Task.TextCT), "lob") {
		plan.TPod.IndexName = "missIndex"
	}
	// Record at most the first two index columns.
	var cols []string
	for k, col := range meta.ColumnMate {
		if k > 1 {
			continue
		}
		cols = append(cols, col.ColumnName)
	}
	plan.TPod.IndexCol = strings.Join(cols, ",")
	histogram = global.PercentFloat(meta.Cardinality, atomic.LoadInt64(plan.TPod.Task.Sum))
	switch {
	case global.CompareFloats(histogram, 50.00):
		plan.TPod.Task.RateEvaluation = "优"
	case global.CompareFloats(histogram, 30.00):
		plan.TPod.Task.RateEvaluation = "良"
	case global.CompareFloats(histogram, 20.00) && atomic.LoadInt64(plan.TPod.Task.Sum) <= 10000000:
		plan.TPod.Task.RateEvaluation = "良"
	case atomic.LoadInt64(plan.TPod.Task.Sum) == 0:
		plan.TPod.Task.RateEvaluation = "优"
	default:
		plan.TPod.Task.RateEvaluation = "差"
	}
	if histogram >= 30 {
		log.MainLog().Info(fmt.Sprintf("(%v) %v The selected index of table %v.%v is %v, and the dispersion degree of the column is %v.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, plan.TPod.IndexName, histogram))
	}
	return
}
