package loadData

import (
	"bytes"
	"encoding/json"
	"fmt"
	"github.com/google/uuid"
	"db2s/global"
	"db2s/outPut"
	"db2s/scheduleTask"
	"db2s/tableEr"
	mq "db2s/topic-mq"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// getTableName joins a schema and a table name into the "schema.table"
// form used as the lookup key for the terminalPods map.
func getTableName(schema, table string) (s string) {
	return schema + "." + table
}

// partitionMap resolves a partition name to its shard name and partition id
// for either the source ("source") or destination ("dest") side. For "dest"
// the source partition name is first translated through SourceDestNameMap.
// Any other object value leaves all three results empty.
func (sp SchedulePlan2) partitionMap(partitionName string, object string) (partitionN, shardName, partitionId string) {
	if object == "source" {
		partitionN = partitionName
		shardName = sp.Object.PartitionData.Shard.Source[partitionN]
		partitionId = sp.Object.PartitionData.ShardId.Source[partitionN]
	} else if object == "dest" {
		partitionN = sp.Object.PartitionData.SourceDestNameMap[partitionName]
		shardName = sp.Object.PartitionData.Shard.Dest[partitionN]
		partitionId = sp.Object.PartitionData.ShardId.Dest[partitionN]
	}
	return
}

// errorDispos normalizes an error string before it is shown on the task
// progress bar. For the "column length abnormal" case (source/destination
// table structures differ) the detailed column-diff report is currently
// disabled, so an empty string is returned; any other error text passes
// through unchanged. The logseq and plan parameters are kept for interface
// compatibility with the disabled diff logic below.
func errorDispos(_ int64, plan *SchedulePlan2, err string) (errstr string) {
	if !strings.EqualFold(err, "column length abnormal") {
		errstr = err
		return
	}
	// Column-diff reporting (listing added/dropped columns between the source
	// and destination column sets) is intentionally disabled:
	//var srcColumn, dstColumn []string
	//var returnerror bytes.Buffer
	//for _, v := range plan.Object.TableColData.SColumnInfo {
	//	srcColumn = append(srcColumn, v["columnName"])
	//}
	//for _, v := range plan.tableColData.DColumnInfo {
	//	dstColumn = append(dstColumn, v["columnName"])
	//}
	//aa := &ea.CheckSumTypeStruct{}
	//add, del := aa.Arrcmp(srcColumn, dstColumn)
	//for _, v := range add {
	//	returnerror.WriteString(fmt.Sprintf("add column %v ,", v))
	//}
	//for _, v := range del {
	//	returnerror.WriteString(fmt.Sprintf("drop column %v,", v))
	//}
	//errstr = returnerror.String()
	//returnerror.Reset()
	return
}

// errorAction is the shared failure handler: it flags the per-task and
// per-plan error markers (ErrEnd = -1), records the error text on the
// sub-task, updates the progress bar, and marks the table's terminal-pod
// load status as "error".
func (sp SchedulePlan2) errorAction(message mq.Message, logseq int64, err any) {
	taskObject, ok := message.Properties["taskObject"]
	if ok {
		if taskObject != nil {
			// NOTE(review): this assertion panics if "taskObject" holds anything
			// other than an fp.TaskDistribution — confirm all producers set it so.
			atomic.SwapInt64(taskObject.(fp.TaskDistribution).ErrEnd, -1)
		}
	}
	atomic.AddInt64(sp.Status.ErrEnd, -1)
	sp.subTaskInfo.ErrorInfo.Store(fmt.Sprintf("%v", err))
	PlanContext.TaskBarSubsErrorSwap(sp.subTaskInfo, errorDispos(logseq, &sp, fmt.Sprintf("%v", err)))
	if PlanContext.TaskBarSubsStatusGet(sp.subTaskInfo) != -2 {
		PlanContext.TaskBarSubsStatusSwap(sp.subTaskInfo, "error")
	}
	// NOTE(review): indexing terminalPods without an ok-check panics if the
	// table key is absent — presumed registered earlier in the plan; verify.
	PlanContext.TpodTableStatusSwap(terminalPods[getTableName(sp.Object.Schema, sp.Object.Table)].Load.SyncStatus, "error")
}

// speedMonitor ticks once per second while the table loads: it derives
// per-second read/write latency deltas, flags the table as rate-limited
// when the reader outruns the writer (RWD) or either side becomes slow
// (RR/WR), resizes the token-bucket capacity and the reader/writer thread
// counts accordingly, and feeds the speed panel of the progress bar.
// It returns (after marking wg done) when done is closed.
//
// NOTE(review): the *sp.TableSpeedLimit counters are written elsewhere via
// atomic.Add* but read here with plain dereferences — these sampling reads
// are unsynchronized; confirm the approximation is intentional.
func (sp SchedulePlan2) speedMonitor(done chan struct{}, wg *sync.WaitGroup) {
	var (
		lastSecondExecReadTime, lastSecondExecWriteTime   uint64
		lastSecondExecReadCount, lastSecondExecWriteCount int64
		lastSecondReadSum, lastSecondWriteSum             int64
		speedRestricted                                   = "N"
		speedRestriction                                  string
		readRate, writeRate                               uint64
	)
	ll := time.NewTicker(time.Duration(1) * time.Second)
	// BUG FIX: release the ticker on exit; the original leaked it.
	defer ll.Stop()
	for {
		select {
		case <-ll.C:
			// Nothing read yet: no rates to compute on this tick.
			if *sp.TableSpeedLimit.ReadSum <= 0 {
				continue
			}
			var speedlimitStatus bool
			// Percentage gap between rows read and rows written.
			l := (*sp.TableSpeedLimit.ReadSum - *sp.TableSpeedLimit.WriteSum) * 100
			p := l / (*sp.TableSpeedLimit.ReadSum)
			// Average read cost per statement since the previous tick.
			rq := *sp.TableSpeedLimit.ReadAvgTime - lastSecondExecReadTime
			rc := *sp.TableSpeedLimit.ReadExecCount - lastSecondExecReadCount
			if rq == 0 || rc == 0 {
				readRate = 0
			} else {
				readRate = rq / uint64(rc)
			}
			// Average write cost per statement since the previous tick.
			wq := *sp.TableSpeedLimit.WriteAvgTime - lastSecondExecWriteTime
			wc := *sp.TableSpeedLimit.WriteExecCount - lastSecondExecWriteCount
			if wq == 0 || wc == 0 {
				writeRate = 0
			} else {
				writeRate = wq / uint64(wc)
			}
			// Classify the bottleneck: RWD = reader ahead of writer,
			// WR = writes too slow, RR = reads too slow.
			if p >= 20 { //src data rate > dst data rate ==> write rate low
				speedRestriction = "RWD"
				speedlimitStatus = true
			} else if writeRate > 1000 { // write side too slow
				speedRestriction = "WR"
				speedlimitStatus = true
			} else if readRate > 1000 { // read side too slow
				speedRestriction = "RR"
				speedlimitStatus = true
			}
			if speedlimitStatus {
				speedRestricted = "Y"
				atomic.SwapInt64(sp.TableSpeedLimit.Status, 1)
				// Throttle hard: shrink the token bucket 1000x (floor-guarded).
				if sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") >= 1000 {
					sp.TableSpeedLimit.SpeedLimit.SetCapacity(sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") / 1000)
				}
				if readRate > 1000 || writeRate > 1000 {
					// Back off one reader thread (keep at least 5) and one
					// writer thread (keep at least 1).
					if atomic.LoadInt64(sp.ReadOptimizer.Thread) > 5 {
						atomic.SwapInt64(sp.ReadOptimizer.Thread, atomic.LoadInt64(sp.ReadOptimizer.Thread)-1)
					}
					if atomic.LoadInt64(sp.writeOptimizer.Thread) > 1 {
						atomic.SwapInt64(sp.writeOptimizer.Thread, atomic.LoadInt64(sp.writeOptimizer.Thread)-1)
					}
				}
			} else {
				speedRestricted = "N"
				atomic.SwapInt64(sp.TableSpeedLimit.Status, 0)
				// Healthy: grow the token bucket 10x (ceiling-guarded).
				if sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") <= 100000000 {
					sp.TableSpeedLimit.SpeedLimit.SetCapacity(sp.TableSpeedLimit.SpeedLimit.GetRefill("cap") * 10)
				}
				// Both sides fast and active: add a thread to each.
				if readRate < 501 && rc > 0 && writeRate < 501 && wc > 0 {
					atomic.SwapInt64(sp.ReadOptimizer.Thread, atomic.LoadInt64(sp.ReadOptimizer.Thread)+1)
					atomic.SwapInt64(sp.writeOptimizer.Thread, atomic.LoadInt64(sp.writeOptimizer.Thread)+1)
				}
			}
			if GlobalPConfigs.result.Teletypewriter == "bar" {
				if PlanContext.TaskBarSubsStatusGet(sp.subTaskInfo) != -2 {
					PlanContext.SpeedTaskBarAccumulate(mq.SpeedLimitMonitor{
						Schema: sp.Object.Schema,
						Table:  sp.Object.Table,
						SS:     speedRestricted,
						SR:     speedRestriction,
						RS:     fmt.Sprintf("%v", *sp.TableSpeedLimit.ReadSum-lastSecondReadSum),
						WS:     fmt.Sprintf("%v", *sp.TableSpeedLimit.WriteSum-lastSecondWriteSum),
						RWD:    fmt.Sprintf("%v", p),
						RR:     fmt.Sprintf("%v", readRate),
						WR:     fmt.Sprintf("%v", writeRate),
						RP:     fmt.Sprintf("%v", *sp.TableSpeedLimit.ReadParallel),
						WP:     fmt.Sprintf("%v", *sp.TableSpeedLimit.WriteParallel),
						BS:     fmt.Sprintf("%v", sp.TableSpeedLimit.SpeedLimit.GetRefill("cap")),
						CGL:    fmt.Sprintf("%v", atomic.LoadInt64(sp.ReadOptimizer.Thread)),
						RBS:    fmt.Sprintf("%v", sp.ReadOptimizer.ChunkSum),
					})
				}
			}
			// Snapshot the counters so the next tick computes deltas.
			lastSecondReadSum = *sp.TableSpeedLimit.ReadSum
			lastSecondWriteSum = *sp.TableSpeedLimit.WriteSum
			lastSecondExecReadTime = *sp.TableSpeedLimit.ReadAvgTime
			lastSecondExecWriteTime = *sp.TableSpeedLimit.WriteAvgTime
			lastSecondExecReadCount = *sp.TableSpeedLimit.ReadExecCount
			lastSecondExecWriteCount = *sp.TableSpeedLimit.WriteExecCount
		case <-done:
			wg.Done()
			return
		}
	}
}

// srcMissIndexCount counts the rows of one source partition (optionally
// filtered by whereSql) and accumulates the result into the plan's
// source-check counter, the sub-task record count, and the table's
// terminal-pod sync total.
//
// NOTE(review): callers launch this with `go srcMissIndexCount(...)` and the
// wg.Add(1) below runs inside that goroutine — if wg.Wait executes before
// the goroutine is scheduled, Wait can return early. Add should normally
// happen in the caller before the `go` statement; confirm callers.
func srcMissIndexCount(wg *sync.WaitGroup, partitionName1 string, plan *SchedulePlan2, whereSql string, logseq int64) {
	var (
		event    = "[srcMissIndexCount]"
		tableSum int64
		vlog     string
	)
	wg.Add(1)
	defer func() {
		wg.Done()
	}()
	// Capture caller information for the log lines below.
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	var ShardName, PartitionId string
	// Non-partitioned tables use the fixed "single" shard label.
	if strings.EqualFold(plan.Object.PartitionData.Logo.Source, "single") {
		ShardName = "single"
	} else {
		_, ShardName, PartitionId = plan.partitionMap(partitionName1, "source")
	}
	if t3, err := Er.Sum(Er.TableSum{DBType: GlobalPConfigs.dSns.SrcDBName}).Count(global.TableSumInput{
		TableInfo: global.TableInfo{
			Schema:        plan.Object.Schema,
			Table:         plan.Object.Table,
			PartitionName: partitionName1,
			PartitionId:   PartitionId,
			ShardName:     ShardName,
		},
		SqlFilter: global.SqlFilter{
			WhereSql: whereSql,
		},
		Db:     GlobalPConfigs.SDB,
		LogSeq: logseq,
	}); err != nil {
		vlog = fmt.Sprintf("(%d) %v %v Failed to obtain table data from source database count. error info is {%v}", logseq, callFuncInfo, event, err)
		WLog.Error(vlog)
		// Surface the failure on the progress bar; counters stay untouched.
		PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, errorDispos(logseq, plan, fmt.Sprintf("%v", err)))
		return
	} else {
		tableSum = int64(t3)
	}
	atomic.AddInt64(plan.load.CheckRows.SrcCheckCount, tableSum)
	atomic.AddInt64(plan.subTaskInfo.RecordCount, tableSum)
	atomic.AddInt64(terminalPods[getTableName(plan.Object.Schema, plan.Object.Table)].Load.SyncSum, tableSum)
	vlog = fmt.Sprintf("(%d) %v %v Source database count obtains table data successfully. result is {%v}", logseq, callFuncInfo, event, tableSum)
	WLog.Debug(vlog)
}

// tableMeta is the JSON envelope persisted next to exported table data:
// the source-side row count plus the source column metadata, used to
// validate the file on re-import (see tableMetaWriteFile/tableMetaReadFile).
type tableMeta struct {
	TableSum   int64              `json:"sum"`    // source row count at export time
	ColumnData []global.TableMeta `json:"column"` // source column definitions
}

// tableMetaWriteFile persists the table's metadata (row count and source
// column layout) as one JSON document through the text output writer,
// closing the underlying metadata file on return.
func tableMetaWriteFile(plan *SchedulePlan2) {
	// Best-effort close: there is no caller to report a close error to.
	defer func() {
		_ = plan.load.WriteTableMeta.Close()
	}()
	// NOTE(review): this writer is keyed "fileN" while tableMetaReadFile uses
	// "fileName" — presumed intentional (handle vs. path); confirm in outPut.
	WriteTextI := outPut.FileOut("text", map[string]any{"fileN": plan.load.WriteTableMeta, "logSeq": int64(1),
		"bufSize": 1024 * 1024 * 4, "sqlType": "sql", "rowsS": GlobalPConfigs.rules.Load.RowsExcision, "columnS": GlobalPConfigs.rules.Load.ColumnExcision,
	})

	jsonData, err := json.Marshal(tableMeta{
		TableSum:   *plan.load.CheckRows.SrcCheckCount,
		ColumnData: plan.Object.TableColData.SColumnInfo,
	})
	if err != nil {
		// FIX: report through the package logger (consistent with the rest of
		// the file) instead of printing to stdout.
		WLog.Error(fmt.Sprintf("[tableMetaWriteFile] JSON encoding error: %v", err))
		return
	}
	WriteTextI.AppendWrite("", string(jsonData))
}
// tableMetaArr reconciles the column list recorded in the metadata file
// (old) with the current source column list (new).
//
// When column filtering is disabled (IgColumn == false) the two lists must
// match name-for-name positionally; any mismatch fails reconciliation.
// When filtering is enabled, only the recorded columns whose names still
// exist in the current list are kept. Returns the reconciled list and
// whether reconciliation succeeded.
func (sp *SchedulePlan2) tableMetaArr(old, new []global.TableMeta) ([]global.TableMeta, bool) {
	var new1 []global.TableMeta
	if !GlobalPConfigs.rules.Load.IgColumn {
		// A shorter current list can never match positionally (also guards the
		// new[k] access below against an index out of range).
		if len(new) < len(old) {
			return nil, false
		}
		for k, v := range old {
			// BUG FIX: the original `break` on a mismatch skipped the failure
			// return and reported partial success; fail immediately instead.
			if !strings.EqualFold(v.ColumnName, new[k].ColumnName) {
				return nil, false
			}
			new1 = append(new1, v)
		}
		return new1, true
	}
	// Filtering mode: keep every recorded column still present by name.
	for _, v := range old {
		for _, v1 := range new {
			if strings.EqualFold(v.ColumnName, v1.ColumnName) {
				new1 = append(new1, v)
				break
			}
		}
	}
	return new1, true
}
// tableMetaReadFile restores a table's metadata from the metadata file: it
// decodes the persisted tableMeta JSON, reconciles the recorded column list
// with the current source columns, and seeds the plan's row counters.
// Returns false when JSON decoding or column reconciliation fails.
func tableMetaReadFile(plan *SchedulePlan2) bool {
	var tmetadata tableMeta
	WriteTextI := outPut.FileOut("text", map[string]any{"fileName": plan.load.ReadTableFileName, "logSeq": int64(1),
		"rowsS": GlobalPConfigs.rules.Load.RowsExcision, "columnS": "", "curry": 1, "queue": GlobalPConfigs.rules.QueueSize,
	})
	o := WriteTextI.Read1()
	// Drain the reader; it emits *string lines (the single JSON document).
	// A direct type assertion replaces the original reflect.TypeOf string
	// comparison — same dispatch, no reflection.
	for line := range o.(chan any) {
		if s, ok := line.(*string); ok {
			if err := json.Unmarshal([]byte(*s), &tmetadata); err != nil {
				return false
			}
		}
	}
	s1, s2 := plan.tableMetaArr(tmetadata.ColumnData, plan.Object.TableColData.SColumnInfo)
	if !s2 {
		return false
	}
	plan.Object.TableColData.SColumnInfo = s1
	// Seed counters from the recorded row count.
	atomic.SwapInt64(plan.load.CheckRows.SrcCheckCount, tmetadata.TableSum)
	atomic.SwapInt64(plan.subTaskInfo.RecordCount, tmetadata.TableSum)
	atomic.SwapInt64(terminalPods[getTableName(plan.Object.Schema, plan.Object.Table)].Load.SyncSum, tmetadata.TableSum)
	return true
}
// ifTableLobColumn builds the INSERT prefix statement for the destination
// side of the given source partition. If any source column has a LOB data
// type, one "?" placeholder per column is generated so values are bound
// rather than inlined. Returns the rendered prefix or the generation error.
func (sp SchedulePlan2) ifTableLobColumn(sourcePartitionName string) (string, error) {
	var (
		hasLob      bool
		columnNames []string
	)
	for _, col := range sp.Object.TableColData.SColumnInfo {
		columnNames = append(columnNames, col.ColumnName)
		if strings.Contains(strings.ToLower(col.DataType), "lob") {
			hasLob = true
		}
	}
	var placeholders []string
	if hasLob {
		for range columnNames {
			placeholders = append(placeholders, "?")
		}
	}
	partitionName, shardName, partitionId := sp.partitionMap(sourcePartitionName, "dest")
	prefixInput := global.PrefixInput{
		LogSeq: sp.Status.LogSeq,
		Insert: global.Insert{
			Prefix: global.InsertPrefix{
				TableInfo: global.TableInfo{
					Schema:        sp.Object.Schema,
					Table:         sp.Object.Table,
					PartitionName: partitionName,
					ShardName:     shardName,
					PartitionId:   partitionId,
				},
				PlaceholderValue: placeholders,
				InsertColumn:     columnNames,
				LobSwitch:        hasLob,
			},
		},
	}
	prefix, err := Er.Sql(Er.TableSql{DBType: GlobalPConfigs.dSns.SrcDBName}).Prefix(prefixInput)
	if err != nil {
		return "", err
	}
	return prefix.(string), nil
}
// missSendMessage drives the "miss" (no usable index) load path: for every
// source partition it launches a row-count worker plus a reader goroutine
// that streams row chunks from syncMissSegmentQuery onto the write-data
// topic. Failures are routed through errorAction.
func (sp *SchedulePlan2) missSendMessage(wg *sync.WaitGroup, message mq.Message) {
	var (
		topic     = loadOutputWriteData
		tableName = string(message.Body)
		logseq    = message.Properties["logSeq"].(int64)
	)
	for _, partitionName1 := range sp.Object.PartitionData.Name.Source {
		go srcMissIndexCount(wg, partitionName1, sp, "", logseq)
		wg.Add(1)
		go func(partitionName string, uid uuid.UUID) {
			defer func() {
				wg.Done()
			}()
			// BUG FIX: err (and friends) are per-goroutine locals; the original
			// shared one err variable across all partition goroutines — a data
			// race when several partitions fail concurrently.
			var (
				tableRows    chan any
				insertPrefix string
				err          error
			)
			if tableRows, err = sp.syncMissSegmentQuery(partitionName, logseq); err != nil {
				sp.errorAction(message, logseq, err)
				return
			}
			if insertPrefix, err = sp.ifTableLobColumn(partitionName); err != nil {
				sp.errorAction(message, logseq, err)
				return
			}
			var fatherSonTags = make(map[string][]string)
			l := chanSendMsg{
				Object:   "miss",
				SendData: tableRows,
				SendMsgInfo: SendMsgInfo{
					CheckMod:  getCheckMod(),
					Topic:     topic,
					TableName: tableName,
				},
				SendMsg: sendMsg,
				FatherSon: FatherSon{
					FatherTags: map[string]any{"logSeq": logseq, "plan": message.Properties["plan"],
						"taskObjectNeglect": true, "label": map[string]any{uid.String(): []string{}},
						"insertPrefix": insertPrefix,
						"sendMsgKey":   "chunkData", "subTask": sp.subTaskInfo, "topic": string(topic),
						"sourcePartitionName": partitionName, "outFay": GlobalPConfigs.result.Teletypewriter},
					FatherSonTags: fatherSonTags,
				},
				SendMsgEnd: func() {
				},
			}
			if !chanMonitorSendMsg(&l) {
				return
			}
			// Only record MQ bookkeeping while the plan is still healthy.
			if atomic.LoadInt64(sp.Status.ErrEnd) > -1 {
				sp.mqProductCustomerMonitor.ProductMessage <- fatherSonTags
				atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelProductSend, 1)
				atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelCustomerReceive, 1)
				atomic.AddInt64(sp.mqProductCustomerMonitor.MqStartTiming, 1)
			}
		}(partitionName1, uuid.New())
	}
}

// IsSegmentSendMessage drives the indexed ("segment") load path: it builds
// a task descriptor for index-chunked reading, then for every source
// partition launches a row-count worker and a goroutine that generates the
// per-chunk WHERE clauses and publishes them on the concurrent-query topic.
func (sp *SchedulePlan2) IsSegmentSendMessage(wg *sync.WaitGroup, message mq.Message) {
	var (
		topic             = loadOutputIndexConcurrentQuery
		tableName         = string(message.Body)
		taskObject1       = fp.TaskDistribution{}
		logseq            = message.Properties["logSeq"].(int64)
		errEnd      int64 = 0
	)
	taskObject1 = fp.TaskDistribution{
		LogSeq:     logseq,
		DataSource: sp.ReadOptimizer.ChunkStartSource,
		TableObject: fp.TableObject{
			Schema: sp.Object.Schema,
			Table:  sp.Object.Table,
		},
		Db: fp.ConnDb{
			SourceObject: GlobalPConfigs.dSns.SrcDBName,
			Sdb:          GlobalPConfigs.SDB,
		},
		TableInfo: fp.TableInfo{
			TableColumn: sp.Object.TableColData.SColumnInfo,
			IndexColumn: sp.ReadOptimizer.IndexColumn,
			IndexName:   sp.ReadOptimizer.IndexName,
		},
		StaticP: fp.StaticParameter{
			CpLength:    GlobalPConfigs.rules.QueueSize,
			MulFactor:   sp.ReadOptimizer.Thread,
			ChunkNumber: sp.ReadOptimizer.ChunkSum,
		},
		ErrEnd: &errEnd,
	}
	for _, partitionName1 := range sp.Object.PartitionData.Name.Source {
		go srcMissIndexCount(wg, partitionName1, sp, "", logseq)
		wg.Add(1)
		go func(wg1 *sync.WaitGroup, partitionName string, taskObject fp.TaskDistribution) {
			var labelChecklist = make(map[string][]string)
			var insertPrefix string
			var err error
			defer func() {
				// Record how many chunk messages this partition produced.
				if atomic.LoadInt64(sp.Status.ErrEnd) > -1 {
					atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelProductSend, int64(len(labelChecklist)))
					atomic.AddInt64(sp.mqProductCustomerMonitor.MqStartTiming, 1)
				}
				wg1.Done()
			}()
			// taskObject is a per-goroutine copy; specialize it to this partition.
			taskObject.TableObject.PartitionName = partitionName
			_, taskObject.TableObject.ShardName, taskObject.TableObject.PartitionId = sp.partitionMap(partitionName, "source")
			var tableWhereAdd string
			if w1, ok1 := GlobalPConfigs.schema.WhereAdd[getSchemaToTableMap(sp.Object.Schema, sp.Object.Table)]; ok1 {
				tableWhereAdd = w1
			}
			if insertPrefix, err = sp.ifTableLobColumn(partitionName); err != nil {
				sp.errorAction(message, logseq, err)
				return
			}
			sqlWhere := taskObject.IsSegmentValue(tableWhereAdd, sp.ReadOptimizer.IndexColumn)
			//send message
			l := chanSendMsg{
				Object:   "first",
				SendData: sqlWhere,
				SendMsgInfo: SendMsgInfo{
					CheckMod:  getCheckMod(),
					Topic:     topic,
					TableName: tableName,
				},
				SendMsg:          sendMsg,
				SpeedLimit:       sp.TableSpeedLimit.SpeedLimit,
				SpeedLimitSwitch: true,
				Status:           sp.TableSpeedLimit.Status,
				FatherSon: FatherSon{
					FatherTags: map[string]any{"logSeq": logseq, "plan": message.Properties["plan"],
						"taskObject": taskObject,
						"sendMsgKey": "segmentSql", "label": map[string]any{"no": []string{}},
						"err": sp.Status.ErrEnd, "subTask": sp.subTaskInfo, "topic": string(topic),
						"insertPrefix": insertPrefix,
						"outFay":       GlobalPConfigs.result.Teletypewriter, "sourcePartitionName": partitionName},
					FatherSonTags: labelChecklist,
				},
				SendMsgEnd: func() {
				},
			}
			// FIX: the original wrapped this call in an empty `if !... {}`
			// branch (dead code); the result needs no handling here — the
			// deferred bookkeeping above runs either way.
			chanMonitorSendMsg(&l)
		}(wg, partitionName1, taskObject1)
	}
}

// whereSqlFilter handles a user-supplied WHERE filter (v) for an
// unpartitioned table: it counts the matching source rows, builds the
// filtered SELECT, and publishes it on the query-table-data topic.
//
// NOTE(review): the named return sqlWhere is never assigned and is always
// nil — confirm callers ignore it before removing the return value.
func (sp *SchedulePlan2) whereSqlFilter(wg *sync.WaitGroup, message mq.Message, v string) (sqlWhere chan any) {
	var (
		topic        = loadOutputQueryTableData
		tableName    = string(message.Body)
		logseq       = message.Properties["logSeq"].(int64)
		insertPrefix string
		err          error
	)
	// BUG FIX: the original also called wg.Add(1) here, but srcMissIndexCount
	// performs its own wg.Add/wg.Done pairing (as at every other call site),
	// so the extra Add was never matched by a Done and wg.Wait could hang.
	go srcMissIndexCount(wg, "single", sp, v, logseq)
	if insertPrefix, err = sp.ifTableLobColumn("single"); err != nil {
		sp.errorAction(message, logseq, err)
		return
	}
	tableSql := sp.tmpSyncTableQuerySql(global.StartPart{
		LogSeq: logseq,
		TableInfo: global.TableInfo{
			Schema: sp.Object.Schema,
			Table:  sp.Object.Table,
		},
		SelectColumn: global.SelectColumnInput{
			Schema:      sp.Object.Schema,
			Table:       sp.Object.Table,
			IndexColumn: sp.ReadOptimizer.IndexColumn,
			TableColumn: sp.Object.TableColData.SColumnInfo,
			IndexName:   sp.ReadOptimizer.IndexName,
		},
		WhereGenerate: global.WhereGenerateInput{
			Schema:      sp.Object.Schema,
			Table:       sp.Object.Table,
			TableColumn: sp.Object.TableColData.SColumnInfo,
			SqlFilter:   global.SqlFilter{WhereSql: v},
		},
	}, "single")
	if !sendMsg(getCheckMod(), topic, tableName, map[string]any{"logSeq": logseq, "plan": message.Properties["plan"],
		"taskObject": message.Properties["taskObject"], "label": message.Properties["label"],
		"strsql": tableSql, "err": sp.Status.ErrEnd, "sourcePartitionName": "single",
		"insertPrefix": insertPrefix, "WriteTextI": message.Properties["WriteTextI"],
		"subTask": sp.subTaskInfo, "topic": string(topic), "outFay": GlobalPConfigs.result.Teletypewriter}) {
		return
	}
	// Only record MQ bookkeeping while the plan is still healthy.
	if atomic.LoadInt64(sp.Status.ErrEnd) > -1 {
		atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelProductSend, 1)
		atomic.AddInt64(sp.mqProductCustomerMonitor.MqStartTiming, 1)
		atomic.AddInt64(sp.mqProductCustomerMonitor.MqLevelCustomerReceive, 1)
		atomic.AddInt64(sp.mqProductCustomerMonitor.MqProductSeq, 1)
	}
	return
}

// syncMissSegmentQuery reads all rows of one partition of a table that has
// no usable index ("miss" mode). It builds a task descriptor, starts the
// chunk reader, and returns a channel that delivers batches of up to
// writeOptimizer.FixSum rows each. The channel is closed when the source is
// drained or when the plan has been flagged as failed (ErrEnd < 0).
func (sp SchedulePlan2) syncMissSegmentQuery(partitionName string, logseq int64) (l chan any, err error) {
	var (
		event       = "[syncMissSegmentQuery]"
		vlog        string
		errEnd      int64 = 0
		shardName   string
		partitionId string
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	vlog = fmt.Sprintf("(%d) %v %v Start data migration of non-index table %v.%v", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Info(vlog)
	// Resolve shard/partition identifiers unless the table is unpartitioned.
	if strings.EqualFold(sp.Object.PartitionData.Object, "single") {
		//shardName = sps.Object.ShardName["source"]
	} else {
		shardName = sp.Object.PartitionData.Shard.Source[partitionName]
		partitionId = sp.Object.PartitionData.ShardId.Source[partitionName]
	}
	taskObject := fp.TaskDistribution{
		LogSeq:     logseq,
		DataSource: sp.ReadOptimizer.ChunkStartSource,
		TableObject: fp.TableObject{
			Schema:        sp.Object.Schema,
			Table:         sp.Object.Table,
			PartitionName: partitionName,
			ShardName:     shardName,
			PartitionId:   partitionId,
		},
		Db: fp.ConnDb{
			SourceObject: GlobalPConfigs.dSns.SrcDBName,
			DestObject:   GlobalPConfigs.dSns.DestDBName,
			Sdb:          GlobalPConfigs.SDB,
			Ddb:          GlobalPConfigs.DDB,
		},
		TableInfo: fp.TableInfo{
			TableColumn: sp.Object.TableColData.SColumnInfo,
			IndexColumn: sp.ReadOptimizer.IndexColumn,
		},
		StaticP: fp.StaticParameter{
			CpLength:    GlobalPConfigs.rules.QueueSize,
			ChunkNumber: sp.ReadOptimizer.ChunkSum,
		},
		ErrEnd: &errEnd,
	}
	sdr, err1 := taskObject.MissChunkObject(logseq)
	if err1 != nil {
		// BUG FIX: log err1 (the actual failure); the original formatted the
		// still-nil named return err, so the log always read "{<nil>}".
		vlog = fmt.Sprintf("(%d) %v %v Querying data from table %v.%v without index failed. error info is {%v}", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table, err1)
		WLog.Error(vlog)
		return nil, err1
	}
	l = make(chan any, GlobalPConfigs.rules.QueueSize)
	// Re-batch individual rows from the chunk reader into FixSum-sized slices.
	go func(dr any) {
		var m [][]*string
		var i int64
		for {
			select {
			case c, ok := <-dr.(chan any):
				if !ok {
					// Source drained: flush the final partial batch.
					if len(m) > 0 {
						l <- m
					}
					vlog = fmt.Sprintf("(%d) %v %v Data reading of non-index table %v.%v is completed.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
					WLog.Info(vlog)
					close(l)
					return
				} else {
					atomic.AddInt64(sp.TableSpeedLimit.ReadSum, 1)
					c1 := c.([]*string)
					e := atomic.LoadInt64(sp.Status.ErrEnd)
					if e < 0 {
						// Plan failed elsewhere: stop reading and close the output.
						vlog = fmt.Sprintf("(%d) %v %v Data reading from non-indexed table %v.%v terminates abnormally.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
						WLog.Warn(vlog)
						close(l)
						return
					}
					atomic.AddInt64(terminalPods[getTableName(sp.Object.Schema, sp.Object.Table)].Sync.SelectRows, 1)
					if atomic.LoadInt64(sp.Status.ErrEnd) > -1 {
						PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": sp.subTaskInfo, "select": int64(1), "insert": int64(0)})
					}
					i++
					if i > sp.writeOptimizer.FixSum {
						l <- m
						m = [][]*string{}
						// BUG FIX: the row appended below belongs to the new batch,
						// so the counter restarts at 1; the original reset it to 0,
						// making every batch after the first hold FixSum+1 rows.
						i = 1
					}
					m = append(m, c1)
				}
			}
		}
	}(sdr[sp.ReadOptimizer.ChunkStartSource])
	return
}

// tmpSyncTableQuerySql renders the SELECT statement used to read one source
// partition's rows for the given query specification. Returns an empty
// string when SQL generation fails.
func (sp SchedulePlan2) tmpSyncTableQuerySql(s global.StartPart, partitionName string) (strsql string) {
	const event = "[syncQueryTableSql]"
	pc, _, _, _ := runtime.Caller(1)
	callerFn := runtime.FuncForPC(pc)
	callerFile, callerLine := callerFn.FileLine(pc)
	callerInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callerFn.Name(), callerFile, callerLine)

	WLog.Debug(fmt.Sprintf("(%v) %v %s sync data source query chunk where is %v", s.LogSeq, callerInfo, event, s))
	// Specialize the spec to this partition before rendering.
	s.TableInfo.PartitionName = partitionName
	_, s.TableInfo.ShardName, s.TableInfo.PartitionId = sp.partitionMap(partitionName, "source")
	s.SelectColumn.IndexName = sp.ReadOptimizer.IndexName
	// Delegate SQL generation to the engine matching the source DB type.
	ssql, err := Er.RowsData(Er.TableRows{DBType: GlobalPConfigs.dSns.SrcDBName}).Sql(s)
	if err != nil {
		return
	}
	WLog.Debug(fmt.Sprintf("(%v) %v %s sync data source query sql is {%v}", s.LogSeq, callerInfo, event, ssql))
	return ssql
}

// syncQueryTableData executes the given SELECT against the source database
// for one partition, converting every column value to its string form
// (e.g. time types) via the engine layer, and accumulates read-latency
// counters for the speed monitor. Returns the engine's result set.
func (sp SchedulePlan2) syncQueryTableData(selectSql string, partitionName string, logseq int64) (any, error) {
	var (
		vlog  string
		stt   any
		err   error
		event = "[syncQueryTableData]"
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)

	vlog = fmt.Sprintf("(%d) %v %v start Query row data of table %s.%s", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Debug(vlog)
	l := global.RowsDataInput{
		LogSeq:    logseq,
		DB:        GlobalPConfigs.SDB,
		Sql:       selectSql,
		TableInfo: global.TableInfo{Schema: sp.Object.Schema, Table: sp.Object.Table, PartitionName: partitionName},
	}
	// NOTE(review): partitionMap's third result is the partition id, but here
	// it is assigned into BackendTableName (every other call site assigns it
	// to PartitionId) — confirm this is intentional for this query path.
	_, l.TableInfo.ShardName, l.TableInfo.BackendTableName = sp.partitionMap(partitionName, "source")
	if stt, err = Er.RowsData(Er.TableRows{DBType: GlobalPConfigs.dSns.SrcDBName}).Data(l); err != nil {
		vlog = fmt.Sprintf("(%d) %v %v The data query of table %s.%s fails. error is {%v}", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table, err)
		WLog.Error(vlog)
		return nil, err
	}
	// Feed read-latency/throughput counters sampled by speedMonitor.
	atomic.AddUint64(sp.TableSpeedLimit.ReadAvgTime, uint64(stt.(global.ExecSqlResult).Timeout))
	atomic.AddInt64(sp.TableSpeedLimit.ReadExecCount, 1)
	vlog = fmt.Sprintf("(%d) %v %v Let the data query of table %v.%v be completed.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Debug(vlog)
	return stt.(global.ExecSqlResult).Result, nil
}

// loadOutputWriteData drains the writer queue and appends each row chunk to
// the output writer, maintaining write throughput/latency counters, the
// parallel-writer gauge, and the progress bar. It returns when the queue is
// closed or when a write fails.
func (sp SchedulePlan2) loadOutputWriteData(WriteTextI any, logseq int64) {
	var sum int64
	for chunkData := range sp.writeOptimizer.QueueData {
		atomic.AddInt64(sp.TableSpeedLimit.WriteParallel, 1)
		// Row chunks arrive as [][]*string; other payloads leave sum unchanged
		// (matching the original reflect-based dispatch).
		if rows, ok := chunkData.([][]*string); ok {
			sum = int64(len(rows))
		}
		beginTime := time.Now()
		if !WriteTextI.(outPut.Result).AppendWrite("", chunkData) {
			// BUG FIX: restore the parallel-writer gauge before bailing out;
			// the original returned with WriteParallel left incremented.
			atomic.AddInt64(sp.TableSpeedLimit.WriteParallel, -1)
			return
		}
		if GlobalPConfigs.result.Teletypewriter == "bar" {
			if atomic.LoadInt64(sp.Status.ErrEnd) > -1 {
				PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": sp.subTaskInfo, "select": int64(0), "insert": sum})
			}
		}
		atomic.AddInt64(sp.TableSpeedLimit.WriteSum, sum)
		atomic.AddUint64(sp.TableSpeedLimit.WriteAvgTime, uint64(time.Since(beginTime).Milliseconds()))
		atomic.AddInt64(sp.TableSpeedLimit.WriteExecCount, 1)
		atomic.AddInt64(sp.TableSpeedLimit.WriteParallel, -1)
		atomic.AddInt64(terminalPods[getTableName(sp.Object.Schema, sp.Object.Table)].Load.InsertRows, sum)
		atomic.AddInt64(sp.mqProductCustomerMonitor.MqCustomerSeq, 1)
	}
}

// loadInputReadData1 streams rows back from the load file
// (sp.load.ReadFileName) and re-batches them into slices of up to
// writeOptimizer.FixSum rows on the returned channel, applying the table's
// rate limiter and updating read counters and the progress bar as it goes.
func (sp SchedulePlan2) loadInputReadData1() (l chan any) {
	var (
		result    [][]*string
		ii        int64
		beginTime = time.Now()
		ff        = outPut.FileOut("text", map[string]any{"fileName": sp.load.ReadFileName, "curry": 1, "queue": GlobalPConfigs.rules.QueueSize,
			"rowsS": GlobalPConfigs.rules.Load.RowsExcision, "columnS": GlobalPConfigs.rules.Load.ColumnExcision})
	)
	m := ff.Read1()
	l = make(chan any, GlobalPConfigs.rules.QueueSize)
	go func() {
		var emptyStatus = global.IsEmptyStruct(sp.TableSpeedLimit.SpeedLimit)
		for {
			// NOTE(review): when the limiter is engaged and tokens are
			// exhausted this loop spins hot on TryAcquire (busy-wait with no
			// sleep) — confirm the limiter refills quickly or add a backoff.
			if atomic.LoadInt64(sp.TableSpeedLimit.Status) == 1 {
				if !emptyStatus && !sp.TableSpeedLimit.SpeedLimit.TryAcquire() {
					continue
				}
			}
			select {
			case c, ok := <-m.(chan any):
				if !ok {
					// File drained: flush the final partial batch and close.
					if len(result) > 0 {
						atomic.AddUint64(sp.TableSpeedLimit.ReadAvgTime, uint64(time.Since(beginTime).Milliseconds()))
						atomic.AddInt64(sp.TableSpeedLimit.ReadSum, int64(len(result)))
						// NOTE(review): ReadExecCount is incremented by the batch
						// size here, while syncQueryTableData increments it by 1
						// per statement — verify which unit speedMonitor expects.
						atomic.AddInt64(sp.TableSpeedLimit.ReadExecCount, int64(len(result)))
						l <- result
					}
					close(l)
					return
				}
				var c2 []*string
				switch fmt.Sprintf("%v", reflect.TypeOf(c)) {
				case "[]*string":
					c2 = c.([]*string)
				}
				if len(c2) == 0 {
					continue
				}
				if GlobalPConfigs.result.Teletypewriter == "bar" {
					if PlanContext.TaskBarSubsStatusGet(sp.subTaskInfo) != -2 {
						PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": sp.subTaskInfo, "select": int64(1), "insert": int64(0)})
					}
				}
				atomic.AddInt64(terminalPods[getTableName(sp.Object.Schema, sp.Object.Table)].Load.SelectRows, 1)
				// Project the raw row onto the configured column set.
				c1 := sp.columnDataDis(c2)
				ii++
				if ii < sp.writeOptimizer.FixSum {
					result = append(result, c1)
				} else {
					// Batch full: emit it and start the next one with this row.
					l <- result
					atomic.AddUint64(sp.TableSpeedLimit.ReadAvgTime, uint64(time.Since(beginTime).Milliseconds()))
					atomic.AddInt64(sp.TableSpeedLimit.ReadSum, int64(len(result)))
					atomic.AddInt64(sp.TableSpeedLimit.ReadExecCount, int64(len(result)))
					beginTime = time.Now()
					ii = 0
					result = [][]*string{c1}
				}
			}
		}
	}()
	return l
}

// columnDataDis projects a raw file row (one *string per column; nil for a
// NULL value) onto the configured source column set. When column filtering
// (IgColumn) is enabled, each kept column's ColumnSeq selects the source
// position; otherwise the row passes through unchanged. Returns nil when a
// ColumnSeq value is not numeric.
func (sp SchedulePlan2) columnDataDis(s []*string) []*string {
	var news []*string
	// Diagnostic dump if the projection panics (e.g. ColumnSeq out of range).
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("-------------dddff:", sp.Object.TableColData.SColumnInfo)
			for k, v := range s {
				// BUG FIX: guard nil entries — dereferencing a nil *string here
				// would panic inside the recovery handler and crash the process.
				if v == nil {
					fmt.Println("--------000000:", k, "<nil>")
					continue
				}
				fmt.Println("--------000000:", k, *v)
			}
			for k, v := range news {
				if v == nil {
					fmt.Println("--------111111:", k, "<nil>")
					continue
				}
				fmt.Println("--------111111:", k, *v)
			}
		}
	}()
	if GlobalPConfigs.rules.Load.IgColumn {
		for _, v := range sp.Object.TableColData.SColumnInfo {
			seq, err := strconv.Atoi(v.ColumnSeq)
			if err != nil {
				// Non-numeric column sequence: the row cannot be mapped.
				return nil
			}
			// Seq is 1-based; non-positive entries are skipped.
			if seq > 0 {
				news = append(news, s[seq-1])
			}
		}
	} else {
		news = s
	}
	return news
}

// loadDataInputGeneralInsert1 turns a chunk of rows into INSERT work items.
//
// The chunk is split into sub-chunks of at most writeOptimizer.FixSum rows.
// When the prefix uses bind placeholders ("?,?" — the LOB path) the raw row
// slices are returned directly on a closed channel with the second result
// true. Otherwise each sub-chunk is rendered into an INSERT statement
// concurrently, and the rendered statements are merged into transaction-
// sized groups (64 statements per configured writer thread) on the
// returned channel, with the second result false. The first map key of
// every emitted element carries the row count it represents.
func (sp SchedulePlan2) loadDataInputGeneralInsert1(insertPrefix string, chunkData any, logseq int64) (chan any, bool, error) {
	var (
		chunkCount       [][]*string
		vlog             string
		event            = "[generalInsert]"
		insertRows       int64
		generalMergeChan = make(chan any, GlobalPConfigs.rules.QueueSize)
		generalChan      = make(chan any, GlobalPConfigs.rules.QueueSize)
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	vlog = fmt.Sprintf("(%v) %v %v Table %v.%v starts to determine whether there is a lob field in the table.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Debug(vlog)
	vlog = fmt.Sprintf("(%v) %v %v The insert prefix statement of table %v.%v is generated. result is {%v}", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table, insertPrefix)
	WLog.Debug(vlog)
	chunkCount = chunkData.([][]*string)
	var ll [][][]*string
	insertRows = int64(len(chunkCount))
	// Split the chunk into sub-chunks of at most FixSum rows.
	if insertRows > sp.writeOptimizer.FixSum {
		var sum int64
		var mm [][]*string
		for _, v := range chunkCount {
			sum++
			if sum <= sp.writeOptimizer.FixSum {
				mm = append(mm, v)
			} else {
				ll = append(ll, mm)
				// BUG FIX: v seeds the new sub-chunk below, so the counter
				// restarts at 1; the original reset it to 0, making every
				// sub-chunk after the first hold FixSum+1 rows.
				sum = 1
				mm = [][]*string{v}
			}
		}
		if len(mm) > 0 {
			ll = append(ll, mm)
		}
		mm = nil
	} else {
		ll = append(ll, chunkCount)
	}
	// Bind-placeholder (LOB) path: hand the raw rows straight back.
	if strings.Contains(insertPrefix, "?,?") {
		for _, v := range ll {
			generalChan <- map[int64][][]*string{int64(len(v)): v}
		}
		close(generalChan)
		return generalChan, true, nil
	}
	// Render each sub-chunk into an INSERT statement concurrently.
	go func() {
		var wg sync.WaitGroup
		for _, v := range ll {
			wg.Add(1)
			go func(vv [][]*string) {
				defer func() {
					wg.Done()
				}()
				generalSqlInput := global.PrefixInput{LogSeq: logseq, Insert: global.Insert{
					Value: global.InsertValue{
						TableInfo: global.TableInfo{
							Schema: sp.Object.Schema, Table: sp.Object.Table,
						},
						Prefix:             insertPrefix,
						RowData:            vv,
						ColData:            sp.Object.TableColData.SColumnInfo,
						IgnoreColumnLength: GlobalPConfigs.rules.Load.IgColumn,
						Esa:                sp.Object.Esa,
					},
				}}
				if row, err := Er.Sql(Er.TableSql{DBType: GlobalPConfigs.dSns.SrcDBName}).Insert(generalSqlInput); err != nil {
					vlog = fmt.Sprintf("(%v) %v %v Failed to generate insert statement for table %v.%v. error is {%v}", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table, err)
					WLog.Error(vlog)
					// Flag the whole plan as failed.
					atomic.SwapInt64(sp.Status.ErrEnd, -1)
					return
				} else if row != nil {
					sum := int64(len(vv))
					if len(*row.(*string)) > 0 {
						generalChan <- map[int64]*string{sum: row.(*string)}
					}
				}
				// NOTE(review): clearing finalizers that were never observably
				// set looks like a no-op — confirm the engine attaches
				// finalizers to these values.
				for _, v1 := range vv {
					for _, v2 := range v1 {
						runtime.SetFinalizer(v2, nil)
					}
				}
			}(v)
		}
		wg.Wait()
		close(generalChan)
	}()
	// Merge rendered statements into transaction-sized groups.
	go func() {
		var resultSum int64
		var insertSum int64
		var resultGarth []*string
		for {
			select {
			case result, ok := <-generalChan:
				if !ok {
					if len(resultGarth) > 0 {
						generalMergeChan <- map[int64]any{insertSum: resultGarth}
					}
					close(generalMergeChan)
					return
				}
				resultSum++
				for k, v := range result.(map[int64]*string) {
					insertSum += k
					resultGarth = append(resultGarth, v)
				}
				// Cap the single-transaction write size:
				// one page is 16KB, one extent (64 pages) is 1MB, one segment
				// holds up to 64 extents (64MB); limiting to 64 statements per
				// writer thread keeps a transaction around 16 extents (~16MB).
				if resultSum >= 64*atomic.LoadInt64(sp.writeOptimizer.Thread) {
					generalMergeChan <- map[int64]any{insertSum: resultGarth}
					resultSum = 0
					resultGarth = []*string{}
				}
			}
		}
	}()
	vlog = fmt.Sprintf("(%v) %v %v The insert statement of table %v.%v is generated. Generate repair transactions according to the number of fixes.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Debug(vlog)
	return generalMergeChan, false, nil
}

// writeInsert executes a prepared set of INSERT statements (s) for the
// given source partition, recording write latency for the speed monitor.
// A nil statement set exits early without touching the database.
func (sp SchedulePlan2) writeInsert(lobSwitch bool, prefix string, s any, sourcePartitionName string, logseq int64) (err error) {
	var (
		vlog        string
		event       = "[writeInsert]"
		execTimeout any
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	vlog = fmt.Sprintf("(%v) %v %v Table %v.%v starts executing the insert statement.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Debug(vlog)
	// Empty statement set: nothing to execute.
	if s == nil {
		vlog = fmt.Sprintf("(%v) %v %v The insert statement of table %v.%v is an empty string and will exit early.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
		WLog.Debug(vlog)
		return
	}
	// BUG FIX: the original passed "src", which matches neither "source" nor
	// "dest" in partitionMap, so shard/partition info was always empty.
	// "source" matches the source-side connection used below (SDB/SrcDBName);
	// NOTE(review): confirm "dest" was not the intended side.
	partitionName, shardName, partitionId := sp.partitionMap(sourcePartitionName, "source")
	rowsActive := global.RowsActiveInput{
		LogSeq: logseq,
		DB:     GlobalPConfigs.SDB,
		TableInfo: global.TableInfo{
			Schema:        sp.Object.Schema,
			Table:         sp.Object.Table,
			LobSwitch:     lobSwitch,
			PartitionName: partitionName,
			ShardName:     shardName,
			PartitionId:   partitionId,
		},
		Optimizer: global.WriteOptimizer{
			SqlMode: GlobalPConfigs.rules.Load.SqlMode,
			// SqlLogBin: GlobalPConfigs.rules.Load.SqlLogBin,
		},
		ApplySqlGather: s,
		SqlPrefix:      prefix,
	}
	if execTimeout, err = Er.RActive(Er.TableRows{DBType: GlobalPConfigs.dSns.SrcDBName}).Insert(rowsActive); err != nil {
		vlog = fmt.Sprintf("(%v) %v %v The insert statement of table %v.%v executes incorrectly!!! error is {%v}", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table, err)
		WLog.Error(vlog)
		return err
	}
	// Feed write-latency counters sampled by speedMonitor.
	atomic.AddUint64(sp.TableSpeedLimit.WriteAvgTime, uint64(execTimeout.(int64)))
	atomic.AddInt64(sp.TableSpeedLimit.WriteExecCount, 1)
	vlog = fmt.Sprintf("(%v) %v %v The insert statement of table %v.%v is completed.", logseq, callFuncInfo, event, sp.Object.Schema, sp.Object.Table)
	WLog.Debug(vlog)
	return
}
