package syR

import (
	"bytes"
	"db2s/Fp"
	"db2s/arg"
	ea "db2s/encryptionAlgorithm"
	"db2s/full"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/parDef"
	"db2s/ref"
	mq "db2s/topic-mq"
	"errors"
	"fmt"
	"github.com/panjf2000/ants/v2"
	"reflect"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// renameTableActive backs up the target table by renaming it before a
// recover-mode sync. Partitioned tables cannot be backed up this way and
// yield an error instead. Panics are converted to errors via recoverCost.
func renameTableActive(plan *SchedulePlan2) (err error) {
	var event = "[renameTableActive]"
	defer func() {
		if r := recover(); r != nil {
			err = recoverCost(event, r)
		}
	}()
	switch {
	case len(plan.Object.PartName) > 0: // partitioned tables cannot be recovered via rename
		err = ref.ErrAddPrintf(event, fmt.Errorf("%v.%v is partition table,it is not recover.", plan.Object.Schema, plan.Object.Table))
	default:
		if err = rename(plan); err != nil {
			err = ref.ErrAddPrintf(event, err)
		}
	}
	return
}

// truncateTableActive empties the target table before a sync. In recover
// mode the table is renamed (preserving the old data) instead of being
// truncated. Failures are logged and returned; panics become errors.
func truncateTableActive(name []string, plan *SchedulePlan2) (err error) {
	var event = "[truncateTable]"
	defer func() {
		if r := recover(); r != nil {
			err = recoverCost(event, r)
			log.ErrorLog().Error(fmt.Sprintf("%v", err))
		}
	}()
	// Progress line for the operator console.
	fmt.Printf("-- [%v] %v report: begin truncate table %s.%s\n", time.Now().Format("2006-01-02 15:04:05"), arg.ToolName, plan.Object.Schema, plan.Object.Table)
	// Recover mode: keep the existing data by renaming instead of truncating.
	if GlobalPConfigs.rules.Sync.Recover {
		if err = renameTableActive(plan); err != nil {
			err = ref.ErrAddPrintf(event, err)
			log.ErrorLog().Error(fmt.Sprintf("%v rename table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), err))
		}
		return
	}
	if err = truncate(name, plan); err != nil {
		err = ref.ErrAddPrintf(event, err)
		log.ErrorLog().Error(fmt.Sprintf("%v truncate table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), err))
	}
	return
}

// errorActive rewrites a raw error string into a human-readable form.
// For the "column length abnormal" case (source/target table columns
// differ) it formats the added/dropped column lists; any other error
// string is returned unchanged.
//
// NOTE(review): srcColumn and dstColumn are declared but never populated,
// so Arrcmp always compares two empty slices and errStr comes back empty
// on the "column length abnormal" branch — presumably the column lists
// should be fetched via plan; confirm the original intent.
// NOTE(review): the plan parameter is currently unused.
func errorActive(plan *SchedulePlan2, err string) (errStr string) {
	if strings.EqualFold(err, "column length abnormal") { // table structures differ
		var srcColumn, dstColumn []string
		var returnError bytes.Buffer
		aa := &ea.CheckSumTypeStruct{}
		add, del := aa.Arrcmp(srcColumn, dstColumn)
		for _, v := range add {
			returnError.WriteString(fmt.Sprintf("add column %v ,", v))
		}
		for _, v := range del {
			returnError.WriteString(fmt.Sprintf("drop column %v,", v))
		}
		errStr = returnError.String()
		returnError.Reset()
	} else {
		errStr = err
	}
	return
}
// initErrorAction marks the plan's sub-task and pod as failed: it
// decrements the shared error counter, records the error text on the
// task bar, and flips the sub-task status to "error" unless it already
// holds the terminal value -2; the pod table status is always set to
// "error".
//
// NOTE(review): the podInfo parameter is unused.
func initErrorAction(plan *SchedulePlan2, err error, podInfo string) {
	atomic.AddInt64(plan.Status.ErrEnd, -1)
	PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, fmt.Sprintf("%v", err))
	if PlanContext.TaskBarSubsStatusGet(plan.subTaskInfo) != -2 {
		PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "error")
	}
	PlanContext.TPodTableStatusSwap(plan.TPod.Status, "error")
}
// runErrorAction records err against the plan carried in message: it
// flags the optional taskObject's error counter, sets the pod status and
// the MQ error status, then delegates sub-task bookkeeping to
// initErrorAction. A nil err is a no-op (checked before any property
// lookup, so a nil error never panics on a missing "plan" entry).
func runErrorAction(message mq.Message, err error) {
	if err == nil {
		return
	}
	var plan = message.Properties["plan"].(*SchedulePlan2)
	if taskObject, ok := message.Properties["taskObject"]; ok && taskObject != nil {
		atomic.SwapInt64(taskObject.(Fp.TaskDistribution).ErrEnd, -1)
	}
	PlanContext.TPodTableStatusSwap(plan.TPod.Status, "error")
	atomic.SwapInt64(plan.MQ.ErrStatus, -1)
	initErrorAction(plan, err, "errors")
}

// srcCount runs a row count for partition partitionName1 of the plan's
// table against the source database ("left" side) and fans the result
// out into the plan's checksum, sub-task and pod counters. logSeq is
// only used to correlate log lines. On failure the error is also pushed
// onto the task bar via errorActive.
func srcCount(partitionName1 string, plan *SchedulePlan2, logSeq int64) (err error) {
	var (
		event       = "[srcCount]"
		tableSum    int64
		countResult any
	)
	//defer recoverException(event)
	log.MainLog().Info(fmt.Sprintf("(%v) %v Start the row count of partition %v in %v table %v.", logSeq, event, partitionName1, GlobalPConfigs.dSns.SrcDBName, getTableName(plan.Object.Schema, plan.Object.Table)))
	// Build a single-table count task spanning source and target DSNs.
	s := full.NewFull([]string{GlobalPConfigs.dSns.SrcDBName, GlobalPConfigs.dSns.DestDBName}, "left", plan.Object.Role)
	plan.SrcParameter.Object.Partition = partitionName1
	s.Parameter1 = append(s.Parameter1, plan.SrcParameter)
	if countResult, err = full.GetFullCountST(s); err != nil {
		err = ref.ErrAddPrintf(event, err)
		PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, errorActive(plan, fmt.Sprintf("%v", err)))
		log.ErrorLog().Error(fmt.Sprintf("exec count for table %v fail. error is {%v}", getTableName(plan.Object.Schema, plan.Object.Table), ref.ErrAddPrintf(event, err)))
		return
	} else {
		tableSum = countResult.(int64)
	}
	// Accumulate the count into every progress counter that tracks it.
	atomic.AddInt64(plan.sync.CheckRows.SrcCheckCount, tableSum)
	atomic.AddInt64(plan.subTaskInfo.RecordCount, tableSum)
	atomic.AddInt64(plan.TPod.Sync.SyncSum, tableSum)
	log.MainLog().Info(fmt.Sprintf("(%d) %v The row count of Partition %v in table %v of %v was successfully counted. The result is %v.", logSeq, event, partitionName1, getTableName(plan.Object.Schema, plan.Object.Table), GlobalPConfigs.dSns.SrcDBName, tableSum))
	return
}
// count runs srcCount for one partition on a background goroutine,
// registering it on wg before launch. When the goroutine finishes (in
// either outcome) it raises the plan's QuitExec flag and releases wg.
func count(partitionName string, wg *sync.WaitGroup, plan *SchedulePlan2) {
	wg.Add(1)
	go func() {
		defer func() {
			atomic.SwapInt64(plan.MQ.FirstMsg.QuitExec, 1)
			wg.Done()
		}()
		// srcCount logs and records its own failures; nothing more to do here.
		_ = srcCount(partitionName, plan, plan.Object.ObjectLogSeq)
	}()
}
// whereSqlFilter handles a table synced with a user-supplied WHERE
// clause: it kicks off an async row count, builds the insert prefix and
// the source query SQL, publishes a single query-table-data message, and
// advances the MQ first/second stage bookkeeping counters. Panics are
// converted to errors and logged.
func whereSqlFilter(wg *sync.WaitGroup, message mq.Message) (err error) {
	var event = "[whereSqlFilter]"
	var tableSql string
	defer func() {
		if r := recover(); r != nil {
			err = recoverCost(event, r)
			log.ErrorLog().Error(fmt.Sprintf("%v", err))
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	log.MainLog().Info(fmt.Sprintf("%v Start processing wheresql...", event))
	// Row count runs concurrently; wg lets the caller wait for it.
	go count("single", wg, plan)
	// NOTE(review): newParameter is a copy with WhereSql applied but is
	// never used afterwards — if SrcParameter is a struct value this
	// assignment is silently dropped; confirm whether getQuerySql is
	// meant to receive it.
	newParameter := plan.SrcParameter
	newParameter.Options.WhereSql = plan.TableAttributes.WhereSql
	if plan.DstParameter.ExecInsert.Prefix, err = getInsertPre(plan); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("%v get trigger db insert prefix SQL for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), ref.ErrAddPrintf(event, err)))
		return
	}
	// A WHERE-filtered table is treated as one unpartitioned ("single") unit.
	message.Properties["sourcePartitionName"] = "single"
	message.Properties["sqlWhere"] = parDef.IndexPart{LogSeq: plan.Object.ObjectLogSeq, SpecifiedConditions: true}
	if tableSql, err = getQuerySql(message); err != nil {
		log.ErrorLog().Error(fmt.Sprintf("%v get source db query data SQL for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), ref.ErrAddPrintf(event, err)))
		return
	}
	if !plan.MQ.SendMsgF(getMsgSendData(plan, map[string]any{"logSeq": plan.Object.ObjectLogSeq, "taskObject": message.Properties["taskObject"],
		"strSql": tableSql, "topic": syncQueryTableData, "sourcePartitionName": "single"})) {
		return
	}
	// Advance first/second stage produce/consume/finish markers for the
	// single message published above.
	plan.MQ.FirstObjectCustomerAdd()
	plan.MQ.FirstMsgProductAdd()
	plan.MQ.FirstMsgCustomerAdd()
	plan.MQ.FirstMsgFinishMarkAdd()
	plan.MQ.SecondMsgProductAdd()
	plan.MQ.SecondMsgFinishMarkSwap()
	return
}

// IsSegmentSendMessage publishes one segmented-query message per source
// partition of the plan's table. For each partition it starts an async
// row count, obtains a schedule task distribution (segment timeout),
// stamps the partition name on the task bar, and hands the prepared
// message to the MQ monitor channel. The first-message finish marker is
// advanced once after all partitions are queued.
func IsSegmentSendMessage(wg *sync.WaitGroup, logSeq int64, message mq.Message) (err error) {
	var event = "[IsSegmentSendMessage]"
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	for _, partitionName1 := range plan.Object.PartName {
		// Partition name is passed by argument, so the goroutine does not
		// capture the loop variable.
		go count(partitionName1, wg, plan)
		var td Fp.TaskDistribution
		if td, err = getScheduleTaskDistribution(plan, "left", partitionName1, time.Duration(GlobalPConfigs.rules.Sync.TimeOutNew.Segment)); err != nil {
			err = ref.ErrAddPrintf(event, err)
			return
		}
		td.IsSegmentValue()
		PlanContext.TaskBarSubsValueSwap(plan.subTaskInfo, "partitionName", partitionName1)
		plan.MQ.SendMsg.MsgData = getMsgSendData(plan, map[string]any{"logSeq": logSeq, "sendMsgKey": "segmentSql", "topic": syncIndexConcurrentQuery, "sourcePartitionName": partitionName1})
		plan.MQ.SendMsg.MsgSendFunc = SendMsgActionFunc(plan.MQ.SendMsg.MsgData)
		plan.MQ.SendMsg.MsgModifyKey = td.First.Result
		plan.MQ.ChanMonitorSendMsg("IsSegmentSendMessage")
	}
	plan.MQ.FirstMsgFinishMarkAdd()
	return
}

// missIndexGetDataFlay fetches the chunk descriptor for a no-index table
// and starts an asynchronous streamed read of its data. Only the
// descriptor fetch is reported through the returned error; read failures
// are logged from the background goroutine.
func missIndexGetDataFlay(taskObject Fp.TaskDistribution, plan *SchedulePlan2) (err error) {
	var sdr map[string]any
	var event = "[missIndexGetDataFlay]"
	defer func() {
		if r := recover(); r != nil {
			err = recoverCost(event, r)
		}
	}()
	if sdr, err = taskObject.MissChunkObject(); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	// Read asynchronously with a goroutine-local error: the previous
	// version assigned the named return err after the function had
	// already returned (a data race whose value no caller could observe).
	go func() {
		if gErr := missIndexDataFlayMargeOrderBY(taskObject, plan, sdr[plan.ReadOptimizer.ChunkStartSource]); gErr != nil {
			gErr = ref.ErrAddPrintf(event, gErr)
			log.ErrorLog().Error(fmt.Sprintf("%v missIndex Get rowsData for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), gErr))
		}
	}()
	return
}
// missSendMessage drives the no-index sync path: for every source
// partition it starts an async row count, obtains a schedule task
// distribution (flow timeout), launches the streamed chunk read, and
// queues a general-insert message on the MQ monitor channel. The
// first-message finish marker is flipped on exit regardless of outcome.
//
// NOTE(review): insertPrefix is never assigned, so every message carries
// an empty "insertPrefix" — confirm whether getInsertPre was meant to be
// called here as in whereSqlFilter.
func missSendMessage(wg *sync.WaitGroup, message mq.Message) (err error) {
	var event = "[missSendMessage]"
	defer func() {
		if r := recover(); r != nil {
			err = recoverCost(event, r)
			log.ErrorLog().Error(fmt.Sprintf("%v", err))
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	defer plan.MQ.FirstMsgFinishMarkSwap()
	var insertPrefix string
	for _, partitionName1 := range plan.Object.PartName {
		log.MainLog().Info(fmt.Sprintf("(%v) %v Start reading the data of partition %v of table %v without index.", plan.Object.ObjectLogSeq, event, partitionName1, getTableName(plan.Object.Schema, plan.Object.Table)))
		go count(partitionName1, wg, plan)
		// The closure runs synchronously; it mixes the partitionName
		// argument with the outer partitionName1, which is safe here only
		// because they always hold the same value per iteration.
		func(partitionName string) {
			var td Fp.TaskDistribution
			if td, err = getScheduleTaskDistribution(plan, "left", partitionName, time.Duration(GlobalPConfigs.rules.Sync.TimeOutNew.Flow)); err != nil {
				err = ref.ErrAddPrintf(event, err)
				runErrorAction(message, err)
				log.ErrorLog().Error(fmt.Sprintf("%v get schedule task object for table %v(%v) fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), partitionName, err))
				return
			}
			PlanContext.TaskBarSubsValueSwap(plan.subTaskInfo, "partitionName", partitionName)
			if err = missIndexGetDataFlay(td, plan); err != nil {
				err = ref.ErrAddPrintf(event, err)
				runErrorAction(message, err)
				log.ErrorLog().Error(fmt.Sprintf("%v get schedule task object for table %v(%v) fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), partitionName, err))
				return
			}
			// Wire up the send/success/finish callbacks for this partition's
			// general-insert message, then hand it to the monitor channel.
			plan.MQ.SendMsg.MsgData = getMsgSendData(plan, map[string]any{"logSeq": plan.Object.ObjectLogSeq, "taskObjectNeglect": true, "sendMsgKey": "chunkData", "topic": syncGeneralInsert, "insertPrefix": insertPrefix, "sourcePartitionName": partitionName1})
			plan.MQ.SendMsg.MsgModifyKey = td.First.Result
			plan.MQ.SendMsg.MsgSuccessfulFunc = SendMsgExecSumFunc([]any{[]any{plan.MQ.SecondMsg.ProductMsg}, int64(1)})
			plan.MQ.SendMsg.MsgSendFunc = SendMsgActionFunc(plan.MQ.SendMsg.MsgData)
			plan.MQ.SendMsg.MsgFinishFunc = SendMsgExecSumFunc([]any{[]any{plan.MQ.FirstMsg.ProductMsg, plan.MQ.FirstMsg.CustomerMsg, plan.MQ.FirstMsg.CustomerObject, plan.MQ.SecondMsg.ProductSendMsgFinishMark}, int64(1)})
			plan.MQ.SpeedLimitSwitch = false
			plan.MQ.ChanMonitorSendMsg("missSendMessage")
			log.MainLog().Info(fmt.Sprintf("(%v) %v The data reading of partition %v in Table %v of %v has been completed.", plan.Object.ObjectLogSeq, event, partitionName1, getTableName(plan.Object.Schema, plan.Object.Table), GlobalPConfigs.dSns.SrcDBName))
		}(partitionName1)
	}
	return
}
// getQuerySql builds the source-side SELECT statement for one partition
// of the plan's table, applying the chunk condition carried in the
// message's "sqlWhere" property. On any failure (including a recovered
// panic) the error is propagated to runErrorAction at exit.
func getQuerySql(message mq.Message) (strSql string, err error) {
	var (
		event = "[getQuerySql]"
		res   any
	)
	// Both deferred actions read err through a closure at exit time; the
	// previous version passed err by value to defer, which froze its
	// then-nil value at registration and made runErrorAction a no-op.
	defer func() { runErrorAction(message, err) }()
	defer func() {
		// The previous condition compared err with itself
		// (err == nil && err != nil) and could never fire; a recovered
		// panic is now correctly surfaced as err.
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	var partitionName = message.Properties["sourcePartitionName"].(string)
	var sqlWhere = message.Properties["sqlWhere"].(parDef.IndexPart)
	log.MainLog().Debug(fmt.Sprintf("(%v) %v sync data source query chunk where is %v", sqlWhere.LogSeq, event, sqlWhere))
	c := full.NewFull([]string{GlobalPConfigs.dSns.SrcDBName}, "left", plan.Object.Role)
	newParameter := plan.SrcParameter
	newParameter.Con, newParameter.Meta, newParameter.Object.Partition = GlobalPConfigs.db.Source.GetDB(partitionName), sqlWhere, partitionName
	c.Parameter1 = append(c.Parameter1, newParameter)
	if res, err = c.GetFullSelectRowsSqlS(); err != nil {
		err = ref.ErrAddPrintf(event, err)
		log.ErrorLog().Error(fmt.Sprintf("%v get query data sql for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), err))
		return
	}
	if res == nil {
		return
	}
	strSql = res.(string)
	log.MainLog().Info(fmt.Sprintf("(%v) %v sync data source query sql for table %v.%v is {%v}", sqlWhere.LogSeq, event, plan.Object.Schema, plan.Object.Table, strSql))
	return
}

// getRowsData executes the chunk query carried in the message against
// the source database and returns the raw row data, with column values
// (e.g. time types) already stringified by the underlying full reader.
// Read timing feeds the MQ speed statistics. Errors — including
// recovered panics — are propagated to runErrorAction at exit.
func getRowsData(message mq.Message) (result any, err error) {
	var (
		event       = "[getRowsData]"
		res         any
		execTimeout int64
	)
	// Read err at function exit via closures; the previous version passed
	// err by value to defer, freezing its then-nil value at registration.
	defer func() { runErrorAction(message, err) }()
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	var plan, selectSql, partitionName, logSeq = message.Properties["plan"].(*SchedulePlan2), message.Properties["strSql"].(string), message.Properties["sourcePartitionName"].(string), message.Properties["logSeq"].(int64)
	log.MainLog().Debug(fmt.Sprintf("(%d) %v start Query row data of table %s.%s", logSeq, event, plan.Object.Schema, plan.Object.Table))
	c := full.NewFull([]string{GlobalPConfigs.dSns.SrcDBName}, "left", plan.Object.Role)
	newParameter := plan.SrcParameter
	newParameter.Con, newParameter.Meta = GlobalPConfigs.db.Source.GetDB(partitionName), selectSql
	c.Parameter1 = append(c.Parameter1, newParameter)
	if res, err = c.GetFullReadDataST(); err != nil {
		// The deferred runErrorAction reports this err; the explicit inner
		// call the old version made here was redundant.
		log.ErrorLog().Error(fmt.Sprintf("%v get source db rows data for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), ref.ErrAddPrintf(event, err)))
		return
	}
	if res != nil {
		execTimeout = res.(global.Return).TimeOut
		result = res.(global.Return).Result
	}
	plan.MQ.Speed.ReadAvgTimeAdd(uint64(execTimeout))
	plan.MQ.Speed.ReadExecCountAdd()
	log.MainLog().Debug(fmt.Sprintf("(%d) %v Let the data query of table %v.%v be completed.", logSeq, event, plan.Object.Schema, plan.Object.Table))
	return
}
// chunkDataType converts chunkData to [][]*string, returning nil when
// the dynamic type does not match (comma-ok assertion replaces the old
// type switch with an identical outcome).
func chunkDataType(chunkData any) (result [][]*string) {
	result, _ = chunkData.([][]*string)
	return
}
// getDstInsertManyValueSql builds the target-side ("right") multi-value
// INSERT statement for one batch of rows (manyValue). The concrete shape
// of the returned row depends on the underlying full generator; panics
// are converted to errors.
func getDstInsertManyValueSql(plan *SchedulePlan2, manyValue any) (row any, err error) {
	var result any
	var event = "[getDstInsertManyValueSql]"
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	s := full.NewFull([]string{GlobalPConfigs.dSns.DestDBName}, "right", plan.Object.Role)
	newParameter := plan.DstParameter
	newParameter.Meta = manyValue
	s.Parameter1 = []parDef.Parameter{newParameter}
	if result, err = s.GetFullInsertSqlST(); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	row = result
	return
}
// sendMsgInsertManyValueResult packages a generated insert payload (row)
// together with the number of source rows it covers (derived from
// manyValue) into a map keyed by that row count, ready for the send
// queue. A nil row yields a nil result; empty string/slice payloads are
// dropped; an unexpected row type yields an error.
func sendMsgInsertManyValueResult(row any, manyValue any) (result any, err error) {
	var sum int64
	if row == nil {
		return
	}
	if mv, ok := manyValue.([][]*string); ok {
		sum = int64(len(mv))
	}
	switch r := row.(type) {
	case *string:
		if len(*r) > 0 {
			result = map[int64]*string{sum: r}
		}
	case [][]*string:
		result = map[int64][][]*string{int64(len(r)): r}
	case []*string:
		if len(r) > 0 {
			result = map[int64][]*string{sum: r}
		}
	default:
		// Report the type of row — the operand being switched on. The
		// previous version logged manyValue's type, which is misleading.
		err = ref.ErrAddPrintf("sendMsgInsertManyValueResult", fmt.Errorf("type not match. type is %v", reflect.TypeOf(r)))
	}
	return
}

// fixSumMarge splits chunkData (expected [][]*string) into batches of at
// most fixSum rows each, preserving row order. Input of any other
// dynamic type yields a nil result.
func fixSumMarge(fixSum int64, chunkData any) (ll [][][]*string) {
	// Convert once instead of re-asserting per use.
	rows, ok := chunkData.([][]*string)
	if !ok || rows == nil {
		return
	}
	if int64(len(rows)) <= fixSum {
		return append(ll, rows)
	}
	var batch [][]*string
	for _, row := range rows {
		batch = append(batch, row)
		// Flush as soon as the batch reaches the configured size so no
		// batch ever exceeds fixSum rows (the previous version reset its
		// counter to 0 after a flush, letting later batches overflow by one).
		if int64(len(batch)) >= fixSum {
			ll = append(ll, batch)
			batch = nil
		}
	}
	if len(batch) > 0 {
		ll = append(ll, batch)
	}
	return
}

// generalInsertManyValue_old (legacy; superseded by generalInsertManyValue)
// converts chunked row data into insert payloads on a goroutine pool and
// streams the results through the returned channel, which is closed when
// all batches are processed.
//
// NOTE(review): several defects are preserved here as-is:
//   - poolSize is declared but never read from config (the config line is
//     commented out), so it is always 0 and the default 10 is always used;
//   - the select/default send silently DROPS a result batch when the
//     channel is full — data loss, not back-pressure;
//   - err is shared between this goroutine and every pool worker without
//     synchronization (data race).
func generalInsertManyValue_old(message mq.Message) (generalChan chan any) {
	var event = "[generalInsertManyValue]"
	generalChan = make(chan any, GlobalPConfigs.rules.QueueSize)

	go func(generalChan1 chan any) {
		var wg sync.WaitGroup
		defer close(generalChan1)
		defer func() {
			if r := recover(); r != nil {
				recoverException(event)
			}
		}()
		var plan = message.Properties["plan"].(*SchedulePlan2)
		var chunkData = message.Properties["chunkData"]
		// Pool size would come from global config; fall back to a default
		// when unset.
		//poolSize := GlobalPConfigs.rules.MaxWorkers
		var poolSize int
		if poolSize <= 0 {
			poolSize = 10 // default pool size
		}
		// Create the goroutine pool.
		pool, err := ants.NewPool(poolSize, ants.WithNonblocking(false))
		if err != nil {
			log.ErrorLog().Error(fmt.Sprintf("%v failed to create goroutine pool: %v", event, err))
			runErrorAction(message, err)
			return
		}
		defer pool.Release() // make sure the pool is released

		for _, v := range fixSumMarge(plan.writeOptimizer.FixSum, chunkData) {
			if atomic.LoadInt64(plan.Status.ErrEnd) < 0 {
				return
			}
			vv := v
			wg.Add(1)
			err = pool.Submit(func() {
				defer wg.Done()
				// Re-check the error state to avoid unnecessary work.
				if atomic.LoadInt64(plan.Status.ErrEnd) < 0 {
					return
				}
				var rows any
				if rows, err = getDstInsertManyValueSql(plan, vv); err != nil {
					handleError(message, event, err)
					return
				}
				if rows, err = sendMsgInsertManyValueResult(rows, vv); err != nil {
					handleError(message, event, err)
					return
				}
				// Non-blocking send of the result to the channel.
				select {
				case generalChan1 <- rows:
				default:
					// If the channel is full, log a warning without blocking.
					log.ErrorLog().Error(fmt.Sprintf("channel is full, dropping data for table %v.%v",
						plan.Object.Schema, plan.Object.Table))
				}
			})
			//wg.Add(1)
			//go func(vv [][]*string) {
			//	defer func() {
			//		wg.Done()
			//	}()
			//	var rows any
			//	var err error
			//	if rows, err = getDstInsertManyValueSql(plan, vv); err != nil {
			//		runErrorAction(message, ref.ErrAddPrintf(event, err))
			//		log.ErrorLog().Error(fmt.Sprintf("%v", ref.ErrAddPrintf(event, err)))
			//		return
			//	}
			//	if rows, err = sendMsgInsertManyValueResult(rows, vv); err != nil {
			//		runErrorAction(message, ref.ErrAddPrintf(event, err))
			//		log.ErrorLog().Error(fmt.Sprintf("%v", ref.ErrAddPrintf(event, err)))
			//		return
			//	}
			//	generalChan1 <- rows
			//}(v)
		}
		wg.Wait()
	}(generalChan)
	return
}
// generalInsertManyValue converts the message's chunked row data into a
// slice of insert payloads, one per fixSum-sized batch, processed
// sequentially. If the plan's error flag trips mid-way the function
// returns early with the batches produced so far and a nil error.
// Failures (and recovered panics) are reported via handleError.
func generalInsertManyValue(message mq.Message) (res []any, err error) {
	var event = "[generalInsertManyValue]"
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
		if err != nil {
			handleError(message, event, err)
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	var chunkData = message.Properties["chunkData"]
	for _, vv := range fixSumMarge(plan.writeOptimizer.FixSum, chunkData) {
		// A negative ErrEnd means another worker already failed; stop early.
		if atomic.LoadInt64(plan.Status.ErrEnd) < 0 {
			return
		}
		var rows any
		if rows, err = getDstInsertManyValueSql(plan, vv); err != nil {
			return
		}
		if rows, err = sendMsgInsertManyValueResult(rows, vv); err != nil {
			return
		}
		res = append(res, rows)
	}
	return
}
// writePageSet_old (legacy; superseded by writePageSet) merges insert
// payloads from generalChan into transaction-sized batches and streams
// them through the returned channel. LOB-table payloads pass through
// unmerged; non-LOB statements are accumulated until the batch reaches
// the per-thread extent cap, with any remainder flushed when the input
// channel closes.
func writePageSet_old(message mq.Message, generalChan chan any) (generalMergeChan chan any) {
	generalMergeChan = make(chan any, GlobalPConfigs.rules.QueueSize)
	var event = "[writePageSet]"
	go func() {
		var resultSum, insertSum int64
		var resultGarth []*string
		//defer func() {
		//	if r := ref.RecoverPanic(event,recover());err == nil && r != nil {
		//		err = r
		//	}
		//}()
		var plan = message.Properties["plan"].(*SchedulePlan2)
		defer close(generalMergeChan)
		defer recoverException(event)
		for {
			select {
			case result, ok := <-generalChan:
				if !ok {
					// Input exhausted: flush the trailing partial batch.
					if len(resultGarth) > 0 {
						generalMergeChan <- map[int64]any{insertSum: resultGarth}
					}
					return
				}
				switch result.(type) {
				case map[int64][]*string, map[int64][][]*string: // LOB tables: pass through unmerged
					generalMergeChan <- result
				case map[int64]*string: // non-LOB tables: merge into page-sized batches
					for k, v := range result.(map[int64]*string) {
						insertSum += k
						resultGarth = append(resultGarth, v)
					}
					resultSum++
					// Cap the page size written by a single transaction:
					// a page is 16KB; an extent is 64 contiguous pages == 1MB;
					// a segment holds up to 64 extents == 64MB. The limit here
					// is 16 extents per thread, i.e. ~16MB per transaction.
					if resultSum >= 64*atomic.LoadInt64(plan.writeOptimizer.Thread) {
						generalMergeChan <- map[int64]any{insertSum: resultGarth}
						resultSum = 0
						insertSum = 0
						resultGarth = []*string{}
					}
				}

			}
		}
	}()
	return
}
// writePageSet merges the insert payloads in writeSql into
// transaction-sized batches. LOB-table payloads (map[int64][]*string /
// map[int64][][]*string) pass through unmerged; non-LOB statements
// (map[int64]*string) are accumulated until the per-thread extent cap is
// reached, with any trailing partial batch flushed at the end. Panics
// are converted to errors.
func writePageSet(message mq.Message, writeSql []any) (generalMergeChan []any, err error) {
	var event = "[writePageSet]"
	var resultSum, insertSum int64
	var resultGarth []*string
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	for _, sql := range writeSql {
		switch s := sql.(type) {
		case map[int64][]*string, map[int64][][]*string: // LOB tables: pass through unmerged
			generalMergeChan = append(generalMergeChan, s)
		case map[int64]*string: // non-LOB tables: merge into page-sized batches
			for k, v := range s {
				insertSum += k
				resultGarth = append(resultGarth, v)
			}
			resultSum++
			// Cap the page size written by a single transaction: a page is
			// 16KB; an extent is 64 contiguous pages == 1MB; a segment
			// holds up to 64 extents == 64MB. The limit here is 16 extents
			// per thread, i.e. ~16MB per transaction.
			if resultSum >= 64*atomic.LoadInt64(plan.writeOptimizer.Thread) {
				generalMergeChan = append(generalMergeChan, map[int64]any{insertSum: resultGarth})
				resultSum, insertSum = 0, 0
				resultGarth = []*string{}
			}
		}
	}
	// Flush the trailing partial batch, if any.
	if len(resultGarth) > 0 {
		generalMergeChan = append(generalMergeChan, map[int64]any{insertSum: resultGarth})
	}
	return
}
// generalInsert turns the message's chunk data into batched, page-capped
// insert payloads: generalInsertManyValue builds the statements and
// writePageSet merges them into transaction-sized groups. Panics are
// converted to errors.
func generalInsert(message mq.Message) (generalMergeChan []any, err error) {
	event := "[generalInsert]"
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	manyValueSql, mvErr := generalInsertManyValue(message)
	if mvErr != nil {
		err = mvErr
		return
	}
	return writePageSet(message, manyValueSql)
}

// writeInsert executes each batched insert statement carried in the
// message's "insertSql" property against the target database, verifies
// the written row count against the read count when the driver reports
// one, feeds write timing into the MQ speed statistics, and updates the
// progress bar. Any failure is reported through runErrorAction and
// returned.
func writeInsert(message mq.Message) (err error) {
	var (
		event       = "[writeInsert]"
		execTimeout int64 // duration reported by the most recent insert execution
		result      any
	)
	var plan = message.Properties["plan"].(*SchedulePlan2)
	log.MainLog().Debug(fmt.Sprintf("(%v) %v Table %v.%v starts executing the insert statement.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	if is, ok := message.Properties["insertSql"].([]any); ok {
		for _, v := range is {
			insertSum, insertSql := insertValueSumTotal(v)
			newParameter := plan.DstParameter
			newParameter.Con, newParameter.Meta = GlobalPConfigs.db.Target.GetDB(message.Properties["sourcePartitionName"].(string)), insertSql
			c := full.NewFull([]string{GlobalPConfigs.dSns.DestDBName}, "right", plan.Object.Role)
			if result, err = c.GetFullInsertST(newParameter); err != nil {
				log.ErrorLog().Error(fmt.Sprintf("%v insert write rows data for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), ref.ErrAddPrintf(event, err)))
				runErrorAction(message, err)
				return
			}
			if result == nil {
				// No execution report: nothing to meter or verify for this
				// batch (the previous version panicked here on a nil
				// execTimeout type assertion).
				continue
			}
			ret := result.(global.Return)
			execTimeout = ret.TimeOut
			plan.MQ.Speed.WriteAvgTimeAdd(uint64(execTimeout))
			plan.MQ.Speed.WriteExecCountAdd()
			// Compare read vs written row counts when the driver reports an
			// int64 count. (The previous version switched on result itself,
			// which always holds global.Return, so the check never ran.)
			if writeSum, isInt := ret.Result.(int64); isInt && insertSum != writeSum {
				err = ref.ErrAddPrintf(event, fmt.Errorf("read chunk sum != write chunk sum. readSum:%v,writeSum:%v", insertSum, writeSum))
				log.ErrorLog().Error(fmt.Sprintf("%v insert write rows data for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), err))
				runErrorAction(message, err)
				return
			}
			// Capture the bar-update error; the previous version discarded
			// it and wrapped the (nil) outer err instead.
			if err = barTerminalRowsUpdate(ret.Result, "right", plan); err != nil {
				err = ref.ErrAddPrintf(event, err)
				log.ErrorLog().Error(fmt.Sprintf("%v write rows sum update bar for table %v fail. error is {%v}", event, getTableName(plan.Object.Schema, plan.Object.Table), err))
				runErrorAction(message, err)
				return
			}
		}
	}
	log.MainLog().Debug(fmt.Sprintf("(%v) %v The insert statement of table %v.%v is completed. exec timeout is %v ms", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, execTimeout))
	return
}
