package syR

import (
	"errors"
	"fmt"
	"db2s/Fp"
	"db2s/Meta"
	"db2s/full"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/outPut"
	"db2s/parDef"
	"db2s/ref"
	mq "db2s/topic-mq"
	"reflect"
	"strconv"
	"strings"
	"sync/atomic"
	"time"
)

// getScheduleTaskTableObject builds the Fp.TableObject for one partition of the
// plan's target table, copying schema/table/row-limit from the plan object.
func getScheduleTaskTableObject(plan *SchedulePlan2, partition string) Fp.TableObject {
	var obj Fp.TableObject
	obj.Schema = plan.Object.Schema
	obj.Table = plan.Object.Table
	obj.PartitionName = partition
	obj.RowsLimit = plan.Object.RowsLimit
	return obj
}

// getScheduleTaskStaticParameter assembles the per-task static parameters from
// the global rule set and the plan's read-optimizer settings.
func getScheduleTaskStaticParameter(plan *SchedulePlan2) Fp.StaticParameter {
	var sp Fp.StaticParameter
	sp.CpLength = GlobalPConfigs.rules.QueueSize
	sp.MulFactor = plan.ReadOptimizer.Thread
	sp.ChunkNumber = plan.ReadOptimizer.ChunkSum
	sp.Scn = GlobalPConfigs.rules.Sync.Scn
	sp.QueryFilter = plan.TableAttributes.WhereAdd
	return sp
}
// getPartNameToShardName resolves the physical shard name backing the given
// partition, on either the source ("left") or target ("right") side.
//
// Returns an error for any other join value, or when the meta lookup fails or
// yields a non-string result.
func getPartNameToShardName(join, partition string, object Object) (name string, err error) {
	var drive []string
	var Parameter parDef.Parameter
	var res any
	var event = "[getPartNameToShardName]"
	// Convert panics into a returned error, but never clobber an earlier error.
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	switch join {
	case "left":
		drive = []string{GlobalPConfigs.dSns.SrcDBName}
		Parameter = parDef.Parameter{Con: GlobalPConfigs.db.Source.GetDB(partition), Object: parDef.Object{Schema: object.Schema, Table: object.Table}, Meta: partition}
	case "right":
		drive = []string{GlobalPConfigs.dSns.DestDBName}
		Parameter = parDef.Parameter{Con: GlobalPConfigs.db.Target.GetDB(partition), Object: parDef.Object{Schema: object.MSchema, Table: object.MTable}, Meta: partition}
	default:
		// BUG FIX: previously fell through and called Meta.NewMeta with a nil
		// drive slice and zero-value Parameter; bail out immediately instead.
		err = ref.ErrAddPrintf(event, fmt.Errorf("not miss match value %v", join))
		return
	}
	c := Meta.NewMeta(drive, join, object.Role)
	c.Parameter1 = []parDef.Parameter{Parameter}
	if res, err = c.GetPartNameToShardNameST(); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	switch v := res.(type) {
	case string:
		name = v
	default:
		err = ref.ErrAddPrintf(event, fmt.Errorf("not miss match. curry type is %v", reflect.TypeOf(res)))
	}
	return
}

// getScheduleTaskDistribution builds the Fp.TaskDistribution for one partition of
// the plan: it resolves the shard name, clones the side-appropriate parameter set
// pointed at that shard, and wires up the result channel and static parameters.
//
// join selects the side ("left" = source read, "right" = target write).
// NOTE(review): timeout is multiplied by time.Second below, so callers appear to
// pass a plain number of seconds in a time.Duration — confirm caller convention.
func getScheduleTaskDistribution(plan *SchedulePlan2, join string, partition string, timeout time.Duration) (result Fp.TaskDistribution, err error) {
	var errEnd int64 = 0
	var modifyNewParameter []parDef.Parameter
	var shardName string
	var drive []string
	var event = "[getScheduleTaskDistribution]"
	// Convert panics into a returned error, keeping any earlier error intact.
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	if shardName, err = getPartNameToShardName(join, partition, plan.Object); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	switch join {
	case "left":
		newPar := plan.SrcParameter
		switch col := plan.baseMe.Col.(type) {
		case []any:
			newPar.Meta = col[0] // element 0 carries the source-side column meta
		default:
			err = ref.ErrAddPrintf(event, fmt.Errorf("type not match. curry type is %v", reflect.TypeOf(plan.baseMe.Col)))
			return
		}
		newPar.Con, newPar.Object.Table, newPar.Object.Partition, newPar.Options.StopTime = GlobalPConfigs.db.Source.GetDB(partition), shardName, partition, timeout*time.Second
		drive = []string{GlobalPConfigs.dSns.SrcDBName}
		modifyNewParameter = []parDef.Parameter{newPar}
	case "right":
		newPar := plan.DstParameter
		switch col := plan.baseMe.Col.(type) {
		case []any:
			newPar.Meta = col[1] // element 1 carries the target-side column meta
			plan.DstParameter.ExecInsert.Columns = col[1].([]parDef.ColMetaMapS)
		default:
			err = ref.ErrAddPrintf(event, fmt.Errorf("type not match. curry type is %v", reflect.TypeOf(plan.baseMe.Col)))
			return
		}
		// Note: the plan's DstParameter is also retargeted at the shard, not just
		// the local copy, so later uses of plan.DstParameter see the shard name.
		newPar.Con, newPar.Object.Table, plan.DstParameter.Object.Table = GlobalPConfigs.db.Target.GetDB(partition), shardName, shardName
		newPar.Object.Partition, newPar.Options.StopTime = partition, timeout*time.Second
		drive = []string{GlobalPConfigs.dSns.DestDBName}
		modifyNewParameter = []parDef.Parameter{newPar}
	}
	if plan.DstParameter.ExecInsert.Prefix, err = getInsertPre(plan); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	result = Fp.TaskDistribution{
		LogSeq:     plan.Object.ObjectLogSeq,
		DataSource: plan.ReadOptimizer.ChunkStartSource,
		Parameter:  modifyNewParameter,
		Full:       full.NewFull(drive, "left", plan.Object.Role),
		StaticP:    getScheduleTaskStaticParameter(plan),
		ErrEnd:     &errEnd,
	}
	result.Full.Parameter1 = modifyNewParameter
	result.First.Result = make(chan any, GlobalPConfigs.rules.QueueSize)
	result.TableObject = getScheduleTaskTableObject(plan, partition)
	return
}
// SendMsgExecSumFunc returns an EFunc whose callback accumulates a delta into one
// or more *int64 counters. The callback expects s to be a []any of the shape
// [pointer-or-slice-of-pointers, delta int64]; anything else is ignored.
func SendMsgExecSumFunc(param any) global.EFunc {
	return global.EFunc{
		FuncName: func(event string, s any) (r any) {
			var targets []any
			var delta int64
			if items, ok := s.([]any); ok {
				for idx, item := range items {
					switch idx {
					case 0:
						// First slot: a single counter pointer or a slice of them.
						switch reflect.TypeOf(item).String() {
						case "interface {}", "*int64":
							targets = append(targets, item)
						case "[]interface {}":
							targets = append(targets, item.([]any)...)
						}
					case 1:
						// Second slot: the amount to add.
						delta = item.(int64)
					}
				}
			}
			for _, tgt := range targets {
				if p, ok := tgt.(*int64); ok {
					atomic.AddInt64(p, delta)
				}
			}
			return
		},
		Params: param,
	}
}
// SendMsgActionFunc returns an EFunc whose callback records topic completion for
// the configured check mode and, when the terminal output is "bar", also pushes
// a task-bar end-of-topic update.
//
// NOTE(review): the type switch inspects s but every value is then read from
// param — if s and param are always the same map this is harmless, otherwise
// the switch guards the wrong variable; confirm against the callers.
func SendMsgActionFunc(param any) global.EFunc {
	return global.EFunc{
		FuncName: func(event string, s any) (r any) {
			var check CheckMode
			var topic Topic
			var outFay string
			switch s.(type) {
			case map[string]any:
				// Pull the optional fields out of param (not s — see note above).
				if v, ok := param.(map[string]any)["outFay"]; ok {
					outFay = v.(string)
				}
				if v, ok := param.(map[string]any)["check"]; ok {
					check = v.(CheckMode)
				}
				if v, ok := param.(map[string]any)["topic"]; ok {
					topic = v.(Topic)
				}
			}
			PlanContext.topicEndAdd(check, topic)
			// Bar-style terminals additionally get a per-task end marker.
			if strings.EqualFold(outFay, "bar") {
				PlanContext.taskBarEndTopicAdd(param)
			}
			return
		},
		Params: param,
	}
}

// missIndexDataFlayMargeOrderBY drains row data for a non-indexed table from the
// flayData channel (assumed to be chan any carrying []*string rows — confirm with
// producer), batches rows, and forwards the batches to taskObject.First.Result.
// It closes the result channel when the input channel closes or when the plan's
// error flag goes negative (abnormal termination).
func missIndexDataFlayMargeOrderBY(taskObject Fp.TaskDistribution, plan *SchedulePlan2, flayData any) (err error) {
	var m [][]*string // current in-flight batch of rows
	var i int64       // rows accumulated since the last flush
	var vlog string
	var event = "[missIndexDataFlayMargeOrderBY]"
	defer func() {
		if re := recover(); re != nil {
			err = errors.New(fmt.Sprintf("%v defer recover An exception was captured. abnormal is %v ", event, re))
			log.ErrorLog().Error(fmt.Sprintf("%v", err))
		}
	}()
	for {
		vlog = fmt.Sprintf("(%d) %v Start dispos1 data of non-index table %v.%v ", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table)
		log.MainLog().Debug(vlog)
		// Single-case select: equivalent to a plain blocking receive.
		select {
		case c, ok := <-flayData.(chan any):
			log.MainLog().Debug(fmt.Sprintf("(%d) %v Start dispos2 data of non-index table %v.%v of %v.%v", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, c, ok))
			if !ok {
				// Producer closed the channel: flush any partial batch, then
				// close the downstream result channel.
				if len(m) > 0 {
					taskObject.First.Result <- m
				}
				log.MainLog().Info(fmt.Sprintf("(%d) %v Data reading of non-index table %v.%v is completed.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
				close(taskObject.First.Result)
				return
			} else {
				if c == nil {
					continue
				}
				//atomic.AddInt64(plan.TableSpeedLimit.ReadSum, 1)
				plan.MQ.Speed.ReadSumAdd(1)
				c1 := c.([]*string)
				e := atomic.LoadInt64(plan.Status.ErrEnd)
				if e < 0 {
					// Abnormal termination elsewhere: stop reading and close downstream.
					log.MainLog().Warn(fmt.Sprintf("(%d) %v Data reading from non-indexed table %v.%v terminates abnormally.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
					close(taskObject.First.Result)
					return
				}
				atomic.AddInt64(plan.TPod.Sync.SelectRows, 1)
				if atomic.LoadInt64(plan.Status.ErrEnd) > -1 {
					PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": int64(1), "insert": int64(0)})
				}
				i++
				// Flush BEFORE appending the current row: once the counter exceeds
				// FixSum the accumulated batch (FixSum rows) is sent and the current
				// row starts the next batch.
				if i > plan.writeOptimizer.FixSum {
					taskObject.First.Result <- m
					m = [][]*string{}
					i = 0
				}
				m = append(m, c1)
			}
		}
	}
}

// insertValueSumTotal unpacks an insert suffix that arrives as a single-entry
// map from row count to SQL payload, returning the count and the payload.
// A nil or unrecognized input yields the zero values (0, nil).
func insertValueSumTotal(insertSuffix any) (insertSum int64, insertSql any) {
	if insertSuffix == nil {
		return
	}
	// The dead `switch insertSuffix {}` the original carried has been removed.
	switch v := insertSuffix.(type) {
	case []any:
		// Explicitly ignored, matching the original behavior.
	case map[int64]any:
		// Maps are expected to hold exactly one entry; with more, iteration
		// order makes the result arbitrary.
		for k, val := range v {
			insertSum = k
			insertSql = val
		}
	case map[int64][][]*string:
		for k, val := range v {
			insertSum = k
			insertSql = val
		}
	}
	return
}
// getRowsSum extracts a row count from tt: a bare int64 is returned as-is, a
// row ([]*string) or batch of rows ([][]*string) yields its length, and any
// other type counts as zero.
func getRowsSum(tt any) (sum int64) {
	switch v := tt.(type) {
	case int64:
		sum = v
	case []*string:
		sum = int64(len(v))
	case [][]*string:
		sum = int64(len(v))
	}
	return
}
// barTerminalRowsUpdate pushes a row-count delta into the progress-bar
// accounting for either the read ("left") or write ("right") side. It is a
// no-op unless the terminal mode is "bar" and the sub-task is still active
// (status != -2).
func barTerminalRowsUpdate(tt any, left string, plan *SchedulePlan2) (err error) {
	var event = "[barTerminalRowsUpdate]"
	defer func() {
		if r := recover(); r != nil {
			err = recoverCost(event, r)
		}
	}()
	if GlobalPConfigs.result.Teletypewriter != "bar" || PlanContext.TaskBarSubsStatusGet(plan.subTaskInfo) == -2 {
		return
	}
	// Compute the row count once instead of three times per branch.
	n := getRowsSum(tt)
	switch left {
	case "left":
		plan.MQ.Speed.ReadSumAdd(n)
		atomic.AddInt64(plan.TPod.Sync.SelectRows, n)
		PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": n, "insert": int64(0)})
	case "right":
		plan.MQ.Speed.WriteSumAdd(n)
		atomic.AddInt64(plan.TPod.Sync.InsertRows, n)
		PlanContext.SubTaskBarAccumulate(map[string]any{"subTask": plan.subTaskInfo, "select": int64(0), "insert": n})
	}
	return
}
// abnormalErrorQuit reports whether the plan's error flag signals an abnormal
// termination (any negative value).
func abnormalErrorQuit(plan *SchedulePlan2) bool {
	flag := atomic.LoadInt64(plan.Status.ErrEnd)
	return flag < 0
}
// handleError wraps err with the event label, triggers the message's error
// action, and records the wrapped error in the error log.
func handleError(message mq.Message, event string, err error) {
	wrapped := ref.ErrAddPrintf(event, err)
	runErrorAction(message, wrapped)
	log.ErrorLog().Error(fmt.Sprintf("%v", wrapped))
}
// getXlsWriteTerminalFunc is the registry mapping check modes to their
// xls terminal-write implementations (currently only "sync").
func getXlsWriteTerminalFunc() map[string]reflect.Value {
	funcs := make(map[string]reflect.Value, 1)
	funcs["sync"] = reflect.ValueOf(syncXlsWriterTerminalData)
	return funcs
}
// xlsWriterTerminalData dispatches to the terminal-write implementation
// registered for the configured check mode and returns its single result,
// or nil when no (callable) implementation is registered.
func xlsWriterTerminalData(plan *SchedulePlan2, logSeq int64) any {
	fn, ok := getXlsWriteTerminalFunc()[GlobalPConfigs.rules.CheckMode]
	if !ok || fn.Kind() != reflect.Func {
		return nil
	}
	out := fn.Call([]reflect.Value{reflect.ValueOf(plan), reflect.ValueOf(logSeq)})
	return out[0].Interface()
}
// analyzeTable runs the post-sync analyze step for the plan's table by
// invoking the meta layer's DDLObjectDrop over both source and target
// parameter sets. logSeq is currently unused but kept for interface parity
// with the other per-object actions.
func analyzeTable(plan *SchedulePlan2, logSeq int64) (err error) {
	m := Meta.NewMeta([]string{GlobalPConfigs.dSns.SrcDBName, GlobalPConfigs.dSns.DestDBName}, "right", "analyze")
	m.Parameter1 = append(m.Parameter1, plan.SrcParameter, plan.DstParameter)
	err = m.DDLObjectDrop()
	return
}
// singleTableFinishAnalyzeAction runs the optional post-sync analyze step for
// a finished table when GlobalPConfigs.rules.Sync.Analyze is enabled. The
// analyze error is logged as a warning but not returned, matching the original
// best-effort behavior.
func singleTableFinishAnalyzeAction(plan *SchedulePlan2) (err error) {
	var event = "[singleTableFinishAnalyzeAction]"
	log.MainLog().Debug(fmt.Sprintf("(%v) %v begin exec analyze table of table %v.%v. analyze options is {%v}", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, GlobalPConfigs.rules.Sync.Analyze))
	if GlobalPConfigs.rules.Sync.Analyze {
		if err = analyzeTable(plan, plan.Object.ObjectLogSeq); err != nil {
			log.MainLog().Warn(fmt.Sprintf("(%v) %v analyze table fail of table %v.%v. error is {%v}", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, err))
			// BUG FIX: previously fell through and also logged the "successful"
			// message after a failure.
			return
		}
		log.MainLog().Info(fmt.Sprintf("(%v) %v analyze table successful of table %v.%v.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	}
	return
}
// singleTableFinishXlsWriteAction writes the finished table's result sheet via
// the reflective xls terminal dispatcher and logs the outcome.
func singleTableFinishXlsWriteAction(plan *SchedulePlan2) (err error) {
	var event = "[singleTableFinishXlsWriteAction]"
	log.MainLog().Debug(fmt.Sprintf("(%v) %v begin exec finish object xls write of table %v.%v .", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	if err1 := xlsWriterTerminalData(plan, plan.Object.ObjectLogSeq); err1 != nil {
		// BUG FIX: a bare err1.(error) panicked when the reflective call returned
		// a non-error value; use the comma-ok form and wrap unexpected types.
		if e, ok := err1.(error); ok {
			err = e
		} else {
			err = fmt.Errorf("unexpected non-error result from xls terminal write: %v", err1)
		}
		log.MainLog().Warn(fmt.Sprintf("(%v) %v exec finish object xls write fail of table %v.%v. error is {%v}", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table, err))
		return
	}
	log.MainLog().Info(fmt.Sprintf("(%v) %v exec finish object xls write successful of table %v.%v.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	return
}
// terminalOutputTPod marks the TPod result as "error" when the plan terminated
// abnormally, then emits the TPod to the terminal writer and mirrors it into
// the xls terminal sheet.
func terminalOutputTPod(plan *SchedulePlan2) {
	if atomic.LoadInt64(plan.Status.ErrEnd) < 0 {
		plan.TPod.Result = "error"
	}
	newTer.Write(plan.TPod)
	plan.XlsTerminal.Data = *plan.TPod
	newCell.Write(plan.XlsTerminal)
}
// getTableCost returns the elapsed sub-task time, formatted in seconds by
// global.Second, from the sub-task start until now.
func getTableCost(plan *SchedulePlan2) string {
	return global.Second(plan.Status.SubTaskBeginTime, time.Now().UnixNano())
}
// getMigrationRate computes rows-per-second for the sub-task as the destination
// row accumulator divided by the elapsed cost in (truncated) whole seconds.
func getMigrationRate(plan *SchedulePlan2) (migrationRate string) {
	var tableCostInt float64
	// BUG FIX: ParseFloat's second argument is a bit size and must be 32 or 64
	// (it was 10). The parse error is deliberately ignored: an unparsable cost
	// yields 0, matching the original best-effort behavior.
	tableCostInt, _ = strconv.ParseFloat(getTableCost(plan), 64)
	migrationRate = global.Commercial(*plan.subTaskInfo.DAccumulate, int64(tableCostInt))
	return
}
// getTableDataSize returns the per-database data size for the plan's table.
// NOTE(review): the size computation is currently commented out, so this always
// returns an empty (non-nil) map and the xls sheet reports 0 for both sides —
// confirm whether this is intentional before relying on the output.
func getTableDataSize(plan *SchedulePlan2) (tableDataSize map[string]float64) {
	tableDataSize = make(map[string]float64)
	//for k, v := range map[string]*sql.DB{
	//	GlobalPConfigs.dSns.SrcDBName:  GlobalPConfigs.db.Source.GetDB("single"),
	//	GlobalPConfigs.dSns.DestDBName: GlobalPConfigs.db.Target.GetDB("single"),
	//} {
	//	t, err := Er.MetaInfo(Er.TableInfoMeta{DBType: k})
	//	if err != nil {
	//		return
	//	}
	//	dataSize, err := t.DataSize(global.TablesMetaInfoInput{
	//		DB:        v,
	//		TableInfo: global.TableInfo{Schema: plan.Object.Schema, Table: plan.Object.Table, BackendTableName: plan.Object.Table},
	//	})
	//	if err != nil {
	//		return
	//	}
	//	tableDataSize[k] = dataSize.DataSizeMB + dataSize.IndexSizeMB
	//}
	return
}
// syncXlsWriterTerminalData writes the finished table's sync results (row
// counts, cost, rate, data sizes) into the xls result sheet and emits the TPod
// to the terminal. It is the "sync" entry in getXlsWriteTerminalFunc and always
// returns nil.
//
// NOTE(review): syncStatus is never assigned, so SyncStatus is always the empty
// string — confirm whether a status should be derived from plan state here.
func syncXlsWriterTerminalData(plan *SchedulePlan2, logSeq int64) error {
	var (
		// BUG FIX: event label previously said "[xlsWriterTerminalData]",
		// mislabelling this function's log lines as the dispatcher's.
		event      = "[syncXlsWriterTerminalData]"
		syncStatus string
	)
	log.MainLog().Debug(fmt.Sprintf("(%d) %v Start writing the execution results of table %v.%v into xls...", logSeq, event, plan.Object.Schema, plan.Object.Table))
	terminalOutputTPod(plan)
	tableSize := getTableDataSize(plan)
	plan.XlsResult.Data.Sync = outPut.SyncResultSheet{SyncSum: plan.subTaskInfo.RecordCount, SelectRows: plan.subTaskInfo.SAccumulate, InsertRows: plan.subTaskInfo.DAccumulate,
		SyncStatus: syncStatus, TableCost: getTableCost(plan), MigrationRate: getMigrationRate(plan), SourceDataSize: fmt.Sprintf("%v", tableSize[GlobalPConfigs.dSns.SrcDBName]), DestDataSize: fmt.Sprintf("%v", tableSize[GlobalPConfigs.dSns.DestDBName]),
		DataExpansionRatio: global.Commercial(int64(tableSize[GlobalPConfigs.dSns.DestDBName]), int64(tableSize[GlobalPConfigs.dSns.SrcDBName]))}
	plan.XlsResult.Data.Comment = fmt.Sprintf("%v", plan.subTaskInfo.ErrorInfo.Load())
	newCell.Write(*plan.XlsResult)
	log.MainLog().Debug(fmt.Sprintf("(%d) %v The execution result of table %v.%v is sent successfully. result is {%v}", logSeq, event, plan.Object.Schema, plan.Object.Table, plan.TPod))
	return nil
}
// mqStatusMonitorAbnormalQuit runs the end-of-table actions (analyze + xls
// write) after an abnormal queue shutdown. Non-*SchedulePlan2 inputs are
// silently ignored.
func mqStatusMonitorAbnormalQuit(event string, i any) (r any) {
	plan, ok := i.(*SchedulePlan2)
	if !ok {
		return
	}
	_ = singleTableFinishAnalyzeAction(plan)
	_ = singleTableFinishXlsWriteAction(plan)
	return
}
// mqStatusMonitorNormalQuit runs the end-of-table actions when the message
// queue's produced and consumed counts are equal (normal completion), then
// marks the sub-task bar as finished. Non-*SchedulePlan2 inputs are ignored.
func mqStatusMonitorNormalQuit(event string, i any) (r any) {
	plan, ok := i.(*SchedulePlan2)
	if !ok {
		// BUG FIX: the deferred closure used to be registered before this type
		// check and dereferenced a nil plan on this path, panicking inside the
		// defer.
		return
	}
	defer func() {
		PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
		log.MainLog().Info(fmt.Sprintf("(%v) %v Table %v.%v data migration is completed.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	}()
	log.MainLog().Debug(fmt.Sprintf("(%d) %v It is detected that the message queue production quantity and consumption quantity in table %v.%v are equal.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	_ = singleTableFinishAnalyzeAction(plan)
	_ = singleTableFinishXlsWriteAction(plan)
	return
}

// getMsgSendData builds the standard message payload for a plan (plan, check
// mode, sub-task info, error flag, terminal mode) and overlays any extra
// key/value pairs from si, which take precedence over the defaults.
func getMsgSendData(plan *SchedulePlan2, si map[string]any) map[string]any {
	res := make(map[string]any, len(si)+5)
	res["plan"] = plan
	res["check"] = getCheckMod()
	res["subTask"] = plan.subTaskInfo
	res["err"] = plan.Status.ErrEnd
	res["outFay"] = GlobalPConfigs.result.Teletypewriter
	for k, v := range si {
		res[k] = v
	}
	return res
}
