package ckR

import (
	"db2s/Fp"
	"db2s/arg"
	ea "db2s/encryptionAlgorithm"
	"db2s/full"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/outPut"
	"db2s/parDef"
	"db2s/ref"
	mq "db2s/topic-mq"
	"fmt"
	"os"
	"reflect"
	"strings"
	"sync/atomic"
)

// podRowsInit returns an outPut.Rows whose six counters all start at zero.
// Each counter is exposed as a pointer so callers can update it atomically.
func podRowsInit() outPut.Rows {
	var (
		total, inserted, updated, deleted int64
		srcRows, dstRows                  int64
	)
	return outPut.Rows{
		RowsSum:      &total,
		InsertSum:    &inserted,
		UpdateSum:    &updated,
		DeleteSum:    &deleted,
		SourceNumber: &srcRows,
		DestNumber:   &dstRows,
	}
}

// podInit builds the per-table result pod (sequence number, identity, and
// zeroed status) for table seq. When the configured check mode is "rows" it
// also attaches the rows-mode options and fresh row counters.
func (t CheckSchemaMetaData) podInit(seq int, v arg.InputTableAttributes) (result *outPut.TPod, err error) {
	var status int64
	pod := outPut.TPod{
		Seq:       seq,
		Schema:    v.Schema,
		Table:     v.Table,
		CheckMode: GlobalPConfigs.rules.CheckMode,
		IndexName: "missIndex",
		IndexType: "none",
		IndexCol:  "none",
		Status:    &status,
		Result:    "ok",
	}
	if GlobalPConfigs.rules.CheckMode == "rows" {
		pod.Options = GlobalPConfigs.rules.RowsR.Options
		pod.Rows = podRowsInit()
	}
	return &pod, nil
}
// speedAdd is the mq speed-limit "slow down" callback: it decrements the
// plan's read thread count (keeping it above 5) and write thread count
// (keeping it above 1). plan1 must be a *SchedulePlan2.
func speedAdd(event string, plan1 any) (res any) {
	plan := plan1.(*SchedulePlan2)
	// atomic.AddInt64 performs the decrement as one atomic read-modify-write;
	// the previous Load-then-Swap pair could lose a concurrent update.
	if atomic.LoadInt64(plan.ReadOptimizer.Thread) > 5 {
		atomic.AddInt64(plan.ReadOptimizer.Thread, -1)
	}
	if atomic.LoadInt64(plan.writeOptimizer.Thread) > 1 {
		atomic.AddInt64(plan.writeOptimizer.Thread, -1)
	}
	return
}
// speedDel is the mq speed-limit "speed up" callback: it increments the
// plan's read and write thread counts. plan1 must be a *SchedulePlan2.
func speedDel(event string, plan1 any) (res any) {
	plan := plan1.(*SchedulePlan2)
	// atomic.AddInt64 replaces the non-atomic Load+Swap pair, which could
	// drop an increment under concurrent callers.
	atomic.AddInt64(plan.ReadOptimizer.Thread, 1)
	atomic.AddInt64(plan.writeOptimizer.Thread, 1)
	return
}
// speedResSend forwards a speed-monitor sample to the progress bar, but only
// when bar output is configured and this sub-task's bar is still active
// (status != -2 means not finished/cancelled).
func (plan *SchedulePlan2) speedResSend(event string, plan1 any) (res any) {
	if GlobalPConfigs.result.Teletypewriter != "bar" {
		return
	}
	if PlanContext.TaskBarSubsStatusGet(plan.subTaskInfo) == -2 {
		return
	}
	PlanContext.SpeedTaskBarAccumulate(plan1)
	return
}
// speedInit creates the per-table speed monitor and wires in the throttle
// (speedAdd), un-throttle (speedDel), and progress-report (speedResSend)
// callbacks.
func speedInit(plan *SchedulePlan2) mq.SpeedLimit {
	monitor := mq.NewSpeedMonitor(plan.Object.Schema, plan.Object.Table, plan.ReadOptimizer.ChunkSum)
	monitor.AvgRowsSize = plan.writeOptimizer.AvgRowsSize
	monitor.SpeedAddFunc = global.EFunc{FuncName: speedAdd, Params: plan}
	monitor.SpeedDelFunc = global.EFunc{FuncName: speedDel, Params: plan}
	monitor.SendResFunc = global.EFunc{FuncName: plan.speedResSend}
	return monitor
}

// writeOptimizer tunes the destination-side write parameters. It queries the
// source for the table's average row size and derives a chunk size from it
// (1 MiB / avg row size); on any failure, or a non-positive result, it falls
// back to a default chunk size of 10.
func writeOptimizer(plan *SchedulePlan2) {
	const defaultChunkSum = 10 // fallback when the average row size is unavailable
	var (
		thread int64 = 1
		event        = "[writeOptimizer]"
	)
	plan.writeOptimizer.Thread = &thread
	s := full.NewFull([]string{GlobalPConfigs.dSns.SrcDBName}, "left", plan.Object.Role)
	s.Parameter1 = append(s.Parameter1, plan.SrcParameter)
	result, err := s.GetFullAvgRowsSizeST()
	if err != nil || result == nil {
		log.MainLog().Warn(fmt.Sprintf("(%v) %v dest write optimizer fail. error is {%v}. it will use default value.", plan.Object.ObjectLogSeq, event, err))
		plan.writeOptimizer.FixSum = defaultChunkSum
		plan.ReadOptimizer.ChunkSum = defaultChunkSum
		return
	}
	// Assert once instead of re-asserting result.(int64) at every use.
	avgRowsSize := result.(int64)
	plan.writeOptimizer.AvgRowsSize = avgRowsSize
	var chunkSum int64
	if avgRowsSize > 0 {
		chunkSum = 16 * 1024 * 64 / avgRowsSize // ~1 MiB worth of rows per chunk
	}
	if chunkSum > 0 {
		plan.writeOptimizer.FixSum = chunkSum
		plan.ReadOptimizer.ChunkSum = chunkSum
		infoWriteInfo(fmt.Sprintf("(%v) %v dest write optimizer chunkSum is {%v},avg length result is {%v}", plan.Object.ObjectLogSeq, event, chunkSum, avgRowsSize))
	} else {
		plan.writeOptimizer.FixSum = defaultChunkSum
		plan.ReadOptimizer.ChunkSum = defaultChunkSum
		warnWriteInfo(fmt.Sprintf("(%v) %v dest chunk sum result is 0.avg length result is {%v}, it will use default value.", plan.Object.ObjectLogSeq, event, avgRowsSize))
	}
	// NOTE: the original re-assigned plan.writeOptimizer.Thread = &thread here,
	// duplicating the assignment above; the duplicate has been removed.
}

// readOptimizer seeds the source-side read defaults: chunking starts from the
// source ("src") and reading begins with a single thread.
func readOptimizer(plan2 *SchedulePlan2) {
	var readThreads int64 = 1
	plan2.ReadOptimizer.ChunkStartSource = "src"
	plan2.ReadOptimizer.Thread = &readThreads
}

// getTableCol splits the combined column metadata (a two-element []any of
// {source, dest} []parDef.ColMetaMapS) into separate source and destination
// slices. Anything that is not a []any yields two nil slices.
func getTableCol(re any) (srcCol, dstCol []parDef.ColMetaMapS) {
	pair, ok := re.([]any)
	if !ok {
		return
	}
	if pair[0] != nil {
		srcCol = append(srcCol, pair[0].([]parDef.ColMetaMapS)...)
	}
	if pair[1] != nil {
		dstCol = append(dstCol, pair[1].([]parDef.ColMetaMapS)...)
	}
	return
}
// getTableColName extracts the column names from a column-metadata slice,
// preserving order. An empty input yields a nil slice.
func getTableColName(qq []parDef.ColMetaMapS) (result []string) {
	for i := range qq {
		result = append(result, qq[i].ColumnName)
	}
	return
}
// tableMetaArr1 reports whether the source and destination column sets are
// compatible for checking: unless column-length differences are ignored, any
// column present on only one side fails the table, and at least one common
// column is always required.
func tableMetaArr1(plan *SchedulePlan2) bool {
	sNew, dNew := getTableCol(plan.baseMe.Col)
	// add: source-only columns; eq: common; del: destination-only.
	add, eq, del := ea.CheckSumTypeStruct{}.LowerArrCmp(getTableColName(sNew), getTableColName(dNew))
	if !GlobalPConfigs.rules.Sync.IgnoreColumnLength && (len(add) > 0 || len(del) > 0) {
		return false
	}
	// The original ended with two branches that both returned true; a single
	// "any common column" check is equivalent.
	return len(eq) > 0
}
// tableStatusInit resets the plan's status: log sequence starts at 0 and the
// error flag points at a fresh zero (0 = no error, -1 = failed).
func tableStatusInit(plan2 *SchedulePlan2) {
	var errEnd int64
	plan2.Status.LogSeq = 0
	plan2.Status.ErrEnd = &errEnd
}

// initDataFixFile opens the per-table DML repair file under directory p.
// Only the "split" layout (one file per table) is implemented; the "many"
// layout is currently disabled. Open errors are printed and swallowed.
func initDataFixFile(p string, plan *SchedulePlan2) {
	if GlobalPConfigs.repair.FixFileWay != "split" {
		// "many": per-statement fix files are not implemented yet.
		return
	}
	var err error
	plan.repair.FixFileDml = arg.C(p, fmt.Sprintf("%s_%s", plan.Object.Table, GlobalPConfigs.repair.FixFileDml))
	plan.repair.FixFileDmlF, err = os.OpenFile(plan.repair.FixFileDml, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666)
	if err != nil {
		fmt.Println(err)
	}
}
// initDataFixFileAbnormalResult opens the per-table abnormal-result file
// under directory p. Open errors are printed and swallowed.
func initDataFixFileAbnormalResult(p string, plan *SchedulePlan2) {
	var err error
	switch GlobalPConfigs.repair.FixFileWay {
	// The "split" and "many" bodies were byte-identical; they share one case.
	case "split", "many":
		plan.repair.AbnormalResult = arg.C(p, fmt.Sprintf("%s_%s", plan.Object.Table, GlobalPConfigs.repair.AbnormalResult))
		if plan.repair.AbnormalResultF, err = os.OpenFile(plan.repair.AbnormalResult, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0666); err != nil {
			fmt.Println(err)
			return
		}
	}
}
// InitDataFixFile prepares both repair output files (DML fixes and abnormal
// results) for the plan's schema. It silently does nothing if the schema
// directory cannot be resolved.
func InitDataFixFile(plan *SchedulePlan2) {
	base, exists, err := arg.PathExists(GlobalPConfigs.repair.FixFilePathBase, fmt.Sprintf("%s", plan.Object.Schema))
	if err != nil || !exists {
		return
	}
	initDataFixFile(base, plan)
	initDataFixFileAbnormalResult(base, plan)
}

// sampInitDataFixFile prepares only the abnormal-result file for sampling
// mode (sampling never writes DML fix files). It silently does nothing if
// the schema directory cannot be resolved.
func sampInitDataFixFile(plan *SchedulePlan2) {
	base, exists, err := arg.PathExists(GlobalPConfigs.repair.FixFilePathBase, fmt.Sprintf("%s", plan.Object.Schema))
	if err != nil || !exists {
		return
	}
	initDataFixFileAbnormalResult(base, plan)
}

// initMissIndexTmpFile is currently a no-op stub. The commented-out body
// below once created per-table temp files (tmpData/tmpMd5) for checking
// tables without a usable index; it is kept for reference until that path
// is re-enabled or removed.
func initMissIndexTmpFile(plan *SchedulePlan2) {
	//var (
	//	err error
	//)
	//if len(plan.indexColumn) == 0 {
	//	tmpBase := "tmp"
	//	plan.md5UniqMap = cmap.New[Animal]()
	//	if !global.FilePathIsNotExist(tmpBase) {
	//		return
	//	}
	//	plan.tmpFileF.tmpDataFileName = fmt.Sprintf("%v/%v%v.tmpData", tmpBase, plan.schema, plan.table)
	//	plan.tmpFileF.tmpMd5FileName = fmt.Sprintf("%v/%v%v.tmpMd5", tmpBase, plan.schema, plan.table)
	//	if plan.tmpFileF.tmpDataFileF, err = global.FileIsNotExist(plan.tmpFileF.tmpDataFileName); err != nil {
	//		fmt.Println("--------1111:", err)
	//		return
	//	}
	//	if plan.tmpFileF.tmpMd5FileF, err = global.FileIsNotExist(plan.tmpFileF.tmpMd5FileName); err != nil {
	//		fmt.Println("--------1111:", err)
	//		return
	//	}
	//}
}

// initSampSchedulePlan builds the sampling-mode schedule plans. The entire
// implementation is currently commented out, so this always returns an empty
// map; the disabled body (kept for reference) built one SchedulePlan2 per
// table with sampling counters, index metadata, generated SQL, and repair
// files. NOTE(review): confirm whether sampling mode is meant to be inactive
// before deleting the commented code.
func initSampSchedulePlan(s *SchemaInitContext) map[string]*SchedulePlan2 {
	var (
		m = make(map[string]*SchedulePlan2)
		//event        string
		//logThreadSeq int64
	)
	//global.Wlog.Debug(fmt.Sprintf("(%d) %s initCountSchedulePlan Rows model init, table info is %v", 1, event, s.TableList))
	//for _, v := range s.TableList {
	//	f0 := strings.Split(v, global.SchemaTableSplit)
	//	schema := f0[0]
	//	tab := f0[1]
	//	md := metaD.GetTableStrconvO()
	//	md.Schema = schema
	//	md.Table = tab
	//	md.TableMap = GlobalPConfigs.schema.MapTables
	//	mschema, mtable := metaD.SchemaTableCorver(*md)
	//	//原目标端是否存在
	//	continueTrue := false
	//	for _, v1 := range []string{v, fmt.Sprintf("%v%s%v", mschema, global.SchemaTableSplit, mtable)} {
	//		if _, ok := s.TcolumnMeta[v1]; !ok {
	//			global.Wlog.Warn(fmt.Sprintf("(%d) %v table %v not is exist!,will continue.", logThreadSeq, event, v1))
	//			continueTrue = true
	//		}
	//	}
	//	if continueTrue {
	//		continue
	//	}
	//	var errCode int64 = 0
	//	var srcCount uint64 = 0
	//	var dstCount uint64 = 0
	//	var sampRows uint64 = 0
	//	var sampChunk uint64 = 0
	//	var srcChunkSqlSum uint64 = 0
	//	var tokenBucketRich uint64 = 0
	//	plan2 := &SchedulePlan2{
	//		schema:  schema,
	//		table:   tab,
	//		mschema: fmt.Sprintf("%v", mschema),
	//		mtable:  fmt.Sprintf("%v", mtable),
	//		dbf: dbExec.DataAbnormalFixStruct{Schema: fmt.Sprintf("%v", mschema), Table: fmt.Sprintf("%v", mtable), DestDevice: GlobalPConfigs.dSns.DestDrive,
	//			DataFixType: GlobalPConfigs.repair.DataFix, DBType: GlobalPConfigs.dSns.DestDBName, AppTable: dbExec.AppTableS{
	//				IgnoreColumnLength: GlobalPConfigs.rules.RowsR.IgnoreColumnLength,
	//			},
	//		},
	//		tableLimit:   int64(GlobalPConfigs.rules.ChanRowCount * GlobalPConfigs.rules.Mcc),
	//		tableColData: TableAllColumnInfoS{SColumnInfo: s.TcolumnMeta[v]["source"].ColumnM, DColumnInfo: s.TcolumnMeta[getSchemaToTableMap(mschema, mtable)]["dest"].ColumnM},
	//		sshardName:   s.TDistributed["source"][v],
	//		dshardName:   s.TDistributed["dest"][fmt.Sprintf("%v%v%v", mschema, global.SchemaTableSplit, mtable)],
	//		//samp: global.SampPlan{TableSum: map[string]any{"source": dbExec.TableSum{DBType: GlobalPConfigs.dSns.SrcDBName, Schema: schema, Table: tab, Scn: GlobalPConfigs.rules.SampR.Scn, ShardName: "single"},
	//		//	"dest": dbExec.TableSum{DBType: GlobalPConfigs.dSns.DestDBName, Schema: fmt.Sprintf("%v", mschema), Table: fmt.Sprintf("%v", mtable), Scn: GlobalPConfigs.rules.SampR.Scn, ShardName: "single"},
	//		//}, PartitionData: s.TablePartiton[v], CheckRows: global.Count{SrcCount: &srcCount, DstCount: &dstCount, SampCount: &sampRows, SampChunk: &sampChunk, ChunkSqlSum: &srcChunkSqlSum, TokenBucketRich: &tokenBucketRich}},
	//		samp: global.SampPlan{PartitionData: s.TablePartiton[v], CheckRows: global.Count{SrcCount: &srcCount, DstCount: &dstCount, SampCount: &sampRows, SampChunk: &sampChunk, ChunkSqlSum: &srcChunkSqlSum, TokenBucketRich: &tokenBucketRich}},
	//
	//		columnSeq:        s.TcolumnMeta[v]["source"].ColumnS,
	//		chunkStartSource: "all",
	//		errEnd:           &errCode,
	//	}
	//	itype, icolumn, _ := indexInit1(s.IndexData, v)
	//	indexInit(s.IndexData, plan2, v)
	//	//general sql
	//	plan2.samp.GeneralSql = global.GeneralSql{
	//		DBtype:             GlobalPConfigs.dSns.DestDBName,
	//		Schema:             fmt.Sprintf("%v", mschema),
	//		Table:              fmt.Sprintf("%v", mtable),
	//		IgnoreColumnLength: GlobalPConfigs.rules.SampR.IgnoreColumnLength,
	//		IndexType:          itype,
	//		IndexColumn:        icolumn,
	//		Esa:                GlobalPConfigs.schema.EncryptionProperties[v],
	//	}
	//	//general Apply
	//	plan2.samp.ApplySql = global.ApplySql{
	//		DBtype:    GlobalPConfigs.dSns.DestDBName,
	//		Schema:    fmt.Sprintf("%v", mschema),
	//		Table:     fmt.Sprintf("%v", mtable),
	//		DB:        GlobalPConfigs.DDB,
	//		SqlMode:   GlobalPConfigs.rules.SampR.SqlMode,
	//		SqlLogBin: GlobalPConfigs.rules.SampR.SqlLogBin,
	//	}
	//	plan2.idxc = dbExec.IndexColumnStruct{Schema: schema, Table: tab,
	//		Drivce: GlobalPConfigs.dSns.SrcDrive, ColData: plan2.tableColData.SColumnInfo, DBType: GlobalPConfigs.dSns.SrcDBName, ColumnName: plan2.ReadOptimizer.IndexColumn,
	//		TableColumn: plan2.tableColData.SColumnInfo, ShardName: plan2.sshardName,
	//	}
	//	plan2.dstIdxc = dbExec.IndexColumnStruct{Schema: fmt.Sprintf("%v", mschema), Table: fmt.Sprintf("%v", mtable),
	//		Drivce: GlobalPConfigs.dSns.DestDrive, ColData: plan2.tableColData.DColumnInfo, DBType: GlobalPConfigs.dSns.DestDBName, ColumnName: plan2.ReadOptimizer.IndexColumn,
	//		TableColumn: plan2.tableColData.DColumnInfo, ShardName: plan2.dshardName,
	//	}
	//	tableRowsInit(plan2, 1)
	//	planAccount(plan2)
	//	sampInitDataFixFile(plan2)
	//	tailMessageInit(plan2)
	//	abnormalDataAnalyzeMessageInit(plan2)
	//	initMissIndexTmpFile(plan2)
	//	subTaskInit(plan2)
	//	m[plan2.SchemaToTable()] = plan2
	//	global.Wlog.Debug(fmt.Sprintf("(%d) %s initCountSchedulePlan table %s.%s init finish. schedule_plan is %v", 1, event, schema, tab, plan2))
	//}
	//for _, v := range s.TableMiss {
	//	f0 := strings.Split(v, global.SchemaTableSplit)
	//	schema := f0[0]
	//	table := f0[1]
	//	m[fmt.Sprintf("%v.%v", schema, table)] = &SchedulePlan2{}
	//}
	return m
}
// initXlsResult creates the xlsx result worksheet pre-filled with the table's
// identity and an empty structure-result sheet.
func initXlsResult(s *SchemaInitContext) *outPut.WorkSheetResult {
	sheet := outPut.SheetResultData{
		Seq:    s.Seq,
		Schema: s.Schema,
		Table:  s.Table,
		Role:   s.Role,
		Struct: outPut.StructResultSheet{},
	}
	return &outPut.WorkSheetResult{Data: sheet}
}
// initXlsMiss creates the xlsx "missing" worksheet pre-filled with the
// table's identity.
func initXlsMiss(s *SchemaInitContext) *outPut.WorkSheetMiss {
	sheet := outPut.SheetMiss{
		Schema: s.Schema,
		Table:  s.Table,
		Role:   s.Role,
	}
	return &outPut.WorkSheetMiss{Data: sheet}
}
// initSendMsg rebuilds the plan's mq send descriptor around the previously
// prepared base message, attaching the success/finish accounting callbacks
// and the package-level producer.
func initSendMsg(plan *SchedulePlan2) {
	// Read the base message before SendMsg is overwritten below.
	base := plan.MQ.SendMsg.BaseMsg
	plan.MQ.SendMsg = mq.ChanSendMsg{
		Table:             plan.Object.Table,
		MsgData:           base,
		MsgSuccessfulFunc: SendMsgExecSumFunc([]any{plan.MQ.FirstMsg.ProductMsg, int64(1)}),
		MsgFinishFunc:     SendMsgExecSumFunc([]any{[]any{plan.MQ.FirstMsg.CustomerObject}, int64(1)}),
		MsgProducer:       producer,
	}
}
// initRowsSchedulePlanObject copies the table's identity (sequence, log
// sequence, source and mapped schema/table names) out of the init context.
func initRowsSchedulePlanObject(s *SchemaInitContext) Object {
	return Object{
		TaskSeq:      s.Seq,
		ObjectLogSeq: s.ObjectLogSeq,
		Schema:       s.Schema,
		Table:        s.Table,
		MSchema:      s.MSchema,
		MTable:       s.MTable,
	}
}
// optionsInit assembles per-table query options from the global rows-mode
// rules and the table's own attributes (where clauses, parallelism hint,
// repair method).
func optionsInit(s *SchemaInitContext) parDef.Options {
	attrs := s.TableAttributes
	rowsRule := GlobalPConfigs.rules.RowsR
	return parDef.Options{
		Scn:          rowsRule.Scn,
		WhereSql:     attrs.WhereSql,
		WhereAdd:     Fp.ParseWhereClause(attrs.WhereAdd),
		SqlMode:      rowsRule.SqlMode,
		TablePx:      attrs.PX,
		SqlLogBin:    rowsRule.SqlLogBin,
		RepairMethod: GlobalPConfigs.repair.DataFix,
	}
}

// colLobTypeIf reports whether any column in the metadata slice is a LOB or
// longtext column. Non-slice input yields false.
func colLobTypeIf(col any) bool {
	cols, ok := col.([]parDef.ColMetaMapS)
	if !ok {
		return false
	}
	for _, c := range cols {
		if strings.EqualFold(c.TypeBelong, "lob") || strings.EqualFold(c.TypeBelong, "longtext") {
			return true
		}
	}
	return false
}
// srcParameterInit builds the source-side read parameters: connection, table
// identity, LOB flag, chosen check index and its column names, and the
// secure-column lock/unlock configuration.
func srcParameterInit(s *SchemaInitContext) parDef.Parameter {
	var sourceColMeta any
	// BaseMe.Col carries {source, dest} column metadata as a two-element
	// []any; bounds-check the index so a short slice cannot panic here.
	if cols, ok := s.BaseMe.Col.([]any); ok && len(cols) > 0 {
		sourceColMeta = cols[0]
	}
	return parDef.Parameter{
		Con: GlobalPConfigs.db.Source.GetDB("single"),
		Object: parDef.Object{Schema: s.Schema, Table: s.Table, LobLogo: colLobTypeIf(sourceColMeta),
			// Index: the chosen check index metadata, or the zero value
			// when no index was selected.
			Index: func() (res parDef.IndexColumnMe) {
				if s.BaseMe.Index == nil {
					return
				}
				return s.BaseMe.Index.(parDef.IndexColumnMe)
			}(),
			// IndexCol: flat list of the index's column names.
			IndexCol: func() (result []string) {
				if s.BaseMe.Index == nil {
					return
				}
				for _, v := range s.BaseMe.Index.(parDef.IndexColumnMe).ColumnMate {
					result = append(result, v.ColumnName)
				}
				return
			}(),
			LockCol: parDef.Encryption{
				Schema:     s.Schema,
				LockFunc:   s.TableAttributes.SecColumn.LockFuncName,
				UnlockFunc: s.TableAttributes.SecColumn.UnLockFuncName,
				Column:     s.TableAttributes.SecColumn.ColumnSlice,
			},
		},
		Options: optionsInit(s),
	}
}
// dstParameterInit builds the destination-side write parameters: connection,
// mapped table identity, LOB flag, destination column metadata, and the
// secure-column lock/unlock configuration.
func dstParameterInit(s *SchemaInitContext) parDef.Parameter {
	var destColMeta any
	// BaseMe.Col carries {source, dest} column metadata as a two-element
	// []any; bounds-check the index so a short slice cannot panic here.
	if cols, ok := s.BaseMe.Col.([]any); ok && len(cols) > 1 {
		destColMeta = cols[1]
	}
	return parDef.Parameter{
		Con: GlobalPConfigs.db.Target.GetDB("single"),
		Object: parDef.Object{Schema: s.MSchema, Table: s.MTable, LobLogo: colLobTypeIf(destColMeta),
			// Column: the destination column metadata, or nil when absent.
			Column: func() (res []parDef.ColMetaMapS) {
				if destColMeta == nil {
					return
				}
				return destColMeta.([]parDef.ColMetaMapS)
			}(),
			LockCol: parDef.Encryption{
				LockFunc:   s.TableAttributes.SecColumn.LockFuncName,
				UnlockFunc: s.TableAttributes.SecColumn.UnLockFuncName,
				Column:     s.TableAttributes.SecColumn.ColumnSlice,
			},
		},
		Options:    optionsInit(s),
		ExecInsert: parDef.InsertSql{},
	}
}
// newRowsSchedulePlan assembles a rows-mode SchedulePlan2 from the init
// context: source/destination parameters, xlsx sheets, the table pod, a
// zeroed check counter, and a freshly reset status.
func newRowsSchedulePlan(s *SchemaInitContext) *SchedulePlan2 {
	var srcCheckCount int64
	plan := &SchedulePlan2{
		Object:          initRowsSchedulePlanObject(s),
		SrcParameter:    srcParameterInit(s),
		DstParameter:    dstParameterInit(s),
		XlsResult:       initXlsResult(s),
		XlsMiss:         initXlsMiss(s),
		XlsTerminal:     &outPut.WorkSheetTerminal{},
		TPod:            s.TPod,
		rows:            global.RowsPlan{CheckMod: "rows", CheckRows: global.SyncCheckCount{SrcCheckCount: &srcCheckCount}},
		TableAttributes: s.TableAttributes,
	}
	tableStatusInit(plan)
	return plan
}
// notSupportColumnType reports whether the source table uses a column type
// the rows check cannot handle.
//
// NOTE(review): as written, both the "gis" branch and the fallthrough return
// false, so this function can never report an unsupported type and the loop
// is effectively dead code. The branch looks like it was meant to return
// true (the caller treats true as "error(notSupportColumnType)") — confirm
// intent before changing it.
func notSupportColumnType(plan *SchedulePlan2) bool {
	sNew, _ := getTableCol(plan.baseMe.Col)
	for _, v := range sNew {
		if strings.EqualFold(v.TypeBelong, "gis") { //oracle spatial/GIS column type
			return false
		}
	}
	return false
}
// rowsSchedulePlanTableMetaInit validates the plan's table metadata and, on
// failure, marks both the plan status and its pod as errored (-1) with a
// matching message.
func rowsSchedulePlanTableMetaInit(plan2 *SchedulePlan2) {
	markError := func(msg string) {
		atomic.SwapInt64(plan2.Status.ErrEnd, -1)
		plan2.Status.ErrInfo = msg
		plan2.TPod.Result = msg
		atomic.SwapInt64(plan2.TPod.Status, -1)
	}
	if !tableMetaArr1(plan2) {
		markError("error(column != length)")
	}
	if notSupportColumnType(plan2) {
		markError("error(notSupportColumnType)")
	}
}
// partLikeFilter returns the members of eq (lower-cased) matching the
// SQL-LIKE style pattern forcePartName, case-insensitively:
//
//	%part   -> suffix match
//	part%   -> prefix match
//	%part%  -> substring match
//	pa%01   -> contains both halves around the '%'
//	part    -> exact match
//
// The lower-casing and '%'-stripping of the pattern are loop-invariant, so
// they are computed once up front instead of once per candidate.
func partLikeFilter(forcePartName string, eq []string) (res []string) {
	pattern := strings.ToLower(forcePartName)
	stripped := strings.ReplaceAll(pattern, "%", "")
	prefixWild := strings.HasPrefix(pattern, "%")
	suffixWild := strings.HasSuffix(pattern, "%")
	// before/after: the halves around the first '%', used by the pa%01 case.
	var before, after string
	if n := strings.Index(pattern, "%"); n != -1 {
		before, after = pattern[:n], pattern[n+1:]
	}
	for _, v := range eq {
		v = strings.ToLower(v)
		switch {
		case prefixWild && !suffixWild: //%part
			if strings.HasSuffix(v, stripped) {
				res = append(res, v)
			}
		case !prefixWild && suffixWild: //part%
			if strings.HasPrefix(v, stripped) {
				res = append(res, v)
			}
		case prefixWild && suffixWild: //%part%
			if strings.Contains(v, stripped) {
				res = append(res, v)
			}
		case strings.Contains(pattern, "%"): //part%01
			if strings.Contains(v, before) && strings.Contains(v, after) {
				res = append(res, v)
			}
		default: // exact (both sides already lower-cased)
			if pattern == v {
				res = append(res, v)
			}
		}
	}
	return
}
// getSrcDstPartMeta unpacks the combined partition metadata (a []any where
// element 0 is the source and element 1 the destination) into the two typed
// return values. Missing or mistyped elements leave the zero value.
func getSrcDstPartMeta(s *SchemaInitContext) (srcPart, dstPart parDef.PartMetaData) {
	if s.BaseMe.Part == nil {
		return
	}
	for i, item := range s.BaseMe.Part.([]any) {
		meta, ok := item.(parDef.PartMetaData)
		if !ok {
			continue
		}
		switch i {
		case 0:
			srcPart = meta
		case 1:
			dstPart = meta
		}
	}
	return
}
// ignorePartGarth removes the partitions listed in ignorePartName from
// partName (case-insensitively), preserving partName's order. Names surviving
// the comparison come back in the comparator's casing.
func ignorePartGarth(partName []string, ignorePartName []string) (qq []string) {
	add, _, del := ea.CheckSum().LowerArrCmp(partName, ignorePartName)
	// Survivors: names unique to either side of the comparison, i.e. not
	// present in the ignore list. Checked in add-then-del order per name to
	// mirror the comparator's output.
	survivors := append(append([]string{}, add...), del...)
	for _, name := range partName {
		for _, candidate := range survivors {
			if strings.EqualFold(candidate, name) {
				qq = append(qq, candidate)
			}
		}
	}
	return
}
// partMetaInit decides which partitions of the table the check will scan and
// stores them in plan2.Object.PartName.
//
// Selection order (first match wins):
//  1. forcePart == "normal", mismatched partition methods, or no common
//     partitions -> treat the table as unpartitioned ("single")
//  2. forcePart set        -> the partition equal to that name (case-insensitive)
//  3. SpecifiedPartition   -> LIKE-style filter of the common partitions
//  4. default              -> every common partition
//
// IgnorePartition is then subtracted, and an empty final result falls back
// to "single".
func partMetaInit(s *SchemaInitContext, plan2 *SchedulePlan2) {
	var forcePartName = strings.TrimSpace(s.TableAttributes.ForcePart)
	var SpecifiedPartition, IgnorePartition = s.TableAttributes.SpecifiedPartition, s.TableAttributes.IgnorePartition
	srcPart, dstPart := getSrcDstPartMeta(s)
	// eq: partition names present on both source and destination.
	_, eq, _ := ea.CheckSum().LowerArrCmp(srcPart.PartName, dstPart.PartName)
	switch {
	case (forcePartName == "normal") || (srcPart.Meth.First != dstPart.Meth.First) || (len(eq) == 0):
		plan2.Object.PartName = []string{"single"}
	case len(forcePartName) > 0:
		_, plan2.Object.PartName, _ = ea.CheckSum().LowerArrCmp(srcPart.PartName, []string{forcePartName})
	case len(SpecifiedPartition) > 0:
		for _, v := range SpecifiedPartition {
			plan2.Object.PartName = append(plan2.Object.PartName, partLikeFilter(v, eq)...)
		}
	default:
		for _, c := range eq {
			plan2.Object.PartName = append(plan2.Object.PartName, partLikeFilter(c, srcPart.PartName)...)
		}
	}
	switch {
	case len(IgnorePartition) > 0:
		plan2.Object.PartName = ignorePartGarth(plan2.Object.PartName, IgnorePartition)
	}
	if len(plan2.Object.PartName) == 0 {
		plan2.Object.PartName = []string{"single"}
	}
}
// initRowsSchedulePlan is the rows-mode schedule-plan constructor: it builds
// the base plan, then runs the initialization steps in dependency order
// (partitions, metadata validation, write/read optimizers, mq + speed
// monitor, send message, sub-task). The TableMiss flag is carried over from
// the pod so downstream stages can skip missing tables.
func initRowsSchedulePlan(s *SchemaInitContext) (plan2 *SchedulePlan2) {
	plan2 = newRowsSchedulePlan(s)
	partMetaInit(s, plan2)
	rowsSchedulePlanTableMetaInit(plan2)
	// writeOptimizer must run before readOptimizer/speedInit: it sets
	// ChunkSum and AvgRowsSize, which speedInit reads.
	writeOptimizer(plan2)
	readOptimizer(plan2)
	plan2.MQ = mq.NewMonitorMsgMq()
	plan2.MQ.Speed = speedInit(plan2)
	initSendMsg(plan2)
	subTaskInit(plan2)
	if s.TPod.TableMiss {
		plan2.Object.TableMiss = true
	}
	return
}

// InitToSchedulePlan2TableExecFunc is the mq exec callback: it extracts the
// channel payload from f, looks up the schedule-plan constructor registered
// for the configured check mode, invokes it via reflection, and pushes the
// resulting *SchedulePlan2 onto ChanSchedulePlanTable.
func InitToSchedulePlan2TableExecFunc(event string, f any) (r any) {
	_, chanParameter, _ := schemaMetaExecFuncParameterSplit(f.([]any))
	if funcName, ok := ScheduleTaskInitFuncRef[GlobalPConfigs.rules.CheckMode]; ok {
		if funcName.Kind() == reflect.Func {
			arguments := []reflect.Value{
				reflect.ValueOf(chanParameter)}
			m1 := funcName.Call(arguments)
			var m2 *SchedulePlan2
			// NOTE(review): only the last return value survives this loop.
			// The registered constructors appear to return a single
			// *SchedulePlan2, making the loop harmless — confirm before a
			// multi-result constructor is ever registered.
			for i := 0; i < len(m1); i++ {
				m2 = m1[i].Interface().(*SchedulePlan2)
			}
			ChanSchedulePlanTable <- m2
		}
	}
	return
}
// InitToSchedulePlan2TableFinishFunc is the mq normal-quit callback: it
// releases the "scheduleObject" wait slot and closes the plan channel so
// downstream consumers see end-of-stream.
func InitToSchedulePlan2TableFinishFunc(event string, _ any) (r any) {
	ref.MyWaitDel("scheduleObject")
	close(ChanSchedulePlanTable)
	return nil
}
// plan2TableScheduleObjectInit registers the "scheduleObject" wait slot and
// builds the mq sender that converts schema-context messages into
// SchedulePlan2 values pushed onto the given channel, closing it on quit.
func plan2TableScheduleObjectInit(f2 *SchemaInitContext, ChanSchedulePlanTable any) (send *mq.MonitorMsgMq) {
	ref.MyWaitAdd("scheduleObject")
	sender := mq.NewMonitorMsgMq()
	sender.SendMsg.MsgModifyKey = f2.ChanSchemaContext
	sender.NormalQuit = global.EFunc{FuncName: InitToSchedulePlan2TableFinishFunc}
	sender.ExecFunc = []global.EFunc{
		{FuncName: InitToSchedulePlan2TableExecFunc, Params: []any{ChanSchedulePlanTable}},
	}
	return sender
}

// InitToSchedulePlan2Table initializes the execution plans and returns the
// channel onto which one *SchedulePlan2 per table is delivered. The send
// loop runs in a background goroutine; its error is intentionally ignored
// (the channel close signals completion either way).
func InitToSchedulePlan2Table() chan any {
	initCtx := global.GetIoc().GetBean("schemaInitContext").(*SchemaInitContext)
	ChanSchedulePlanTable = make(chan any, initCtx.TableSum)
	go func() {
		_ = plan2TableScheduleObjectInit(initCtx, ChanSchedulePlanTable).NormalChanSendMsg("chR -> func(InitToSchedulePlan2Table)")
	}()
	return ChanSchedulePlanTable
}
