package loadData

import (
	"database/sql"
	"errors"
	"fmt"
	metaD "db2s/MetaInit"
	"db2s/global"
	"db2s/outPut"
	quit "db2s/quitDispos"
	"db2s/tableEr"
	mq "db2s/topic-mq"
	"os"
	"reflect"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)

// dirInit ensures the directory at path exists, creating it (and any missing
// parents) when absent. Returns true on success or when the path already
// exists, false when creation fails. logSeq is only used for log correlation.
func dirInit(path string, logSeq int64) bool {
	var (
		vlog  string
		event = "[dirInit]"
	)
	// Capture the immediate caller's identity for log correlation.
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	vlog = fmt.Sprintf("(%v) %v %v Start directory initialization.", logSeq, callFuncInfo, event)
	WLog.Debug(vlog)
	// errors.Is(err, os.ErrNotExist) is the modern replacement for
	// os.IsNotExist; any other Stat error is treated as "path usable",
	// matching the original behavior.
	if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
		if err1 := os.MkdirAll(path, os.ModePerm); err1 != nil {
			WLog.Error(fmt.Sprintf("(%v) %v %v create dir path fail. error info is %s", logSeq, callFuncInfo, event, err1))
			return false
		}
	}
	return true
}

// loadBreakPointF reports whether breakpoint (resume) mode is enabled in the
// load rules of the given configuration.
func loadBreakPointF(config *GlobalParametersConfig) bool {
	enabled := config.rules.Load.Breakpoint
	return enabled
}
// breakPointFuncMap returns the dispatch table mapping a check-mode name to
// the reflected function that reports its breakpoint status.
func breakPointFuncMap() map[string]reflect.Value {
	dispatch := make(map[string]reflect.Value, 1)
	dispatch["load"] = reflect.ValueOf(loadBreakPointF)
	return dispatch
}

// getBreakPointStatus looks up the breakpoint-status function registered for
// the configured check mode and invokes it via reflection. Unknown check
// modes report false.
func getBreakPointStatus(config *GlobalParametersConfig) bool {
	fn, ok := breakPointFuncMap()[config.rules.CheckMode]
	if !ok {
		return false
	}
	results := fn.Call([]reflect.Value{reflect.ValueOf(config)})
	return results[0].Interface().(bool)
}

// expensiveOperation periodically watches a single table's mq counters and
// returns when the table's work appears complete: either an error was
// recorded (Status.ErrEnd < 0), or the start-timing counter reached sum and
// the producer-send / consumer-receive level counters are equal. On exit it
// always signals done and releases the wait group.
func expensiveOperation(wg *sync.WaitGroup, message mq.Message, done chan struct{}, sum int64, logSeq int64) {
	var (
		tableName = string(message.Body)
		// The schedule plan for this table travels in the message properties.
		plan  = message.Properties["plan"].(map[string]*SchedulePlan2)[tableName]
		event = "[expensiveOperation]"
		vlog  string
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	defer func() {
		done <- struct{}{}
		wg.Done()
	}()
	vlog = fmt.Sprintf("(%d) %v %v Start monitoring table %v.%v tasks and when to start the exit mechanism.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
	WLog.Debug(vlog)
	ticker := time.NewTicker(global.MonitorInterval * time.Second)
	// fix: the ticker was never stopped, leaking its timer for the life of
	// the process; stop it when this monitor exits.
	defer ticker.Stop()
	// The original for/select had a single case, so ranging over the ticker
	// channel is equivalent and simpler.
	for range ticker.C {
		if e := atomic.LoadInt64(plan.Status.ErrEnd); e < 0 {
			return
		}
		if atomic.LoadInt64(plan.mqProductCustomerMonitor.MqStartTiming) == sum {
			if atomic.LoadInt64(plan.mqProductCustomerMonitor.MqLevelProductSend) == atomic.LoadInt64(plan.mqProductCustomerMonitor.MqLevelCustomerReceive) {
				vlog = fmt.Sprintf("(%d) %v %v Table %v.%v starts to start the exit task detection mechanism.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
				WLog.Debug(vlog)
				return
			}
		}
	}
}

// loadXlsWriterTerminalData sends a single table's execution result to the
// xls result channels: sheet1 (ResultStatisticsMq) receives the per-table
// load summary, sheet2 (BarResultStatisticMq) receives the tail statistics
// row. Tables with a recorded error (Status.ErrEnd < 0) are reported with
// status "error", otherwise "finish". Always returns nil; an unknown table
// is only logged as a warning.
func loadXlsWriterTerminalData(plan *SchedulePlan2, logseq int64) error {
	var (
		event      = "[xlsWriterTerminalData]"
		vlog       string
		syncStatus string
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	vlog = fmt.Sprintf("(%d) %v %v Start writing the execution results of table %v.%v into xls...", logseq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
	WLog.Debug(vlog)

	if v, ok := terminalPods[getTableName(plan.Object.Schema, plan.Object.Table)]; ok {
		// fix: removed a second, byte-identical "Start writing..." debug log
		// that duplicated the one emitted just above.
		if e := atomic.LoadInt64(plan.Status.ErrEnd); e < 0 {
			v.Load.SyncInfo = "error"
			syncStatus = "error"
			vlog = fmt.Sprintf("(%d) %v %v There is an error in table %v.%v Modify the result status information to error.", logseq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
			WLog.Warn(vlog)
		} else {
			syncStatus = "finish"
		}
		v.Load.Seq = fmt.Sprintf("%v", plan.Object.TaskSeq)
		// sheet1: per-table load summary.
		// NOTE(review): ResultStatisticsMq is only created when GeneralResult
		// is enabled; a send on a nil channel blocks forever — confirm this
		// path only runs with xls output enabled.
		ResultStatisticsMq <- v.Load
		// sheet2: tail statistics row.
		resultData := global.XLoadSheetTail{Seq: fmt.Sprintf("%v", plan.Object.TaskSeq), Schema: plan.Object.Schema,
			Table: plan.Object.Table, SyncSum: plan.subTaskInfo.RecordCount, SelectRows: plan.subTaskInfo.SAccumulate, InsertRows: plan.subTaskInfo.DAccumulate,
			SyncStatus: syncStatus, TableCost: global.Second(plan.Status.SubTaskBeginTime, time.Now().UnixNano()),
			Comment: fmt.Sprintf("%v", plan.subTaskInfo.ErrorInfo.Load())}
		BarResultStatisticMq <- resultData
		vlog = fmt.Sprintf("(%d) %v %v The execution result of table %v.%v is sent successfully. result is {%v}", logseq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table, v)
		WLog.Debug(vlog)
	} else {
		vlog = fmt.Sprintf("(%d) %v %v Table %v.%v does not exist and the execution results cannot be written to xls!!", logseq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
		WLog.Warn(vlog)
	}
	return nil
}

// writeBreakPointData appends the given value s (typically a finished table
// name) to the breakpoint/resume file fin through the text file writer.
// Returns an error when the append fails.
func writeBreakPointData(fin *os.File, s any, logSeq int64) error {
	WriteTextI := outPut.FileOut("text", map[string]any{"fileN": fin, "fileName": global.BreakPointFileName, "logSeq": logSeq,
		"bufSize": 1024 * 1024 * 4, "sqlType": "sql", "rowsS": "\n",
	})
	if !WriteTextI.AppendWrite("", s) {
		// fix: errors.New(fmt.Sprintf("%v", literal)) was a no-op wrapper
		// around a constant string.
		return errors.New("write data fail!")
	}
	return nil
}
// getXlsWriteTerminalFunc returns the dispatch table mapping a check-mode
// name to the reflected function that writes terminal results to xls.
func getXlsWriteTerminalFunc() map[string]reflect.Value {
	table := make(map[string]reflect.Value, 1)
	table["load"] = reflect.ValueOf(loadXlsWriterTerminalData)
	return table
}
// xlsWriterTerminalData dispatches the xls terminal-result writer registered
// for the configured check mode and returns its error, or nil when no writer
// is registered or the writer succeeded.
func xlsWriterTerminalData(plan *SchedulePlan2, logSeq int64) error {
	funcName, ok := getXlsWriteTerminalFunc()[GlobalPConfigs.rules.CheckMode]
	if !ok || funcName.Kind() != reflect.Func {
		return nil
	}
	args := []reflect.Value{
		reflect.ValueOf(plan),
		reflect.ValueOf(logSeq),
	}
	s := funcName.Call(args)
	// fix: the original wrapped the result in errors.New unconditionally,
	// turning a nil error into a non-nil "<nil>" error, so every caller's
	// err != nil branch always fired even on success. Propagate the callee's
	// error only when it is actually non-nil.
	if err, _ := s[0].Interface().(error); err != nil {
		return err
	}
	return nil
}
// analyzeTable runs the source-side "analyze" maintenance operation for the
// table described by plan, using the configured DB handles and write
// optimizer settings. Returns the driver error on failure, nil on success.
func analyzeTable(plan *SchedulePlan2, logSeq int64) error {
	event := "[analyzeTable]"
	pc, _, _, _ := runtime.Caller(1)
	fn := runtime.FuncForPC(pc)
	file, line := fn.FileLine(pc)
	caller := fmt.Sprintf("Call functions %v Call File %v:%v", fn.Name(), file, line)
	WLog.Debug(fmt.Sprintf("(%v) %v %v Start the analyze operation of table %v.%v", logSeq, caller, event, plan.Object.Schema, plan.Object.Table))
	// Assemble the analyze input once, then dispatch to the driver-specific
	// table-active implementation.
	input := global.TableActiveInput{
		LogSeq: logSeq,
		DB:     GlobalPConfigs.SDB,
		TableInfo: global.TableInfo{
			Schema: plan.Object.Schema,
			Table:  plan.Object.Table,
		},
		Optimizer: global.WriteOptimizer{
			SqlMode:   GlobalPConfigs.rules.Load.SqlMode,
			SqlLogBin: GlobalPConfigs.rules.Load.SqlLogBin,
		},
	}
	_, err := Er.TActive(Er.TableActive{DBType: GlobalPConfigs.dSns.SrcDBName}).Analyze(input)
	if err != nil {
		WLog.Error(fmt.Sprintf("(%v) %v %v The analyze operation of table %v.%v failed. error info is {%v}", logSeq, caller, event, plan.Object.Schema, plan.Object.Table, err))
		return err
	}
	WLog.Debug(fmt.Sprintf("(%v) %v %v The analyze operation of table %v.%v was successful.", logSeq, caller, event, plan.Object.Schema, plan.Object.Table))
	return nil
}

// mqTableStatusMonitor listens to a single table's mq message status and, when
// the table finishes (producer and consumer sequence counters equal) or
// errors, closes the write queue, flushes the xls result, records breakpoint
// data, and reports the final status to the bar. On exit it always signals
// done1 and releases the wait group.
func mqTableStatusMonitor(done, done1 chan struct{}, wg *sync.WaitGroup, plan *SchedulePlan2, logSeq int64) {
	var (
		// fix: event label was copy-pasted as "[expensiveOperation]",
		// mislabeling every log line from this monitor.
		event = "[mqTableStatusMonitor]"
		vlog  string
	)
	pc, _, _, _ := runtime.Caller(1)
	callingFunc := runtime.FuncForPC(pc)
	callFile, callLine := callingFunc.FileLine(pc)
	callFunc := callingFunc.Name()
	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
	defer func() {
		done1 <- struct{}{}
		wg.Done()
	}()
	vlog = fmt.Sprintf("(%d) %v %v Start monitoring the mq queue status of table %v.%v. The default timeout is 360 days.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
	WLog.Debug(vlog)
	for {
		select {
		case d, ok := <-plan.mqProductCustomerMonitor.ProductMessage:
			if !ok {
				// Channel closed: production is over. Decide between the
				// error path and the normal completion path.
				if e := atomic.LoadInt64(plan.Status.ErrEnd); e < 0 {
					close(plan.writeOptimizer.QueueData)
					vlog = fmt.Sprintf("(%d) %v %v An error occurs during the execution of table %v.%v, and error handling is performed.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
					WLog.Warn(vlog)
					if err := xlsWriterTerminalData(plan, logSeq); err != nil {
						return
					}
					PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "error")
					return
				}
				// fix: MqProductSeq is updated with atomic.AddInt64 below, so
				// the plain pointer dereference here was a data race; read
				// both counters atomically.
				if atomic.LoadInt64(plan.mqProductCustomerMonitor.MqProductSeq) == atomic.LoadInt64(plan.mqProductCustomerMonitor.MqCustomerSeq) {
					close(plan.writeOptimizer.QueueData)
					vlog = fmt.Sprintf("(%d) %v %v It is detected that the message queue production quantity and consumption quantity in table %v.%v are equal.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
					WLog.Debug(vlog)
					if strings.EqualFold(GlobalPConfigs.rules.Load.LoadMode, "input") {
						if GlobalPConfigs.rules.Sync.Analyze {
							if err := analyzeTable(plan, logSeq); err != nil {
								PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
								return
							}
						}
					}
					// Record the finished table for breakpoint/resume.
					st := getTableName(plan.Object.Schema, plan.Object.Table)
					if err := writeBreakPointData(GlobalPConfigs.rules.BreakPointFin, []*string{&st}, logSeq); err != nil {
						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
						return
					}
					if err := xlsWriterTerminalData(plan, logSeq); err != nil {
						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
						return
					}
					PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
					vlog = fmt.Sprintf("(%v) %v %v Table %v.%v data migration is completed.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
					WLog.Info(vlog)
					return
				}
				// NOTE(review): when the channel is closed but the counters
				// are not yet equal, the loop immediately re-receives from
				// the closed channel, busy-spinning until consumers catch
				// up — confirm this is acceptable load-wise.

			} else {
				// Accumulate the produced message count per batch.
				for _, v := range d {
					atomic.AddInt64(plan.mqProductCustomerMonitor.MqProductSeq, int64(len(v)))
				}
			}
		case <-done:
			// Idle signal: no queue updates recently, stop accepting writes.
			close(plan.mqProductCustomerMonitor.ProductMessage)
			vlog = fmt.Sprintf("(%d) %v %v It is detected that there is no update in the message queue of table %v.%v within 5 seconds, and the queue writing is closed.", logSeq, callFuncInfo, event, plan.Object.Schema, plan.Object.Table)
			WLog.Debug(vlog)
		}
	}
}

// NewSchedulePlanConfig initializes the global shared configuration: it opens
// the source DB connection pool, assembles the GlobalParametersConfig,
// prepares the result/breakpoint/monitor output files, builds the shutdown
// object, and registers everything in the IoC container. Returns the config
// or the first initialization error.
func NewSchedulePlanConfig(l *SchemaInitContextConfig) (*GlobalParametersConfig, error) {
	var (
		err           error
		SDB           map[string]*sql.DB
		SMS, SMScurry map[string]int64
		timestamp     = time.Now().Format("2006-01-02T15-04-05")
		m             = l.GtCheckConfig
		tl            = l.TableList
		f1            any
	)
	SMScurry = make(map[string]int64)
	// Open the source-side connection pool through the driver-specific
	// connection factory.
	if f1, err = Er.CActive(Er.ConnActive{DBType: m.SecondaryL.DSnsV.SrcDBName}).Open(global.Conn{
		Device:    m.SecondaryL.DSnsV.SrcDrive,
		Jdbc:      m.SecondaryL.DSnsV.SrcJdbc,
		MaxOpen:   m.SecondaryL.DBPoolV.MaxOpenConn,
		MaxIdle:   m.SecondaryL.DBPoolV.MaxIdleConn,
		LeftTime:  m.SecondaryL.DBPoolV.ConnMaxLifeTime,
		IdleTime:  m.SecondaryL.DBPoolV.ConnMaxIdleTime,
		Optimizer: m.SecondaryL.RulesV.Sync.WriteParameterOpt,
	}); err != nil {
		return nil, err
	} else {
		// Open returns an opaque value; when it is a map it carries the DB
		// handles under "map" and the connection sums under "sum".
		// NOTE(review): other return shapes are silently ignored, leaving SDB
		// and SMS nil — confirm Open always yields this map for this DB type.
		switch fmt.Sprintf("%v", reflect.TypeOf(f1)) {
		case "map[string]interface {}":
			if v1, ok := f1.(map[string]any)["map"]; ok {
				SDB = v1.(map[string]*sql.DB)
			}
			if v1, ok := f1.(map[string]any)["sum"]; ok {
				SMS = v1.(map[string]int64)
			}
		}
	}
	// The "curry" (current) connection position is half of the high position.
	for k, v := range SMS {
		SMScurry[k] = v / 2
	}
	config := &GlobalParametersConfig{
		repair:       m.SecondaryL.RepairV,
		rules:        m.SecondaryL.RulesV,
		dSns:         m.SecondaryL.DSnsV,
		schema:       m.SecondaryL.SchemaV,
		TableList:    tl,
		log:          m.SecondaryL.LogV,
		structs:      m.SecondaryL.RulesV.Struct,
		result:       m.SecondaryL.ResultV,
		activeSwitch: m.SecondaryL.ActiveSwitchV,
		SDB:          SDB,
		InstanceConfig: global.DBInstanceConfig{
			LowPositionConn:   10,
			CurryPositionConn: map[string]map[string]int64{"source": SMScurry},
			HighPositionConn:  map[string]map[string]int64{"source": SMS},
		},
		DBPool: m.SecondaryL.DBPoolV,
	}
	// Mtc bounds the number of tables processed concurrently.
	global.GTableCurry = make(chan struct{}, m.SecondaryL.RulesV.Mtc)
	// Initialize the "result" output directory.
	for _, v := range []string{"result"} {
		if !dirInit(v, 1) {
			return nil, errors.New(fmt.Sprintf("%v", "init dir fail."))
		}
	}
	// Build the timestamped xls file path unless an explicit path ("/") was
	// configured.
	var xlsFileName string
	if config.result.GeneralResult {
		xlsFileName = config.result.XlsV.XlsFileName
		if !strings.Contains(xlsFileName, "/") {
			xlsFileName = fmt.Sprintf("./result/%v_%v", xlsFileName, timestamp)
		}
	}
	if config.result.GeneralResult {
		ResultStatisticsGeneral = ResultGeneral{Xls: XlsResult{
			XlsFileName:        xlsFileName,
			XlsSheetNameGather: config.result.XlsV.XlsSheetNameGather,
			XlsTatilGather:     config.result.XlsV.XlsTailGather,
			XlsSafetySwitch:    config.result.XlsV.XlsSafetySwitch,
		}, Type: "xls"}
		// The sheet1 result channel only exists when the xls file could be
		// created; senders elsewhere assume it is non-nil.
		if ResultStatisticsGeneral.InitResultSaveFile("xls") {
			ResultStatisticsMq = make(chan any, config.rules.QueueSize)
		}
	}
	BarResultStatisticMq = make(chan any, config.rules.QueueSize)
	// Breakpoint (resume) mode keeps the existing file; otherwise it is
	// truncated so a fresh run starts with an empty breakpoint record.
	if getBreakPointStatus(config) {
		if config.rules.BreakPointFin, err = os.OpenFile(fmt.Sprintf("%v", global.BreakPointFileName), os.O_APPEND|os.O_RDWR|os.O_CREATE, 0664); err != nil {
			WLog.Error(fmt.Sprintf("init bar monitor finish file fail. error info is %s", err))
			return nil, err
		}
	} else {
		if config.rules.BreakPointFin, err = os.OpenFile(fmt.Sprintf("%v", global.BreakPointFileName), os.O_TRUNC|os.O_APPEND|os.O_RDWR|os.O_CREATE, 0664); err != nil {
			WLog.Error(fmt.Sprintf("init bar monitor finish file fail. error info is %s", err))
			return nil, err
		}
	}
	// Per-run active-data and bar-monitor result files, timestamped.
	if config.rules.ActiveDataFin, err = os.OpenFile(fmt.Sprintf("%v_%v.result", global.ActiveDataResultFileName, timestamp), os.O_TRUNC|os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0664); err != nil {
		WLog.Error(fmt.Sprintf("init active data file fail. error info is %s", err))
		return nil, err
	}
	if config.TtyBarFin, err = os.OpenFile(fmt.Sprintf("%v_%v.result", global.MonitorBarFinishFileName, timestamp), os.O_TRUNC|os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0664); err != nil {
		WLog.Error(fmt.Sprintf("init bar monitor finish file fail. error info is %s", err))
		return nil, err
	}
	// Output mode regenerates files from scratch: clear the previous dump.
	if strings.EqualFold(config.rules.Load.LoadMode, "output") {
		if err = os.RemoveAll(config.rules.Load.LoadFilePath); err != nil {
			return nil, err
		}
	}
	// Register the resources (DB handles, open files, xls save hook) with the
	// shutdown/cleanup handler.
	closeObject = quit.GetClose(map[string]any{
		"dsn": map[string]any{"name": m.SecondaryL.DSnsV.SrcDBName,
			"drive": m.SecondaryL.DSnsV.SrcDrive,
			"jdbc":  m.SecondaryL.DSnsV.SrcJdbc,
		},
	}, SDB, []*os.File{config.rules.BreakPointFin, config.rules.ActiveDataFin, config.TtyBarFin}, ResultStatisticsGeneral.SaveFile)

	global.GetIoc().RegisterContext("globalConfig", config)
	global.GetIoc().RegisterContext("closeObject", closeObject)
	return config, nil
}

// NewSchemaInitConfig builds the global init-stage object holding the
// source-side table metadata accessor, registers it in the IoC container
// under "isObject", and stores it in GlobalISObjects for package-wide use.
func NewSchemaInitConfig() {
	gobject := &GlobalISObject{}
	gobject.TableMetaData = map[string]Er.TablesMetaInfoEr{
		"source": Er.MetaInfo(Er.TableInfoMeta{
			DBType: GlobalPConfigs.dSns.SrcDBName,
		}),
	}
	global.GetIoc().RegisterContext("isObject", gobject)
	GlobalISObjects = gobject
	// fix: dropped the redundant bare return at the end of a void function.
}
// initSchemaBean wires up the module's global beans: it registers the raw
// input configuration, builds the global schedule-plan configuration, and
// initializes the schema objects. Returns an error when any step fails.
func initSchemaBean(config *SchemaInitContextConfig) error {
	// Register the input configuration in the IoC container.
	global.GetIoc().RegisterContext("configParameter", config.GtCheckConfig)
	// Build and register the shared schedule-plan configuration.
	var err error
	GlobalPConfigs, err = NewSchedulePlanConfig(config)
	if err != nil {
		return err
	}
	if GlobalPConfigs == nil {
		return errors.New("初始化 NewSchedulePlanConfig 执行计划配置失败")
	}
	// Initialize the struct objects and register them.
	NewSchemaInitConfig()
	if GlobalISObjects == nil {
		return errors.New("初始化 NewSchemaInitConfig 执行计划配置失败")
	}
	return nil
}

// schemaContext initializes the module context: it builds the global config
// beans, filters the configured table input groups (join/left/union style)
// against live metadata, and assembles one metadata descriptor per task
// table group. Runs only while SchemaContext is nil; otherwise returns nil.
// NOTE(review): the nil check happens before the lock is taken and is not
// re-checked after acquiring it, and SchemaContext itself is never assigned
// here — presumably the caller stores the returned value; confirm this is
// not invoked concurrently before that assignment.
func schemaContext(config *SchemaInitContextConfig) *CheckSchemaMetaData {
	var (
		vlog  string
		event = "[schemaContext]"
	)
	if SchemaContext == nil {
		lock.Lock()
		defer lock.Unlock()
		meta := GetCheckSchemaMetaData()
		if err := initSchemaBean(config); err != nil {
			fmt.Println("创建执行计划上下文 NewSchedulePlanContext 失败", err)
			return nil
		}
		l := metaD.GetTableFilterO()
		o1 := metaD.GetSchemaObject()
		pc, _, _, _ := runtime.Caller(1)
		callingFunc := runtime.FuncForPC(pc)
		callFile, callLine := callingFunc.FileLine(pc)
		callFunc := callingFunc.Name()
		callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
		vlog = fmt.Sprintf("(%d) %v %v Start initializing the schema||table information of the table object.", 1, callFuncInfo, event)
		WLog.Info(vlog)
		for _, v := range config.GtCheckConfig.SecondaryL.SchemaV.TableInput {
			var keys, missKeys []string
			// Initialize the schedule-plan table filter for this input group.
			l.TableMetaData = GlobalISObjects.TableMetaData
			l.SDB = GlobalPConfigs.SDB
			l.IgDest = true
			// Partition the group's tables into reachable ("ok") and missing.
			for k, v1 := range l.InitToActiveObject(v.Tables) {
				if strings.EqualFold(v1, "ok") {
					keys = append(keys, k)
				} else {
					missKeys = append(missKeys, k)
				}
			}
			// Groups with no reachable tables are dropped entirely.
			if len(keys) > 0 {
				o := metaD.GetSchemaObject()
				o.TableList = keys
				o.TableMiss = missKeys
				o1.TaskTableList = append(o1.TaskTableList, o)
			}
		}
		vlog = fmt.Sprintf("(%v) %v %v The column information initialization of the table object is completed.", 1, callFuncInfo, event)
		WLog.Info(vlog)
		// Register the assembled schema object in the IoC container.
		global.GetIoc().RegisterContext("schemaContext", o1)
		// Build one metadata descriptor per task table group.
		for _, v := range o1.TaskTableList {
			v1 := metaD.GetMetaDataO()
			v1.TableList = v.TableList
			v1.TableMiss = v.TableMiss
			v1.TableMetaData = l.TableMetaData
			v1.Schema = config.GtCheckConfig.SecondaryL.SchemaV
			v1.Rules = config.GtCheckConfig.SecondaryL.RulesV
			v1.DSns = config.GtCheckConfig.SecondaryL.DSnsV
			v1.SDB = GlobalPConfigs.SDB["single"]
			v1.IgDest = true
			meta.TableMD = append(meta.TableMD, v1)
		}
		meta.ActiveSwitchV = config.GtCheckConfig.SecondaryL.ActiveSwitchV
		return meta
	}
	return nil
}
