package tkM

import (
	"fmt"
	"github.com/pkg/errors"
	"db2s/full"
	"db2s/global"
	"db2s/go-log/log"
	"db2s/ref"
	mq "db2s/topic-mq"
	"sync"
	"sync/atomic"
	"time"
)

// ColumnMetaS describes a varchar column of the table.
// NOTE(review): field names suggest "the widest varchar column" — confirm
// against the code that populates it.
type ColumnMetaS struct {
	bigVarcharName string // column name
	bigVarcharType string // declared column type
}

// PartMetaS holds table-partition metadata collected by the partition query.
type PartMetaS struct {
	pType string // partition type
	pSum  int    // number of partitions
}

// indexMetaS holds the result of the index query; result is consumed by
// normalIndexChoose in taskIndexChooseListeners.
type indexMetaS struct {
	result any
	name   string
}

// taskModeResultS aggregates per-table metadata collected along the
// task*Query listener chain; it travels between topics in the message
// properties under the "taskMetaResult" key.
type taskModeResultS struct {
	fixActive string // action: create or alter
	column    ColumnMetaS
	part      PartMetaS
	index     indexMetaS
}

// metaGlobalInitActive stamps the sub-task begin time on the plan and,
// when the "bar" output mode is configured, switches the task bar status
// of this sub-task to "running". A panic inside the function is converted
// into the returned error.
func metaGlobalInitActive(message mq.Message) (err error) {
	const event = "[metaGlobalInitActive]"
	plan := message.Properties["plan"].(*SchedulePlan2)
	defer func() {
		// Turn a panic into an error, but never clobber an existing one.
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	plan.Status.SubTaskBeginTime = time.Now().UnixNano()
	if GlobalPConfigs.result.Teletypewriter != "bar" {
		return
	}
	PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "running")
	log.MainLog().Info(fmt.Sprintf("(%d) %s Add task Bar Run object %v successfully", plan.Object.ObjectLogSeq, event, getTableName(plan.Object.Schema, plan.Object.Table)))
	return
}
// srcCount counts the rows of the plan's source table and records the
// result on the sub-task counter (RecordCount) and the task total
// (Task.Sum). Failures are pushed into the task-bar error state and the
// error log; a panic is converted into the returned error.
func srcCount(plan *SchedulePlan2) (err error) {
	var (
		event       = "[srcCount]"
		tableSum    int64
		countResult any
	)
	defer func() {
		// Turn a panic into an error unless one is already set.
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	log.MainLog().Info(fmt.Sprintf("(%v) %v Start the row count in %v table %v.", plan.Object.ObjectLogSeq, event, GlobalPConfigs.dSns.SrcDBName, getTableName(plan.Object.Schema, plan.Object.Table)))
	// Build a query context against source and destination DSNs.
	// NOTE(review): the meaning of "left" is inferred from usage — confirm
	// against full.NewFull.
	s := full.NewFull([]string{GlobalPConfigs.dSns.SrcDBName, GlobalPConfigs.dSns.DestDBName}, "left", plan.Object.Role)
	s.Parameter1 = append(s.Parameter1, plan.SrcParameter)
	if countResult, err = full.GetFullCountST(s); err != nil {
		err = ref.ErrAddPrintf(event, err)
		PlanContext.TaskBarSubsErrorSwap(plan.subTaskInfo, errorActive(plan, fmt.Sprintf("%v", err)))
		// NOTE(review): countResult.(global.Return) panics if GetFullCountST
		// returns a nil result alongside the error; the recover above would
		// absorb that panic but this log line would then be lost. Also err is
		// wrapped a second time here (already wrapped two lines up) — confirm
		// both are intended.
		log.ErrorLog().Error(fmt.Sprintf("exec count for table %v fail. exec sql is {%v},error is {%v}", getTableName(plan.Object.Schema, plan.Object.Table), countResult.(global.Return).Sql, ref.ErrAddPrintf(event, err)))
		return
	} else {
		// On success the result is the plain row count.
		tableSum = countResult.(int64)
	}
	atomic.SwapInt64(plan.subTaskInfo.RecordCount, tableSum)
	atomic.AddInt64(plan.TPod.Task.Sum, tableSum)
	log.MainLog().Info(fmt.Sprintf("(%d) %v The row count in table %v of %v was successfully counted. The result is %v.", plan.Object.ObjectLogSeq, event, getTableName(plan.Object.Schema, plan.Object.Table), GlobalPConfigs.dSns.SrcDBName, tableSum))
	return
}
// taskBaseTableCountF starts the asynchronous source-table row count
// (tracked by wg, which the caller waits on) and synchronously fetches the
// table's data size into plan.TPod.Task.DataSize. When counting is
// disabled by configuration it only sets the MQ exec-quit flag.
func taskBaseTableCountF(wg *sync.WaitGroup, message mq.Message) (err error) {
	var event = "[taskBaseTableCountF]"
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	if GlobalPConfigs.rules.Task.IgCount {
		plan.MQ.SetQuitExec()
		return
	}
	wg.Add(1)
	go func() {
		defer func() {
			wg.Done()
			plan.MQ.SetQuitExec()
		}()
		// NOTE(review): this goroutine can outlive the function (the caller
		// holds the WaitGroup), so writing the shared named return here was a
		// data race and the value was usually lost. srcCount already reports
		// and logs its own failures, so the error stays local.
		if countErr := srcCount(plan); countErr != nil {
			return
		}
	}()
	var result any
	s := full.NewFull([]string{GlobalPConfigs.dSns.SrcDBName}, "left", plan.Object.Role)
	s.Parameter1 = append(s.Parameter1, plan.SrcParameter)
	if result, err = s.GetFullDataLengthST(); err != nil || result == nil {
		return
	}
	plan.TPod.Task.DataSize = result.(float64)
	// This log runs only after the data size has been obtained; the previous
	// message wrongly reported a failure on the success path.
	log.MainLog().Info(fmt.Sprintf("(%v) %v Obtained data size of table %v.%v successfully.", plan.Object.ObjectLogSeq, event, plan.Object.Schema, plan.Object.Table))
	return
}
// taskBaseQueryF publishes the first struct-query message for the plan
// (carrying a fresh taskModeResultS for the downstream listeners to fill
// in) and then arms the MQ product/finish counters for the first and
// second message stages. Returns an error when the message cannot be sent.
func taskBaseQueryF(message mq.Message) (err error) {
	var event = "[taskBaseQueryF]"
	defer func() {
		if r := ref.RecoverPanic(event, recover()); err == nil && r != nil {
			err = r
		}
	}()
	var plan = message.Properties["plan"].(*SchedulePlan2)
	var taskMetaResult = &taskModeResultS{}
	if !plan.MQ.SendMsgF(getMsgSendData(plan, map[string]any{"logSeq": plan.Object.ObjectLogSeq, "topic": taskStructQuery, "taskMetaResult": taskMetaResult})) {
		// was errors.New(fmt.Sprintf("send msg fail")): Sprintf with no
		// format arguments is redundant (staticcheck S1039).
		return errors.New("send msg fail")
	}
	plan.MQ.SpeedLimitSwitch = false
	plan.MQ.FirstObjectProductSwap(1)
	plan.MQ.FirstMsgProductAdd()
	plan.MQ.FirstMsgFinishMarkSwap()
	plan.MQ.SecondMsgProductAdd()
	plan.MQ.SecondMsgFinishMarkSwap()
	return
}

// taskBaseQueryListeners is the listener executor for the taskBaseQuery
// topic. It initializes the sub-task status, wires the MQ status monitor
// quit hooks, starts the asynchronous row count, publishes the first
// struct-query message, and finally waits for the count goroutine.
func taskBaseQueryListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	var event = "[taskBaseQueryListeners]"
	defer func() {
		_ = ref.RecoverPanic(event, recover())
	}()
	var plan *SchedulePlan2
	var wg sync.WaitGroup
	// NOTE(review): recoverException appears to duplicate the RecoverPanic
	// defer above — confirm whether both are needed.
	defer recoverException(event)
	defer func() {
		// Mark this message as fully consumed on the global send body.
		baseMsgSendBody.MsgCurryDone()
		baseMsgSendBody.MsgCurryWgDone()
	}()
	plan = message.Properties["plan"].(*SchedulePlan2)
	defer deleteTopic(map[string]any{"subTask": plan.subTaskInfo, "topic": taskBaseQuery, "outFay": GlobalPConfigs.result.Teletypewriter})
	if metaGlobalInitActive(message) != nil {
		return mq.ReconsumeLater
	}
	// Install the abnormal/normal quit callbacks before starting the monitor.
	plan.MQ.Wg, plan.MQ.AbnormalQuit, plan.MQ.NormalQuit = &wg, global.EFunc{FuncName: mqStatusMonitorAbnormalQuit, Params: plan}, global.EFunc{FuncName: mqStatusMonitorNormalQuit, Params: plan}
	plan.MQ.MqTableStatusMonitor(getTableName(plan.Object.Schema, plan.Object.Table))
	if taskBaseTableCountF(&wg, message) != nil {
		return mq.ReconsumeLater
	}
	if taskBaseQueryF(message) != nil {
		return mq.ReconsumeLater
	}
	// Wait for the srcCount goroutine spawned by taskBaseTableCountF.
	wg.Wait()
	return mq.ConsumeSuccess
}

// taskStructQueryListeners is the listener executor for the taskStructQuery
// topic: it validates the table/object role and then forwards the plan and
// accumulated metadata to the taskPartitionQuery topic.
func taskStructQueryListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	event := "[taskStructQueryListeners]"
	defer func() {
		_ = ref.RecoverPanic(event, recover())
	}()
	plan := message.Properties["plan"].(*SchedulePlan2)
	defer deleteTopic(map[string]any{"subTask": plan.subTaskInfo, "topic": taskStructQuery, "outFay": GlobalPConfigs.result.Teletypewriter})
	if err := tableObjectRoleActiveCheck(message); err != nil {
		plan.errorAction(message, plan.Object.ObjectLogSeq, err)
		return mq.ReconsumeLater
	}
	payload := getMsgSendData(plan, map[string]any{
		"logSeq":         plan.Object.ObjectLogSeq,
		"topic":          taskPartitionQuery,
		"taskMetaResult": message.Properties["taskMetaResult"],
	})
	if sendMsg(getCheckMod(), taskPartitionQuery, getTableName(plan.Object.Schema, plan.Object.Table), payload) {
		return mq.ConsumeSuccess
	}
	return mq.ReconsumeLater
}
// classQueryMode reports whether the metadata class named by className
// ("part" or "index") must be queried under at least one of the configured
// task modes. Any other class name yields false.
func classQueryMode(className string) (res bool) {
	for _, subMode := range GlobalPConfigs.rules.Task.ActiveMode {
		switch subMode {
		case "sync", "rows":
			// Both partition and index metadata are needed in these modes.
			if className == "part" || className == "index" {
				return true
			}
		case "struct":
			// Struct mode only needs partition metadata.
			if className == "part" {
				return true
			}
		}
	}
	return
}

// taskPartitionQueryListeners is the listener executor for the
// taskPartitionQuery topic: when the active task modes require partition
// metadata it resolves the partition type and count, then forwards the
// plan to the taskIndexQuery topic.
func taskPartitionQueryListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	event := "[taskPartitionQueryListeners]"
	defer func() {
		_ = ref.RecoverPanic(event, recover())
	}()
	plan := message.Properties["plan"].(*SchedulePlan2)
	defer deleteTopic(map[string]any{"subTask": plan.subTaskInfo, "topic": taskPartitionQuery, "outFay": GlobalPConfigs.result.Teletypewriter})
	if classQueryMode("part") {
		pType, err := partType(plan)
		if err != nil {
			plan.errorAction(message, plan.Object.ObjectLogSeq, err)
			return mq.ReconsumeLater
		}
		plan.TPod.Task.PType = pType
		pSum, err := partSum(plan)
		if err != nil {
			plan.errorAction(message, plan.Object.ObjectLogSeq, err)
			return mq.ReconsumeLater
		}
		plan.TPod.Task.PSum = pSum
	}
	payload := getMsgSendData(plan, map[string]any{
		"logSeq":         plan.Object.ObjectLogSeq,
		"topic":          taskIndexQuery,
		"taskMetaResult": message.Properties["taskMetaResult"],
	})
	if !sendMsg(getCheckMod(), taskIndexQuery, getTableName(plan.Object.Schema, plan.Object.Table), payload) {
		return mq.ReconsumeLater
	}
	return mq.ConsumeSuccess
}

// taskIndexQueryListeners is the listener executor for the taskIndexQuery
// topic: when index metadata is required and the histogram scale is zero it
// performs index-key selection, then forwards to taskHistogramQuery.
func taskIndexQueryListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	event := "[taskIndexQueryListeners]"
	defer func() {
		_ = ref.RecoverPanic(event, recover())
	}()
	plan := message.Properties["plan"].(*SchedulePlan2)
	defer deleteTopic(map[string]any{"subTask": plan.subTaskInfo, "topic": taskIndexQuery, "outFay": GlobalPConfigs.result.Teletypewriter})
	taskMetaResult := message.Properties["taskMetaResult"].(*taskModeResultS)
	// Index-key selection only runs when histogram sampling is disabled.
	if classQueryMode("index") && GlobalPConfigs.rules.Task.HistogramScale == 0 {
		if indexKeyChoose(message) != nil {
			return mq.ReconsumeLater
		}
	}
	payload := getMsgSendData(plan, map[string]any{
		"logSeq":         plan.Object.ObjectLogSeq,
		"topic":          taskHistogramQuery,
		"taskMetaResult": taskMetaResult,
	})
	if !sendMsg(getCheckMod(), taskHistogramQuery, getTableName(plan.Object.Schema, plan.Object.Table), payload) {
		return mq.ReconsumeLater
	}
	return mq.ConsumeSuccess
}

// taskHistogramQueryListeners is the listener executor for the
// taskHistogramQuery topic: it forwards the accumulated task metadata to
// the taskIndexChoose topic without processing of its own.
func taskHistogramQueryListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	event := "[taskHistogramQueryListeners]"
	defer func() {
		_ = ref.RecoverPanic(event, recover())
	}()
	plan := message.Properties["plan"].(*SchedulePlan2)
	defer deleteTopic(map[string]any{"subTask": plan.subTaskInfo, "topic": taskHistogramQuery, "outFay": GlobalPConfigs.result.Teletypewriter})
	payload := getMsgSendData(plan, map[string]any{
		"logSeq":         plan.Object.ObjectLogSeq,
		"taskMetaResult": message.Properties["taskMetaResult"].(*taskModeResultS),
		"topic":          taskIndexChoose,
	})
	if sendMsg(getCheckMod(), taskIndexChoose, getTableName(plan.Object.Schema, plan.Object.Table), payload) {
		return mq.ConsumeSuccess
	}
	return mq.ReconsumeLater
}

// taskIndexChooseListeners is the listener executor for the taskIndexChoose
// topic, the last stage of the query chain: it checks for an abnormal quit,
// performs the normal index choice, and (in its cleanup defer) deletes the
// topic and bumps the MQ customer counters.
func taskIndexChooseListeners(message mq.Message) mq.ConsumeConcurrentlyStatus {
	var event = "[taskIndexChooseListeners]"
	defer func() {
		_ = ref.RecoverPanic(event, recover())
	}()
	// Resolve plan BEFORE registering the cleanup defer: previously the defer
	// dereferenced plan while it could still be nil (if the type assertion
	// panicked), triggering a second nil-pointer panic inside the defer.
	plan := message.Properties["plan"].(*SchedulePlan2)
	defer func() {
		deleteTopic(map[string]any{"subTask": plan.subTaskInfo, "topic": taskIndexChoose, "outFay": GlobalPConfigs.result.Teletypewriter})
		plan.MQ.FirstMsgCustomerAdd()
		plan.MQ.FirstObjectCustomerAdd()
		plan.MQ.SecondMsgCustomerAdd()
	}()
	if abnormalErrorQuit(plan) {
		return mq.ReconsumeLater
	}
	taskMetaResult := message.Properties["taskMetaResult"].(*taskModeResultS)
	if normalIndexChoose(message, taskMetaResult.index.result) != nil {
		return mq.ReconsumeLater
	}
	return mq.ConsumeSuccess
}
