package SheXiang_mq

import (
	"db2s/global"
	"db2s/go-log/log"
	"fmt"
	"os"
	"reflect"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"
)
type (
	// Monitor aggregates per-topic runtime statistics for one MqFactory and
	// backs the periodic console report (see TurnMonitor / Print).
	Monitor struct {
		MsgS       map[string] /*TopicName*/ *Msg
		ctx        chan int // closed by CloseMonitor to stop TurnMonitor's goroutine
		topicInfos []*TopicPublishInfo
		mqFactory  *MqFactory
	}
	// Samp groups the counters used by the sampling ("samp") check mode.
	// Fields are pointers so several goroutines can share and update them
	// atomically.
	Samp struct {
		SourceSum           *int64 // actual row count on the source side
		DestSum             *int64 // actual row count on the destination side
		SampProportionChunk *int64 // rows derived from the sampling proportion (chunk)
		SampProportionSum   *int64 // total rows covered by the sampling proportion
		SAccumulate         *int64 // rows read (select) so far
		DAccumulate         *int64 // rows written (insert) so far
		OutlierDetection    *int64 // rows found inconsistent between source and destination
	}
	// SubTasks is the per-table unit of work tracked by the progress bar.
	SubTasks struct {
		Schema               string            `json:"schema"`
		Table                string            `json:"table"`
		Loc                  uint64            `json:"loc"` // estimated row count of the table
		TopicCount           map[string]*int64 `json:"topicCount"`
		BeginTime            *int64            `json:"beginTime"` // start timestamp (ns) of this task
		Samp                 *Samp             `json:"samp"`
		MqProductCounter     *int64            // messages produced to the queue
		MqConsumptionCounter *int64            // messages consumed from the queue
		RecordCount          *int64            // actual row count of the table
		OutlierDetection     *int64            // abnormal (outlier) row count
		SAccumulate          *int64            // rows read by select
		DAccumulate          *int64            // rows written by insert
		TableObject          string            // table role: "N" = normal table, "P" = partition table
		IndexObject          atomic.Value      `json:"indexObject"`   // "M" = no index, "I" = has index
		IndexName            atomic.Value      `json:"indexName"`     // name of the chosen index
		IndexCardinality     atomic.Value      `json:"cardinality"`   // cardinality (selectivity) of the chosen index
		PartitionName        atomic.Value      `json:"partitionName"` // partition name; "single" = non-partitioned table
		TaskStatus           *int64            // task status: 1 init, 2 running, 3 finished, -1 abnormal, -2 error
		ErrorInfo            atomic.Value      // first recorded error message (string), nil until set
		XlsOutPutResult      any               // final per-table result forwarded to the XLS output
	}
	// SpeedLimitMonitor is one table-level speed/throughput snapshot.
	// The abbreviated fields are documented where they are printed (barOutput).
	SpeedLimitMonitor struct {
		Schema string
		Table  string
		RS     string // Reading Sum (rows/s)
		WS     string // Write Sum (rows/s)
		SS     string // Speed Restricted: Y/N
		SR     string // Speed-Restriction reason: RWD / RR / WR
		RWD    string // Reading-Writing Discrepancy (%)
		RR     string // Read Rate
		WR     string // Write Rate
		RP     string // Read Parallelism
		WP     string // Write Parallelism
		BS     string // token-Bucket Size
		NS     string // Network Speed (MB)
		RBS    string // Read Block Size (rows/s)
	}
	// TaskBar drives the table-level progress bar and the channels feeding it.
	TaskBar struct {
		TaskSeq   int64
		BeginTime time.Time
		Rw        sync.Mutex
		CheckMod  string // check mode; selects the line renderer (see getBarInfoFunc)
		// total number of tasks
		Ctx               chan int
		Total             *int64 // total number of tables
		Accumulate        int64  // tables completed
		Free              *int64 // tables remaining
		BreakPointSum     *int64 // tables skipped (breakpoint resume)
		CurrentSchedule   []string
		FinishFile        *os.File
		SubTasks          chan /*TaskName*/ *SubTasks           // receives table-level task state changes
		SpeedLimitMonitor chan /*SpeedLimit*/ SpeedLimitMonitor // receives table-level speed-limit updates
		LogSeq            int64
		Event             string
		FinishResultC     chan any
	}
	// Msg holds the accumulated execution statistics of one topic.
	Msg struct {
		// topicName name for monitoring
		TopicName string
		// consumerGroup name for monitoring
		ConsumerGroup string
		// taskCount task count for monitoring
		TaskCount int64
		// timeCount time count for monitoring
		TimeCount int64
		// maxTime max time task  for monitoring
		MaxTime int64
		// minTime min time task for monitoring (-1 sentinel until the first task runs)
		MinTime int64
		// Running is the pool's running-task count as last observed in add().
		Running int64
		// Waiting is the pool's waiting-task count as last observed in add().
		Waiting int64
		// started
		StartTime time.Time
		// LastTaskTime is the elapsed time since StartTime at the last recorded task.
		LastTaskTime int64
		// toPicConfig links back to the topic's publish configuration and pool.
		toPicConfig *TopicPublishInfo
	}
)

// TopicBarAdd increments the per-topic counter of the sub-task carried in n.
// n is expected to be a map[string]any with "subTask" and "topic" entries
// (see taskAndTopic); payloads without a usable counter are ignored instead
// of panicking on a nil pointer.
func (t *TaskBar) TopicBarAdd(n any) {
	subTask, topic := t.taskAndTopic(n)
	if subTask == nil {
		return // payload did not carry a sub-task (e.g. a plain string)
	}
	if counter, ok := subTask.TopicCount[topic]; ok && counter != nil {
		atomic.AddInt64(counter, 1)
	}
}

// taskAndTopic extracts the *SubTasks and topic name from a monitor payload.
// Payloads are either a plain string (both results stay zero) or a
// map[string]any carrying "subTask" (*SubTasks) and "topic" entries.
func (t *TaskBar) taskAndTopic(n any) (subTask *SubTasks, topic string) {
	// A direct type assertion replaces the original fmt.Sprintf(reflect.TypeOf)
	// string comparison: same dispatch, no reflection round-trip.
	if m, ok := n.(map[string]any); ok {
		if v, ok1 := m["subTask"]; ok1 {
			// Safe assertion: a wrong type leaves subTask nil instead of panicking.
			subTask, _ = v.(*SubTasks)
		}
		if v, ok1 := m["topic"]; ok1 {
			topic = fmt.Sprintf("%v", v)
		}
	}
	return
}
// TopicBarDel decrements the per-topic counter of the sub-task carried in n,
// mirroring TopicBarAdd. Payloads without a usable counter are ignored
// instead of panicking on a nil pointer.
func (t *TaskBar) TopicBarDel(n any) {
	subTask, topic := t.taskAndTopic(n)
	if subTask == nil {
		return // payload did not carry a sub-task (e.g. a plain string)
	}
	if counter, ok := subTask.TopicCount[topic]; ok && counter != nil {
		atomic.AddInt64(counter, -1)
	}
}

// BarSubsStatusSwapSend forwards the sub-task to the monitor channel without
// changing its status, swallowing the panic raised when the channel is
// already closed.
func (t *TaskBar) BarSubsStatusSwapSend(subTask *SubTasks) {
	defer func() {
		if p := recover(); p != nil {
			// Channel closed: the monitor has shut down; just log and drop.
			log.MonitorLog().Warn("[BarAccumulate] send subtask chunal fail.")
		}
	}()
	t.SubTasks <- subTask
}
// BarSubsStatusSwap sets the sub-task's status from its textual name and
// notifies the monitor channel. "running" additionally stamps the start
// time. An unknown name leaves the status untouched but still notifies.
// A closed monitor channel is tolerated via recover.
func (t *TaskBar) BarSubsStatusSwap(subTask *SubTasks, r string) {
	defer func() {
		if r1 := recover(); r1 != nil {
			log.MonitorLog().Warn(fmt.Sprintf("[BarAccumulate] send subtask chunal fail. error is {%v}", r1))
		}
	}()
	// Textual states map onto the numeric TaskStatus codes used everywhere else.
	codes := map[string]int64{
		"init":     1,
		"running":  2,
		"finish":   3,
		"abnormal": -1,
		"error":    -2,
	}
	if code, ok := codes[r]; ok {
		atomic.SwapInt64(subTask.TaskStatus, code)
		if r == "running" {
			// Entering the running state also records the task's start time.
			atomic.SwapInt64(subTask.BeginTime, time.Now().UnixNano())
		}
	}
	t.SubTasks <- subTask
}

// BarSubsValueSwap updates one of the sub-task's atomic.Value display fields
// (partitionName / indexName / cardinality / indexObject), identified by s,
// and notifies the monitor channel. Updates are skipped once an error has
// been recorded on the sub-task. A closed monitor channel is tolerated.
func (t *TaskBar) BarSubsValueSwap(subTask *SubTasks, s, r string) {
	defer func() {
		if r1 := recover(); r1 != nil {
			// Monitor channel already closed; drop the update.
			log.MonitorLog().Warn(fmt.Sprintf("[BarAccumulate] send subtask chunal fail. error is {%v}", r1))
		}
	}()
	// NOTE(review): StructToMapAny presumably yields the field/JSON names of
	// SubTasks; iterating it validates that s names a real field before the
	// switch below dispatches on the canonical key. TODO confirm key casing.
	if p, err := global.StructToMapAny(subTask); err != nil {
		return
	} else {
		for k := range p {
			if strings.EqualFold(k, s) { // case-insensitive field-name match
				if subTask.ErrorInfo.Load() == nil { // only while no error is recorded
					switch k {
					case "partitionName":
						subTask.PartitionName.Store(fmt.Sprintf("%v", r))
					case "indexName":
						subTask.IndexName.Store(fmt.Sprintf("%v", r))
					case "cardinality":
						subTask.IndexCardinality.Store(fmt.Sprintf("%v", r))
					case "indexObject":
						subTask.IndexObject.Store(fmt.Sprintf("%v", r))
					}
					t.SubTasks <- subTask
				}
			}
		}
	}
}
// BarSubsXlsDataOutput attaches the final XLS result to the sub-task and
// notifies the monitor channel; a closed channel is tolerated via recover.
func (t *TaskBar) BarSubsXlsDataOutput(subTask *SubTasks, r any) {
	defer func() {
		if p := recover(); p != nil {
			// Channel closed: monitoring already shut down; log and drop.
			log.MonitorLog().Warn("[BarAccumulate] send subtask chunal fail.")
		}
	}()
	subTask.XlsOutPutResult = r
	t.SubTasks <- subTask
}
// BarSubsErrorSwap records the first error message for a sub-task and
// notifies the monitor channel; later errors are ignored so the original
// cause is preserved. A closed monitor channel is tolerated via recover.
func (t *TaskBar) BarSubsErrorSwap(subTask *SubTasks, r string) {
	defer func() {
		if p := recover(); p != nil {
			// Monitor channel already closed; silently drop, as before.
			//log.MonitorLog().Warn(fmt.Sprintf("[BarAccumulate] send subtask chan fail."))
		}
	}()
	// The previous check formatted a nil Load() as "<nil>" (length 5), so a
	// task whose ErrorInfo was never initialised could never record an error.
	// Treat both "never stored" (nil) and "stored empty string" as empty,
	// matching the nil check used by BarSubsValueSwap.
	v := subTask.ErrorInfo.Load()
	if v == nil || len(fmt.Sprintf("%v", v)) == 0 {
		subTask.ErrorInfo.Store(fmt.Sprintf("%v", r))
		t.SubTasks <- subTask
	}
}
// BarSubsStatusGet atomically reads the sub-task's status code
// (1 init, 2 running, 3 finished, -1 abnormal, -2 error).
func (t *TaskBar) BarSubsStatusGet(subTask *SubTasks) (r int64) {
	return atomic.LoadInt64(subTask.TaskStatus)
}

// DelBar marks the sub-task carried in n as finished: one fewer free table,
// one more completed table, status forced to 3 (finish), then the monitor
// channel is notified. A closed channel is tolerated via recover, and a
// payload without a sub-task is ignored before any counter is touched
// (previously the counters were mutated and then a nil deref panicked).
func (t *TaskBar) DelBar(n any) {
	defer func() {
		if p := recover(); p != nil {
			log.MonitorLog().Warn("[BarAccumulate] send subtask chunal fail.")
		}
	}()
	subTask, _ := t.taskAndTopic(n)
	if subTask == nil {
		return // nothing to finish
	}
	atomic.AddInt64(t.Free, -1)
	atomic.AddInt64(&t.Accumulate, 1)
	atomic.SwapInt64(subTask.TaskStatus, 3)
	t.SubTasks <- subTask
}

// BarClose inspects a sub-task's status and, when it has terminally ended
// (error or normal finish), updates the progress counters exactly once,
// using the status map to avoid double counting across frames.
// Returns -3 if already accounted, -2 on error exit, 0 on normal finish,
// otherwise the raw status code (1 init, 2 running, -1 abnormal).
func (t *TaskBar) BarClose(table string, v *SubTasks, status *map[string]bool) (s int) {
	event := "[Bar Run]"
	st := *status
	if done, ok1 := st[table]; ok1 && done {
		return -3 // already counted in a previous frame
	}
	// Load atomically: other goroutines update TaskStatus via atomic.Swap
	// (see BarSubsStatusSwap); the previous bare *v.TaskStatus read raced.
	ts := atomic.LoadInt64(v.TaskStatus)
	if ts == -2 { // errored: leave monitoring
		atomic.AddInt64(t.Free, -1)
		atomic.AddInt64(&t.Accumulate, 1)
		log.MonitorLog().Warn(fmt.Sprintf("%s Table %v migration encountered an error and exited monitoring.", event, table))
		st[table] = true
		return -2
	}
	if ts == 3 { // finished normally
		atomic.AddInt64(t.Free, -1)
		atomic.AddInt64(&t.Accumulate, 1)
		log.MonitorLog().Info(fmt.Sprintf("%s Table %v completes migration and exits monitoring.", event, table))
		st[table] = true
		return 0
	}
	return int(ts)
}
// BarAccumulate folds the counters carried in a map payload ("select",
// "insert", "sum", "product", "consumption") into the sub-task's atomic
// counters, then notifies the monitor channel. Malformed payloads and a
// closed channel are swallowed by the recover, as before.
func (t *TaskBar) BarAccumulate(n any) {
	defer func() {
		if p := recover(); p != nil {
			// Payload malformed or channel closed; drop silently (as before).
		}
	}()
	subTask, _ := t.taskAndTopic(n)
	payload := n.(map[string]any)
	// Each payload key feeds exactly one counter; order matches the original.
	targets := []struct {
		key     string
		counter *int64
	}{
		{"select", subTask.SAccumulate},
		{"insert", subTask.DAccumulate},
		{"sum", subTask.RecordCount},
		{"product", subTask.MqProductCounter},
		{"consumption", subTask.MqConsumptionCounter},
	}
	for _, tg := range targets {
		if v, ok := payload[tg.key]; ok {
			atomic.AddInt64(tg.counter, v.(int64))
		}
	}
	t.SubTasks <- subTask
}
// SpeedAccumulate forwards a SpeedLimitMonitor snapshot to the speed-limit
// channel; wrong payload types and a closed channel are swallowed by the
// recover, exactly as before.
func (t *TaskBar) SpeedAccumulate(n any) {
	defer func() {
		if p := recover(); p != nil {
			// Channel closed or n is not a SpeedLimitMonitor; ignore.
		}
	}()
	limit := n.(SpeedLimitMonitor)
	t.SpeedLimitMonitor <- limit
}
// NewMonitor builds a Monitor with one Msg statistics entry per topic known
// to the factory; MinTime starts at -1 as a "no task yet" sentinel.
func NewMonitor(mqFactory *MqFactory) *Monitor {
	started := time.Now()
	stats := make(map[string]*Msg, len(mqFactory.TopicPublishInfoTable))
	for topic, info := range mqFactory.TopicPublishInfoTable {
		stats[topic] = &Msg{
			TopicName:   topic,
			StartTime:   started,
			MinTime:     -1, // sentinel: no task measured yet
			toPicConfig: info,
		}
	}
	return &Monitor{
		MsgS:      stats,
		mqFactory: mqFactory,
		ctx:       make(chan int),
	}
}
// Surround invokes the consumer listener for one message, bumps the global
// exec/success counters on a zero (success) status, and records timing
// statistics for the message's topic.
func (m *Monitor) Surround(listener func(message Message) ConsumeConcurrentlyStatus, msg Message) {
	started := time.Now()
	if status := listener(msg); status == 0 {
		atomic.AddInt64(m.mqFactory.mqExecs, 1)
		atomic.AddInt64(m.mqFactory.mqSuccess, 1)
	}
	m.add(msg.Topic, started)
}

// add records one completed task for topic: duration counters, max/min task
// time, time since topic start, and the pool's running/waiting sizes.
//
// Fixes over the original:
//   - the Waiting update compared against Running (copy-paste bug), so
//     Waiting was effectively never maintained correctly;
//   - the CompareAndSwap(&x, x, f(x)) pattern is a racy no-op idiom: the
//     unsynchronized read of x can make the CAS silently fail. Max/min now
//     use proper CAS loops; the remaining snapshots use atomic stores.
func (m *Monitor) add(topic string, now time.Time) {
	since := int64(time.Since(now))
	msg := m.MsgS[topic]
	pool := msg.toPicConfig.ToPicConfig.pool
	atomic.AddInt64(&msg.TaskCount, 1)
	atomic.AddInt64(&msg.TimeCount, since)
	// Raise MaxTime to since if it is larger.
	for {
		old := atomic.LoadInt64(&msg.MaxTime)
		if since <= old || atomic.CompareAndSwapInt64(&msg.MaxTime, old, since) {
			break
		}
	}
	// Lower MinTime to since; -1 is the "unset" sentinel and always loses.
	for {
		old := atomic.LoadInt64(&msg.MinTime)
		if (old != -1 && old <= since) || atomic.CompareAndSwapInt64(&msg.MinTime, old, since) {
			break
		}
	}
	atomic.StoreInt64(&msg.LastTaskTime, int64(time.Since(msg.StartTime)))
	atomic.StoreInt64(&msg.Running, int64(pool.Running()))
	atomic.StoreInt64(&msg.Waiting, int64(pool.Waiting()))
}
// Print writes one statistics line per topic to stdout, in sorted topic
// order, followed by a separator rule.
func (m *Monitor) Print() {
	topics := make([]string, 0, len(m.MsgS))
	for topic := range m.MsgS {
		topics = append(topics, topic)
	}
	sort.Strings(topics)
	for _, topic := range topics {
		msg := m.MsgS[topic]
		line := fmt.Sprintf("Topic: %v,容量: %v monitor: %v", topic, msg.toPicConfig.TopicBlockageMessageQueueCount(), msg.info())
		fmt.Println(line)
	}
	fmt.Println("================================================================")
}

// clearScreen erases the terminal and homes the cursor using ANSI escapes.
func (t *TaskBar) clearScreen() {
	fmt.Print("\033[2J") // clear the whole screen
	fmt.Print("\033[H")  // move the cursor to the top-left corner
	//fmt.Print("\033[2J\033[H")
	//fmt.Print("\033[?25l") // hide the cursor
}
// progressBar renders a 50-column overall progress bar to stdout.
// A non-positive total is treated as 0% instead of dividing by zero (which
// previously printed NaN/Inf percentages), and the filled portion is
// clamped so current > total cannot overrun the bar.
func progressBar(current, total int64) {
	width := 50
	var percent float64
	if total > 0 {
		percent = float64(current) / float64(total) * 100
	}
	progress := int(float64(width) * (percent / 100))
	if progress > width {
		progress = width // clamp when current overshoots total
	}
	fmt.Printf("\rOverall progress: [")
	for i := 0; i < progress; i++ {
		fmt.Print("=")
	}
	fmt.Printf(">")
	if total > 0 {
		for i := progress; i < width; i++ {
			fmt.Print(" ")
		}
	}
	fmt.Printf("] %.2f%%\n\n", percent)
}

// KeyValue pairs a table name with its registration sequence number; used by
// barMapKeySort to give the progress bar a stable print order.
type KeyValue struct {
	Key   string
	Value int
}

// barMapKeySort returns the table names ordered by their registration
// sequence number (ascending), so the progress bar prints tables in the
// order they were first seen. Both slices are pre-sized to avoid repeated
// growth copies.
func (t *TaskBar) barMapKeySort(tasks map[string]int) []string {
	pairs := make([]KeyValue, 0, len(tasks))
	for key, seq := range tasks {
		pairs = append(pairs, KeyValue{key, seq})
	}
	sort.Slice(pairs, func(i, j int) bool {
		return pairs[i].Value < pairs[j].Value
	})
	keys := make([]string, 0, len(pairs))
	for _, kv := range pairs {
		keys = append(keys, kv.Key)
	}
	return keys
}
// barRemoveElement deletes the first occurrence of element from the slice
// in place and returns a pointer to the shortened slice; if the element is
// absent the original slice pointer is returned unchanged.
func (t *TaskBar) barRemoveElement(slice *[]string, element string) *[]string {
	s := *slice
	for i, v := range s {
		if v == element {
			copy(s[i:], s[i+1:]) // shift the tail left over the removed slot
			trimmed := s[:len(s)-1]
			return &trimmed
		}
	}
	return slice // element not found: hand back the original untouched
}
// speedLimit is an empty placeholder for table-level speed limiting; the
// actual limit snapshots arrive via the SpeedLimitMonitor channel instead.
func (t *TaskBar) speedLimit() {

}
// barOutput renders one monitoring frame: the overall progress bar, the
// per-table speed-limit lines, and one status line per table still being
// tracked. Tables that reached a terminal state (finish/abnormal/error) are
// purged from the tracking maps so they are not printed again.
func (t *TaskBar) barOutput(subtasks *map[string]*SubTasks, tasks *map[string]int, speedTask map[string]SpeedLimitMonitor, deleteTaskM map[string]int, status *map[string]bool) {
	//t.clearScreen()
	fmt.Printf("\033[0G~~~~~~~~~~~~~~~~~~~~~~~~~~db2s start~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
	progressBar(t.Accumulate, atomic.LoadInt64(t.Total))
	// Fixed: the original used %b here, logging the table total in binary.
	log.MonitorLog().Debug(fmt.Sprintf("(%d) %s %s Completed table %v and total table %d included in task progress bar", t.LogSeq, t.Event, "barOutput", t.Accumulate, atomic.LoadInt64(t.Total)))
	var printBarInfo = make(map[string] /*tableName*/ string /*barInfo*/)
	var printSeq = t.barMapKeySort(*tasks)
	for key, v := range *subtasks {
		if _, ok := deleteTaskM[key]; ok {
			continue // already purged in an earlier frame
		}
		/*
			Done: number of completed tables
			RTN:  "remaining tables number"
			ET:   "elapsed time"
			RTI:  "running task info"
		*/
		s := t.BarClose(key, v, status)
		sprintf := fmt.Sprintf("Table Sum: %v,BreakPointSum: %v, Done: %v, RTN: %v,ET: %.2fs RTI: %v", atomic.LoadInt64(t.Total), atomic.LoadInt64(t.BreakPointSum), t.Accumulate, atomic.LoadInt64(t.Free), time.Since(t.BeginTime).Seconds(), t.barInfo(*v, s))
		log.MonitorLog().Debug(fmt.Sprintf(" [monitor] s:%v insert:%v subs:%v info:%v", s, *v.DAccumulate, *v, sprintf))
		if s != 0 && s != 1 && s != -2 {
			printBarInfo[key] = sprintf
		}
		if s == 0 || s == -1 || s == -2 {
			// Terminal state: stop tracking this table. (A dead re-format of
			// sprintf was removed here; its result was never used.)
			delete(*subtasks, key)
			deleteTaskM[key]++
			delete(*tasks, key)
		}
	}
	for _, v := range printSeq {
		x, o := speedTask[v]
		_, p := deleteTaskM[v]
		if o && !p {
			// SS  (Speed Restricted)   Y/N — whether the table is throttled
			// SR  (Speed Restriction)  reason: RWD, RR or WR
			// RWD (Reading-Writing Discrepancy) read/write gap percentage
			// RS  (Reading Sum)  rows read per second
			// WS  (Write Sum)    rows written per second
			// RR  (Read Rate)    read rate
			// WR  (Write Rate)   write rate
			// RP  (Read Parallel)  read concurrency
			// WP  (Write Parallel) write concurrency
			// BS  (Token Bucket Size) token-bucket capacity
			// NS  (Network Speed) transfer rate per second
			// RBS (Read Block Size) rows per read block
			sprintf := fmt.Sprintf("table:[%v] SS:[%v] SR:[%v] RWD:[%v%%] RS:[%v/s] WS:[%v/s] RR:[%v/ms] WR:[%v/ms] RP:[%v] WP:[%v] BS:[%v] NS:[%v MB] RBS:[%v rows/s]", fmt.Sprintf("%s.%s", x.Schema, x.Table), x.SS, x.SR, x.RWD, x.RS, x.WS, x.RR, x.WR, x.RP, x.WP, x.BS, x.NS, x.RBS)
			fmt.Printf("\033[0G%s\n", sprintf)
		}
	}
	fmt.Printf("\033[0G----------------------------Dividing line------------------------------------\n")
	for _, v := range printSeq {
		if sprintf, ok := printBarInfo[v]; ok {
			fmt.Printf("\033[0G%s\n", sprintf)
		}
	}
	fmt.Printf("\033[0G##########################db2s end######################################\n")
}
// Info exposes the per-topic statistics map, keyed by topic name.
func (m *Monitor) Info() map[string]*Msg {
	return m.MsgS
}

// BarCloseAction prints the final progress-bar frame and then closes both
// monitor channels, which lets BarMonitor's goroutine drain and exit.
func (t *TaskBar) BarCloseAction() {
	fmt.Println("Run Turned closed")
	//t.clearScreen()
	fmt.Print("\033[0G~~~~~~~~~~~~~~~~~~~~~~~~~~db2s start~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n")
	progressBar(t.Accumulate, atomic.LoadInt64(t.Total))
	fmt.Print("\033[0G##########################db2s end######################################\n")
	close(t.SubTasks)
	close(t.SpeedLimitMonitor)
}

// TurnMonitor starts a background goroutine that prints the per-topic
// statistics every global.MonitorInterval seconds until CloseMonitor closes
// m.ctx.
func (m *Monitor) TurnMonitor() {
	go func() {
		ticker := time.NewTicker(global.MonitorInterval * time.Second)
		// Release the ticker when the goroutine exits; the original leaked it.
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				m.Print()
			case <-m.ctx:
				fmt.Println("Run Turned closed")
				return
			}
		}
	}()
}

// CloseMonitor stops the TurnMonitor goroutine by closing its stop channel.
// Call at most once: closing an already-closed channel panics.
func (m *Monitor) CloseMonitor() {
	close(m.ctx)
}

// info formats the accumulated statistics of one topic for Print.
// The average divisor is a local copy of the task count, so formatting a
// topic that has run nothing no longer mutates the shared m.TaskCount
// (which races with the atomic adds in Monitor.add); the printed output is
// unchanged (a zero count still displays as 1).
func (m *Msg) info() string {
	count := m.TaskCount
	if count == 0 {
		count = 1 // avoid division by zero for topics with no tasks yet
	}
	pool := m.toPicConfig.ToPicConfig.pool
	return fmt.Sprintf("执行总数:[%v] 运行任务数:[%v] 阻塞任务数:[%v] 累加执行时间:[%v],队列实际执行时间:[%v] 执行最小时间:[%v] 执行最大时间:[%v] 执行平均时间:[%v] ", count, pool.Running(), pool.Waiting(), time.Duration(m.TimeCount), time.Duration(m.LastTaskTime), time.Duration(m.MinTime), time.Duration(m.MaxTime), time.Duration(m.TimeCount/count))
}
// second converts the difference between two nanosecond timestamps into a
// seconds string with two decimal places.
func (t *TaskBar) second(t1, t2 int64) string {
	elapsed := float64(t2-t1) / float64(time.Second)
	return fmt.Sprintf("%.2f", elapsed)
}
// statusStrconv maps a numeric bar status onto its display name; unknown
// codes yield the empty string.
func (t *TaskBar) statusStrconv(s int) string {
	switch s {
	case 1:
		return "init"
	case 2:
		return "running"
	case 0:
		return "finish"
	case -1:
		return "abnormal"
	case -2:
		return "error"
	default:
		return ""
	}
}
// getBarInfoFunc maps each check mode (TaskBar.CheckMod) onto the renderer
// that formats a sub-task's progress-bar line; "default" is the fallback
// used by barInfo when the mode is unknown. Values are reflect.Values so
// barInfo can dispatch via reflection.
func (t *TaskBar) getBarInfoFunc() map[string]reflect.Value {
	return map[string]reflect.Value{
		"sync":    reflect.ValueOf(t.syncBarInfo),
		"task":    reflect.ValueOf(t.taskBarInfo),
		"load":    reflect.ValueOf(t.loadBarInfo),
		"rows":    reflect.ValueOf(t.rowsBarInfo),
		"count":   reflect.ValueOf(t.countBarInfo),
		"samp":    reflect.ValueOf(t.sampBarInfo),
		"struct":  reflect.ValueOf(t.structBarInfo),
		"default": reflect.ValueOf(t.defaultBarInfo),
	}
}
// barPctInfo reports write progress as a percentage of the table's actual
// row count; it stays "0" until both rows have been written and a real
// count is available.
func (t *TaskBar) barPctInfo(subtasks SubTasks) string {
	// && short-circuits, preserving the original dereference order.
	if *subtasks.DAccumulate > 0 && *subtasks.RecordCount > 0 {
		return fmt.Sprintf("%.2f", float64(*subtasks.DAccumulate)/float64(*subtasks.RecordCount)*100)
	}
	return "0"
}
// barPctInfo1 formats *d as a percentage of *s, returning "0" unless both
// values are positive.
func (t *TaskBar) barPctInfo1(s, d *int64) string {
	if *d > 0 && *s > 0 {
		return fmt.Sprintf("%.2f", float64(*d)/float64(*s)*100)
	}
	return "0"
}
// structBarInfo renders the progress line for the "struct" check mode.
func (t *TaskBar) structBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		RTN: elapsed time of the table
		T:   table (schema.table)
	*/
	return fmt.Sprintf("Ss:[%s] RTN:[%vs] T:[%v] ", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table))

}

// syncBarInfo renders the progress line for the "sync" check mode.
func (t *TaskBar) syncBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		TMD: Table Migration Duration
		T:   table (schema.table)
		Toj: Table Object — "N" normal table, "P" partition table
		Ioj: Index Object — "M" no index, "I" indexed
		Pn:  partition name; "single" = non-partitioned table
		Pct: percentage complete
		Rrc: Read Rows Count (rows read on the source)
		Wrc: Write Rows Count (rows written on the destination)
		IN:  Index Name — the index chosen for this table
		IC:  Index Cardinality — selectivity of the chosen index
	*/
	return fmt.Sprintf("Ss:[%s] TMD:[%vs] T:[%v] Sum:[%v] Toj:[%v] Ioj:[%v] IN:[%v] IC:[%v] Pn:[%v] Rrc:[%v] Wrc:[%v] Pct:[%v%%]", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table), *subtasks.RecordCount, subtasks.TableObject, subtasks.IndexObject, subtasks.IndexName, subtasks.IndexCardinality, subtasks.PartitionName, *subtasks.SAccumulate, *subtasks.DAccumulate, t.barPctInfo(subtasks))
}
// taskBarInfo renders the progress line for the "task" check mode (like
// syncBarInfo but without partition name or write-side counters).
func (t *TaskBar) taskBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		TMD: Table Migration Duration
		T:   table (schema.table)
		Toj: Table Object — "N" normal table, "P" partition table
		Ioj: Index Object — "M" no index, "I" indexed
		Pct: percentage complete
		Rrc: Read Rows Count (rows read on the source)
		IN:  Index Name — the index chosen for this table
		IC:  Index Cardinality — selectivity of the chosen index
	*/
	return fmt.Sprintf("Ss:[%s] TMD:[%vs] T:[%v] Sum:[%v] Toj:[%v] Ioj:[%v] IN:[%v] IC:[%v] Rrc:[%v] Pct:[%v%%]", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table), *subtasks.RecordCount, subtasks.TableObject, subtasks.IndexObject, subtasks.IndexName, subtasks.IndexCardinality, *subtasks.SAccumulate, t.barPctInfo(subtasks))
}
// loadBarInfo renders the progress line for the "load" check mode.
func (t *TaskBar) loadBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		TMD: elapsed time of the table
		T:   table (schema.table)
		Loc: estimated row count
		Pct: percentage complete
		Rrc: Read Rows Count (rows read on the source)
		Wrc: Write Rows Count (rows written on the destination)
	*/
	return fmt.Sprintf("Ss:[%s] TMD:[%vs] T:[%v] Sum:[%v] Loc:[%v] Rrc:[%v] Wrc:[%v] Pct:[%v%%]", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table), *subtasks.RecordCount, subtasks.Loc, *subtasks.SAccumulate, *subtasks.DAccumulate, t.barPctInfo(subtasks))
}
// countBarInfo renders the progress line for the "count" check mode.
func (t *TaskBar) countBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		RTN: elapsed time of the table
		T:   table (schema.table)
		Pct: percentage complete
		Src: Source Rows Count (rows counted on the source)
		Drc: Dest Rows Count (rows counted on the destination)
	*/
	return fmt.Sprintf("Ss:[%s] RTN:[%vs] T:[%v] Src:[%v] Drc:[%v] PCT:[%v%%]", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table), *subtasks.SAccumulate, *subtasks.DAccumulate, t.barPctInfo(subtasks))
}
// rowsBarInfo renders the progress line for the "rows" check mode.
//
//	Ss:  status
//	RTN: elapsed time of the table
//	T:   table (schema.table)
//	Src: Source Rows Count / Drc: Dest Rows Count
//	OD:  Outlier Detection — rows found inconsistent
//	PCT: percentage complete
func (t *TaskBar) rowsBarInfo(subtasks SubTasks, s int) string {
	// The previous version printed the literal 1 for OD while the real field
	// sat in a commented-out line; restore the actual outlier counter,
	// guarding against a nil pointer.
	var od int64
	if subtasks.OutlierDetection != nil {
		od = atomic.LoadInt64(subtasks.OutlierDetection)
	}
	return fmt.Sprintf("Ss:[%s] RTN:[%vs] T:[%v] Sum:[%v] Loc:[%v] Src:[%v] Drc:[%v] OD:[%v] PCT:[%v%%]", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table), *subtasks.RecordCount, subtasks.Loc, *subtasks.SAccumulate, *subtasks.DAccumulate, od, t.barPctInfo(subtasks))
}
// sampBarInfo renders the progress line for the "samp" (sampling) check mode.
func (t *TaskBar) sampBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		RTN: elapsed time of the table
		T:   schema.table
		Sum: actual count on source, destination
		Spc: Sampling Proportion Chunk — rows per sampling proportion
		Src: Source Rows Count (rows checked on the source)
		Drc: Dest Rows Count (rows checked on the destination)
		OD:  Outlier Detection — rows found inconsistent
		Pct: percentage complete
	*/
	// Remaining chunks = total proportion minus the chunks done so far.
	proc := *subtasks.Samp.SampProportionSum - *subtasks.Samp.SampProportionChunk
	return fmt.Sprintf("Ss:[%s] RTN:[%vs] T:[%v] Sum:[%v,%v] Spc:[%v] Src:[%v] Drc:[%v] OD:[%v] Pct:[%v%%]",
		t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table),
		*subtasks.Samp.SourceSum, *subtasks.Samp.DestSum, *subtasks.Samp.SampProportionChunk,
		*subtasks.Samp.SAccumulate, *subtasks.Samp.DAccumulate, *subtasks.Samp.OutlierDetection,
		t.barPctInfo1(subtasks.Samp.SampProportionSum, &proc))
}
// defaultBarInfo renders the fallback progress line used when CheckMod does
// not match any registered renderer (see barInfo / getBarInfoFunc).
func (t *TaskBar) defaultBarInfo(subtasks SubTasks, s int) string {
	/*
		Ss:  status
		RTN: elapsed time of the table
		T:   table (schema.table)
		Loc: estimated row count
		checkNumber: rows read (select), rows written (insert)
		PCT: percentage complete
	*/
	return fmt.Sprintf("Ss:[%s] RTN:[%vs] T:[%v] Loc:[%v] checkNumber:[%v , %v] PCT:[%v%%]", t.statusStrconv(s), t.second(*subtasks.BeginTime, time.Now().UnixNano()), fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table), subtasks.Loc, *subtasks.SAccumulate, *subtasks.DAccumulate, t.barPctInfo(subtasks))
}

// barInfo formats one sub-task's progress line by dispatching to the
// renderer selected by t.CheckMod, falling back to "default" for unknown
// modes. Renderers are looked up as reflect.Values (see getBarInfoFunc)
// and invoked via reflection; each returns a single string.
func (t *TaskBar) barInfo(subtasks SubTasks, s int) (bar string) {
	var (
		function reflect.Value
		ok       bool
	)
	if function, ok = t.getBarInfoFunc()[t.CheckMod]; !ok {
		function = t.getBarInfoFunc()["default"]
	}
	if function.Kind() == reflect.Func {
		arguments := []reflect.Value{
			reflect.ValueOf(subtasks),
			reflect.ValueOf(s),
		}
		l := function.Call(arguments)
		// Each renderer returns exactly one value; the loop keeps the last.
		for i := 0; i < len(l); i++ {
			bar = fmt.Sprintf("%v", l[i].Interface())
		}
	}
	return
}
// max returns the larger of the two int64 values.
func max(o, n int64) int64 {
	if n >= o {
		return n
	}
	return o
}

// min returns the smaller of o and n, treating o == -1 as the "unset"
// sentinel so the first measured value always replaces it (see Msg.MinTime).
func min(o, n int64) int64 {
	if o == -1 || o > n {
		return n
	}
	return o
}
// contains reports whether element occurs in slice.
func (t *TaskBar) contains(slice []string, element string) bool {
	for i := range slice {
		if slice[i] == element {
			return true
		}
	}
	return false
}
// BarMonitor starts the goroutine that collects sub-task and speed-limit
// updates and periodically redraws the progress bar. It returns the channel
// on which the final result is delivered; that channel is closed when
// t.SubTasks is closed (see BarCloseAction).
func (t *TaskBar) BarMonitor() chan any {
	go func() {
		ticker := time.NewTicker(global.MonitorInterval * time.Second)
		// Release the ticker when the goroutine returns; the original leaked it.
		defer ticker.Stop()
		var subTaskM, speedTaskM = make(map[string]*SubTasks), make(map[string]SpeedLimitMonitor)
		var delTaskM = make(map[string]int)        // tables already purged from the bar
		var subTaskS = make(map[string]int)        // table -> registration sequence (print order)
		var subTaskSModify = make(map[string]bool) // table -> already counted as finished
		var autoSeq = 0
		for {
			select {
			case subtasks, ok := <-t.SubTasks:
				if !ok {
					// Producer closed the channel: monitoring is over.
					fmt.Println("Verification is over and data collection is in progress. Please wait...")
					close(t.FinishResultC)
					return
				}
				log.MonitorLog().Debug(fmt.Sprintf("(%d) %s %s Received a monitor message. message is %v", t.LogSeq, t.Event, "BarMonitor", subtasks))
				key := fmt.Sprintf("%s.%s", subtasks.Schema, subtasks.Table)
				if _, ok1 := subTaskM[key]; !ok1 {
					// First time we see this table: register it for the bar.
					log.MonitorLog().Debug(fmt.Sprintf("(%d) %s %s Added new monitoring statistics table %s.%s", t.LogSeq, t.Event, "BarMonitor", subtasks.Schema, subtasks.Table))
					subTaskM[key] = subtasks
					autoSeq++
					subTaskS[key] = autoSeq
					subTaskSModify[key] = false
				}
			case speedLimit, ok := <-t.SpeedLimitMonitor:
				if ok {
					log.MonitorLog().Debug(fmt.Sprintf("(%d) %s %s Received a monitor message. message is %v", t.LogSeq, t.Event, "SpeedMonitor", speedLimit))
					key := fmt.Sprintf("%s.%s", speedLimit.Schema, speedLimit.Table)
					speedTaskM[key] = speedLimit
				}
			case <-ticker.C:
				if len(subTaskM) == 0 {
					continue // nothing registered yet; skip the redraw
				}
				log.MonitorLog().Debug(fmt.Sprintf("(%d) %s %s Update task progress bar regularly", t.LogSeq, t.Event, "BarMonitor"))
				t.barOutput(&subTaskM, &subTaskS, speedTaskM, delTaskM, &subTaskSModify)
			}
		}
	}()
	return t.FinishResultC
}
