package inspection

//
//import (
//	"database/sql"
//	"errors"
//	"fmt"
//	metaD "db2s/TableMetaDataInit"
//	"db2s/dbExec"
//	"db2s/global"
//	"db2s/outPut"
//	quit "db2s/quitDispos"
//	mq "db2s/topic-mq"
//	"os"
//	"reflect"
//	"runtime"
//	"strconv"
//	"strings"
//	"sync"
//	"sync/atomic"
//	"time"
//)
//
//// dirInit 目录初始化 (directory initialization)
//func dirInit(path string, logseq int64) bool {
//	var (
//		vlog  string
//		event = "[dirInit]"
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	vlog = fmt.Sprintf("(%v) %v %v Start directory initialization.", logseq, callFuncInfo, event)
//	global.Wlog.Debug(vlog)
//	if _, err := os.Stat(path); os.IsNotExist(err) {
//		err1 := os.MkdirAll(path, os.ModePerm)
//		if err1 != nil {
//			global.Wlog.Error(fmt.Sprintf("(%v) %v %v create dir path fail. error info is %s", logseq, callFuncInfo, event, err1))
//			return false
//		}
//	}
//	return true
//}
//func syncBreakPointF(config *GlobalParametersConfig) bool {
//	return config.rules.Sync.Breakpoint
//}
//func breakPointFuncMap() map[string]reflect.Value {
//	return map[string]reflect.Value{
//		"sync": reflect.ValueOf(syncBreakPointF),
//	}
//}
//func getBreakPointStatus(config *GlobalParametersConfig) bool {
//	if funcName, ok := breakPointFuncMap()[config.rules.CheckMode]; ok {
//		args := []reflect.Value{
//			reflect.ValueOf(config),
//		}
//		l := funcName.Call(args)
//		return l[0].Interface().(bool)
//	}
//	return false
//}
//func expensiveOperation(wg *sync.WaitGroup, message mq.Message, done chan struct{}, sum int64, logseq int64) {
//	var (
//		tableName = string(message.Body)
//		plan      = message.Properties["plan"].(map[string]*SchedulePlan2)[tableName]
//		event     = "[expensiveOperation]"
//		vlog      string
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	defer func() {
//		wg.Done()
//	}()
//	vlog = fmt.Sprintf("(%d) %v %v Start monitoring table %v.%v tasks and when to start the exit mechanism.", logseq, callFuncInfo, event, plan.schema, plan.table)
//	global.Wlog.Debug(vlog)
//	// NOTE(review): this ticker is never stopped; add `defer ticker.Stop()` before re-enabling this code.
//	ticker := time.NewTicker(global.MonitorInterval * time.Second)
//	for {
//		select {
//		case <-ticker.C:
//			if e := atomic.LoadInt64(plan.errEnd); e < 0 {
//				done <- struct{}{}
//
//				return
//			}
//			if atomic.LoadInt64(plan.mqProductCustomerMonitor.mqStartTiming) == sum {
//				if atomic.LoadInt64(plan.mqProductCustomerMonitor.mqLevelProductSend) == atomic.LoadInt64(plan.mqProductCustomerMonitor.mqLevelCustomerReceive) {
//					done <- struct{}{}
//					vlog = fmt.Sprintf("(%d) %v %v Table %v.%v starts to start the exit task detection mechanism.", logseq, callFuncInfo, event, plan.schema, plan.table)
//					global.Wlog.Debug(vlog)
//					return
//				}
//			}
//		}
//	}
//}
//func syncXlsWriterTerminalData(plan *SchedulePlan2, logseq int64) error {
//	var (
//		event                        = "[xlsWriterTerminalData]"
//		vlog                         string
//		syncStatus                   string
//		err                          error
//		tableCost                    string
//		tableCostInt                 float64
//		migrationRate                string
//		sourceDataSize, destDataSize float64
//		dataExpansionRatio           string
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	vlog = fmt.Sprintf("(%d) %v %v Start writing the execution results of table %v.%v into xls...", logseq, callFuncInfo, event, plan.schema, plan.table)
//	global.Wlog.Debug(vlog)
//
//	if v, ok := terminalPods[getTableName(plan.schema, plan.table)]; ok {
//		vlog = fmt.Sprintf("(%d) %v %v Start writing the execution results of table %v.%v into xls...", logseq, callFuncInfo, event, plan.schema, plan.table)
//		global.Wlog.Debug(vlog)
//		if e := atomic.LoadInt64(plan.errEnd); e < 0 {
//			v.Sync.SyncInfo = "error"
//			syncStatus = "error"
//			vlog = fmt.Sprintf("(%d) %v %v There is an error in table %v.%v Modify the result status information to error.", logseq, callFuncInfo, event, plan.schema, plan.table)
//			global.Wlog.Warn(vlog)
//		} else {
//			syncStatus = "finish"
//		}
//		v.Sync.Seq = fmt.Sprintf("%v", plan.taskSeq)
//		//sheet1
//		d := map[string]any{"sheet": GlobalPConfigs.result.XlsV.XlsSheetNameGather[0],
//			"data": v.Sync}
//		ResultStatisticsMq <- d
//		//sheet2
//		tableCost = global.Seconed(plan.subTaskBeginTime, time.Now().UnixNano())
//		// NOTE(review): bitSize 10 is invalid for strconv.ParseFloat (must be 32 or 64; anything
//		// other than 32 is silently treated as 64) — change to 64 before re-enabling this code.
//		if tableCostInt, err = strconv.ParseFloat(tableCost, 10); err != nil {
//			return err
//		}
//		var datasize map[string]float64
//		migrationRate = global.Commercial(*plan.subTaskInfo.DAccumulate, int64(tableCostInt))
//
//		if datasize, err = GlobalISObjects.TableMetaData["source"].TableDataSize(GlobalPConfigs.SDB["single"], plan.schema, plan.table, logseq); err != nil {
//			return err
//		} else {
//			sourceDataSize = datasize["dataSizeMB"] + datasize["indexSizeMB"]
//		}
//		if datasize, err = GlobalISObjects.TableMetaData["dest"].TableDataSize(GlobalPConfigs.DDB["single"], plan.schema, plan.table, logseq); err != nil {
//			return err
//		} else {
//			destDataSize = datasize["dataSizeMB"] + datasize["indexSizeMB"]
//		}
//		dataExpansionRatio = global.Commercial(int64(destDataSize), int64(sourceDataSize))
//		resultData := global.XSyncsheetTatil{Seq: fmt.Sprintf("%v", plan.taskSeq), Schema: plan.schema,
//			Table: plan.table, SyncSum: plan.subTaskInfo.RecordCount, SelectRows: plan.subTaskInfo.SAccumulate, InsertRows: plan.subTaskInfo.DAccumulate,
//			SyncStatus: syncStatus, TableCost: tableCost, MigrationRate: migrationRate, SourceDataSize: fmt.Sprintf("%v", sourceDataSize), DestDataSize: fmt.Sprintf("%v", destDataSize),
//			DataExpansionRatio: dataExpansionRatio, Comment: fmt.Sprintf("%v", plan.subTaskInfo.ErrorInfo.Load())}
//		BarResultStatisticMq <- resultData
//		vlog = fmt.Sprintf("(%d) %v %v The execution result of table %v.%v is sent successfully. result is {%v}", logseq, callFuncInfo, event, plan.schema, plan.table, d)
//		global.Wlog.Debug(vlog)
//	} else {
//		vlog = fmt.Sprintf("(%d) %v %v Table %v.%v does not exist and the execution results cannot be written to xls!!", logseq, callFuncInfo, event, plan.schema, plan.table)
//		global.Wlog.Warn(vlog)
//	}
//	return nil
//}
//func writeBreakPointData(fin *os.File, s any, logseq int64) error {
//	WriteTextI := outPut.FileOut("text", map[string]any{"filen": fin, "fileName": global.BreeakPointFileName, "logSeq": logseq,
//		"bufSize": 1024 * 1024 * 4, "sqlType": "sql", "rowsS": "\n",
//	})
//	if !WriteTextI.AppendWrite("", s) {
//		err := errors.New(fmt.Sprintf("%v", `write data fail!`))
//		return err
//	}
//	return nil
//}
//func analyzeTable(plan *SchedulePlan2, logseq int64) error {
//	var (
//		vlog  string
//		event = "[analyzeTable]"
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	plan.dbf.AppTable.AppType = "analyze"
//	plan.dbf.DataFixType = "table"
//	vlog = fmt.Sprintf("(%v) %v %v Start the analyze operation of table %v.%v", logseq, callFuncInfo, event, plan.schema, plan.table)
//	global.Wlog.Debug(vlog)
//
//	if err := dbExec.ApplyDataFix(plan.dbf).ApplySql(global.ApplySqlP{
//		DB:        GlobalPConfigs.DDB,
//		ShardName: "single",
//		LogSeq:    logseq,
//	}); err != nil {
//		vlog = fmt.Sprintf("(%v) %v %v The analyze operation of table %v.%v failed. error info is {%v}", logseq, callFuncInfo, event, plan.schema, plan.table, err)
//		global.Wlog.Error(vlog)
//		return err
//	}
//	vlog = fmt.Sprintf("(%v) %v %v The analyze operation of table %v.%v was successful.", logseq, callFuncInfo, event, plan.schema, plan.table)
//	global.Wlog.Debug(vlog)
//	return nil
//}
//
//func getXlsWriteTerminalFunc() map[string]reflect.Value {
//	return map[string]reflect.Value{
//		"sync": reflect.ValueOf(syncXlsWriterTerminalData),
//	}
//}
//func xlsWriterTerminalData(plan *SchedulePlan2, logseq int64) error {
//	if funcName, ok := getXlsWriteTerminalFunc()[GlobalPConfigs.rules.CheckMode]; ok {
//		if funcName.Kind() == reflect.Func {
//			args := []reflect.Value{
//				reflect.ValueOf(plan),
//				reflect.ValueOf(logseq),
//			}
//			s := funcName.Call(args)
//			// NOTE(review): bug — when the called function returns a nil error, s[0].Interface() is nil
//			// and errors.New(fmt.Sprintf("%v", ...)) yields a non-nil "<nil>" error, so every caller's
//			// `err != nil` check fires even on success. Type-assert s[0] to error (or check s[0].IsNil())
//			// and return nil on success before re-enabling this code.
//			return errors.New(fmt.Sprintf("%v", s[0].Interface()))
//		}
//	}
//	return nil
//}
//
//// 每个表的mq消息状态监听，结束时发送状态给bar
//func mqTableStatusMonitor(done chan struct{}, wg *sync.WaitGroup, plan *SchedulePlan2, logseq int64) {
//	var (
//		event = "[mqTableStatusMonitor]"
//		vlog  string
//	)
//	pc, _, _, _ := runtime.Caller(1)
//	callingFunc := runtime.FuncForPC(pc)
//	callFile, callLine := callingFunc.FileLine(pc)
//	callFunc := callingFunc.Name()
//	callFuncInfo := fmt.Sprintf("Call functions %v Call File %v:%v", callFunc, callFile, callLine)
//	defer func() {
//		wg.Done()
//	}()
//	vlog = fmt.Sprintf("(%d) %v %v Start monitoring the mq queue status of table %v.%v. The default timeout is 360 days.", logseq, callFuncInfo, event, plan.schema, plan.table)
//	global.Wlog.Debug(vlog)
//	for {
//		select {
//		case d, ok := <-plan.mqProductCustomerMonitor.ProductMessage:
//			if !ok {
//				if e := atomic.LoadInt64(plan.errEnd); e < 0 {
//					vlog = fmt.Sprintf("(%d) %v %v An error occurs during the execution of table %v.%v, and error handling is performed.", logseq, callFuncInfo, event, plan.schema, plan.table)
//					global.Wlog.Warn(vlog)
//					if err := analyzeTable(plan, logseq); err != nil {
//						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "error")
//						return
//					}
//					if err := xlsWriterTerminalData(plan, logseq); err != nil {
//						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "error")
//						return
//					}
//					PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "error")
//					return
//				}
//				if *plan.mqProductCustomerMonitor.mqProductSeq == *plan.mqProductCustomerMonitor.mqCustomerSeq {
//					vlog = fmt.Sprintf("(%d) %v %v It is detected that the message queue production quantity and consumption quantity in table %v.%v are equal.", logseq, callFuncInfo, event, plan.schema, plan.table)
//					global.Wlog.Debug(vlog)
//					if err := analyzeTable(plan, logseq); err != nil {
//						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
//						return
//					}
//					st := getTableName(plan.schema, plan.table)
//					if err := writeBreakPointData(GlobalPConfigs.rules.BreakPointFin, []*string{&st}, logseq); err != nil {
//						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
//						return
//					}
//					if err := xlsWriterTerminalData(plan, logseq); err != nil {
//						PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
//						return
//					}
//					PlanContext.TaskBarSubsStatusSwap(plan.subTaskInfo, "finish")
//					vlog = fmt.Sprintf("(%v) %v %v Table %v.%v data migration is completed.", logseq, callFuncInfo, event, plan.schema, plan.table)
//					global.Wlog.Info(vlog)
//					return
//				}
//			} else {
//				for _, v := range d {
//					atomic.AddInt64(plan.mqProductCustomerMonitor.mqProductSeq, int64(len(v)))
//				}
//			}
//		case <-done:
//			close(plan.mqProductCustomerMonitor.ProductMessage)
//			vlog = fmt.Sprintf("(%d) %v %v It is detected that there is no update in the message queue of table %v.%v within 5 seconds, and the queue writing is closed.", logseq, callFuncInfo, event, plan.schema, plan.table)
//			global.Wlog.Debug(vlog)
//		}
//	}
//}
//
//// NewSchedulePlanConfig 初始化全局共享变量
//func NewSchedulePlanConfig(l *SchemaInitContextConfig) (*GlobalParametersConfig, error) {
//	var (
//		err       error
//		SDB, DDB  map[string]*sql.DB
//		timestamp = time.Now().Format("2006-01-02T15-04-05")
//		m         = l.GtCheckConfig
//		tl        = l.TableList
//	)
//	if SDB, err = dbExec.OpenDb(dbExec.OpenDB{
//		Dbtype:          m.SecondaryL.DSnsV.SrcDBName,
//		DBDevice:        m.SecondaryL.DSnsV.SrcDrive,
//		JDBC:            m.SecondaryL.DSnsV.SrcJdbc,
//		MaxOpenConns:    m.SecondaryL.DBPoolV.MaxOpenConn,
//		MaxIdleConns:    m.SecondaryL.DBPoolV.MaxIdleConn,
//		ConnMaxLifetime: m.SecondaryL.DBPoolV.ConnMaxLifeTime,
//		ConnMaxIdleTime: m.SecondaryL.DBPoolV.ConnMaxIdleTime,
//	}).Open(false); err != nil {
//		return nil, err
//	}
//	if DDB, err = dbExec.OpenDb(dbExec.OpenDB{
//		Dbtype:          m.SecondaryL.DSnsV.DestDBName,
//		DBDevice:        m.SecondaryL.DSnsV.DestDrive,
//		JDBC:            m.SecondaryL.DSnsV.DestJdbc,
//		MaxOpenConns:    m.SecondaryL.DBPoolV.MaxOpenConn,
//		MaxIdleConns:    m.SecondaryL.DBPoolV.MaxIdleConn,
//		ConnMaxLifetime: m.SecondaryL.DBPoolV.ConnMaxLifeTime,
//		ConnMaxIdleTime: m.SecondaryL.DBPoolV.ConnMaxIdleTime,
//	}).Open(true); err != nil {
//		return nil, err
//	}
//	config := &GlobalParametersConfig{
//		repair:       m.SecondaryL.RepairV,
//		rules:        m.SecondaryL.RulesV,
//		dSns:         m.SecondaryL.DSnsV,
//		schema:       m.SecondaryL.SchemaV,
//		TableList:    tl,
//		log:          m.SecondaryL.LogV,
//		structs:      m.SecondaryL.RulesV.Struct,
//		result:       m.SecondaryL.ResultV,
//		activeSwitch: m.SecondaryL.ActiveSwitchV,
//		SDB:          SDB,
//		DDB:          DDB,
//	}
//	global.GTableCurry = make(chan struct{}, m.SecondaryL.RulesV.Mtc)
//	//初始化输出目录result
//	for _, v := range []string{"result"} {
//		if !dirInit(v, 1) {
//			return nil, errors.New(fmt.Sprintf("%v", "init dir fail."))
//		}
//	}
//	var xlsFileName string
//	if config.result.GeneralResult {
//		xlsFileName = config.result.XlsV.XlsFileName
//		if !strings.Contains(xlsFileName, "/") {
//			xlsFileName = fmt.Sprintf("./result/%v_%v", xlsFileName, timestamp)
//		}
//	}
//	if config.result.GeneralResult {
//		ResultStatisticsGeneral = ResultGeneral{Xls: XlsResult{
//			XlsFileName:        xlsFileName,
//			XlsSheetNameGather: config.result.XlsV.XlsSheetNameGather,
//			XlsTailGather:      config.result.XlsV.XlsTailGather,
//			XlsSafetySwitch:    config.result.XlsV.XlsSafetySwitch,
//		}, Type: "xls"}
//		if ResultStatisticsGeneral.InitResultSaveFile("xls") {
//			ResultStatisticsMq = make(chan any, config.rules.QueueSize)
//		}
//	}
//	BarResultStatisticMq = make(chan any, config.rules.QueueSize)
//	if getBreakPointStatus(config) {
//		if config.rules.BreakPointFin, err = os.OpenFile(fmt.Sprintf("%v", global.BreeakPointFileName), os.O_APPEND|os.O_RDWR|os.O_CREATE, 0664); err != nil {
//			global.Wlog.Error(fmt.Sprintf("init bar monitor finish file fail. error info is %s", err))
//			return nil, err
//		}
//	} else {
//		if config.rules.BreakPointFin, err = os.OpenFile(fmt.Sprintf("%v", global.BreeakPointFileName), os.O_TRUNC|os.O_APPEND|os.O_RDWR|os.O_CREATE, 0664); err != nil {
//			global.Wlog.Error(fmt.Sprintf("init bar monitor finish file fail. error info is %s", err))
//			return nil, err
//		}
//	}
//	if config.rules.ActiveDataFin, err = os.OpenFile(fmt.Sprintf("%v_%v.result", global.ActiveDataResultFileName, timestamp), os.O_TRUNC|os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0664); err != nil {
//		global.Wlog.Error(fmt.Sprintf("init active data file fail. error info is %s", err))
//		return nil, err
//	}
//	if config.TtyBarFin, err = os.OpenFile(fmt.Sprintf("%v_%v.result", global.MonitorBarFinishFileName, timestamp), os.O_TRUNC|os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0664); err != nil {
//		global.Wlog.Error(fmt.Sprintf("init bar monitor finish file fail. error info is %s", err))
//		return nil, err
//	}
//	closeObject = quit.GetClose(map[string]any{
//		"dsn": map[string]any{"name": m.SecondaryL.DSnsV.DestDBName,
//			"drive": m.SecondaryL.DSnsV.DestDrive,
//			"jdbc":  m.SecondaryL.DSnsV.DestJdbc,
//		},
//	}, DDB, []*os.File{config.rules.BreakPointFin, config.rules.ActiveDataFin, config.TtyBarFin}, ResultStatisticsGeneral.SaveFile)
//	//
//	global.GetIoc().RegisterContext("globalConfig", config)
//	global.GetIoc().RegisterContext("closeObject", closeObject)
//	return config, nil
//}
//
//// NewSchemaInitConfig 创建执行计划配置参数
//func NewSchemaInitConfig() {
//	var (
//		sdnInput = "single"
//	)
//	gobject := &GlobalISObject{}
//	gobject.TableDataQuery = map[string]dbExec.IndexColumnStruct{
//		"source": {Drivce: GlobalPConfigs.dSns.SrcDrive, DBType: GlobalPConfigs.dSns.SrcDBName, DB: GlobalPConfigs.SDB[sdnInput]},
//		"dest":   {Drivce: GlobalPConfigs.dSns.DestDrive, DBType: GlobalPConfigs.dSns.DestDBName, DB: GlobalPConfigs.DDB[sdnInput]},
//	}
//	gobject.TableMetaData = map[string]dbExec.TableStructMetadataer{
//		"source": dbExec.Instance(
//			&dbExec.TableMetadata{Drive: GlobalPConfigs.dSns.SrcDrive,
//				DBname: GlobalPConfigs.dSns.SrcDBName}),
//		"dest": dbExec.Instance(
//			&dbExec.TableMetadata{Drive: GlobalPConfigs.dSns.DestDrive,
//				DBname: GlobalPConfigs.dSns.DestDBName}),
//	}
//	global.GetIoc().RegisterContext("isObject", gobject)
//	GlobalISObjects = gobject
//	return
//}
//func initSchemaBean(config *SchemaInitContextConfig) error {
//	var (
//		err error
//	)
//	//注册输入配置
//	global.GetIoc().RegisterContext("configParameter", config.GtCheckConfig)
//	//初始化全局共享配置 	//注册执行计划配置 SchedulePlanConfig
//	GlobalPConfigs, err = NewSchedulePlanConfig(config)
//	if err != nil {
//		return err
//	}
//	if GlobalPConfigs == nil {
//		return errors.New("初始化 NewSchedulePlanConfig 执行计划配置失败")
//	}
//	//初始化结构体或结构对象并注册
//	if NewSchemaInitConfig(); GlobalISObjects == nil {
//		return errors.New("初始化 NewSchemaInitConfig 执行计划配置失败")
//	}
//	return nil
//}
//
//// 初始化上下文，该模块的全局配置，过滤表名，按照join、left、unio方式
//func schemaContext(config *SchemaInitContextConfig) *CheckSchemaMetaData {
//	if SchemaContext == nil {
//		lock.Lock()
//		defer lock.Unlock()
//		meta := GetCheckSchemaMetaData()
//		if err := initSchemaBean(config); err != nil {
//			fmt.Println("创建执行计划上下文 NewSchedulePlanContext 失败", err)
//			return nil
//		}
//		l := metaD.GetTableFitleO()
//		o1 := metaD.GetSchemaObject()
//		for _, v := range config.GtCheckConfig.SecondaryL.SchemaV.TableInput {
//			var keys, missKeys []string
//			//初始化执行计划表
//			l.TableMetaData = GlobalISObjects.TableMetaData
//			l.TableJoinFay = v.TableJoin
//			l.MapTables = GlobalPConfigs.schema.MapTables
//			l.SDB = GlobalPConfigs.SDB["single"]
//			l.DDB = GlobalPConfigs.DDB["single"]
//			for k, v1 := range l.InitToSchema2Table(v.Tables) {
//				if strings.EqualFold(v1, "ok") {
//					keys = append(keys, k)
//				} else {
//					missKeys = append(missKeys, k)
//				}
//			}
//			if len(keys) > 0 {
//				o := metaD.GetSchemaObject()
//				o.TableList = keys
//				o.TableMiss = missKeys
//				o1.TaskTableList = append(o1.TaskTableList, o)
//			}
//		}
//		//注册ioc 容器
//		global.GetIoc().RegisterContext("schemaContext", o1)
//		for _, v := range o1.TaskTableList {
//			v1 := metaD.GetMetaDataO()
//			v1.TableList = v.TableList
//			v1.TableMiss = v.TableMiss
//			v1.TableMetaData = l.TableMetaData
//			v1.Schema = config.GtCheckConfig.SecondaryL.SchemaV
//			v1.Rules = config.GtCheckConfig.SecondaryL.RulesV
//			v1.DSns = config.GtCheckConfig.SecondaryL.DSnsV
//			v1.SDB = GlobalPConfigs.SDB["single"]
//			v1.DDB = GlobalPConfigs.DDB["single"]
//			meta.TableMD = append(meta.TableMD, v1)
//		}
//		meta.ActiveSwitchV = config.GtCheckConfig.SecondaryL.ActiveSwitchV
//		return meta
//	}
//	return nil
//}
