package Gr

import (
	"context"
	"errors"
	"fmt"
	"db2s/ETL"
	ea "db2s/encryptionAlgorithm"
	"db2s/global"
	"db2s/parDef"
	"db2s/ref"
	"reflect"
	"sort"
	"strconv"
	"strings"
)

// Name is intended to list the base-table names for the configured schema
// (all tables, a LIKE pattern, or an exact name depending on Object.Table).
// The implementation is currently disabled (commented out), so this method is
// a no-op stub: it always returns a zero-value Return and a nil error.
// NOTE(review): ctx is accepted but unused while the body is disabled.
func (tm TableMe) Name(ctx context.Context) (result global.Return, err error) {
	//var event = "[Name]"
	//var execSql []string
	//execSql = append(execSql, fmt.Sprintf("select table_NAME as `tableName` from information_Schema.TABLES where TABLE_TYPE = 'BASE TABLE'  AND table_schema ='%v' %v",
	//	parameter.Object.Schema, func() string {
	//		switch {
	//		case parameter.Object.Table == "*":
	//			return ""
	//		case strings.Contains(parameter.Object.Table, "%"):
	//			return fmt.Sprintf("and table_name like '%v'", parameter.Object.Table)
	//		default:
	//			return fmt.Sprintf("and table_name = '%v'", parameter.Object.Table)
	//		}
	//	}()))
	//if result, err = forExecQuerySql(event, parameter, dataDispos.StringGarth, execSql); err != nil || result.Result == nil {
	//	return
	//}
	return
}
// Existence is intended to report whether the target table exists by querying
// information_schema.TABLES and comparing the returned name case-insensitively.
// The implementation is currently disabled (commented out), so this method is
// a no-op stub returning a zero-value Return and a nil error.
// NOTE(review): ctx is accepted but unused while the body is disabled.
func (tm TableMe) Existence(ctx context.Context) (result global.Return, err error) {
	//var event = "[Existence]"
	//var execSql []string
	//execSql = append(execSql, fmt.Sprintf("select table_name from information_Schema.TABLES where TABLE_TYPE = 'BASE TABLE'  AND table_schema ='%v'  and table_NAME = '%v'", parameter.Object.Schema, parameter.Object.Table))
	//if result, err = forExecQuerySql(event, parameter, dataDispos.String, execSql); err != nil || result.Result == nil {
	//	return
	//}
	//switch {
	//case strings.EqualFold(fmt.Sprintf("%v", result.Result), parameter.Object.Table):
	//	result.Result = true
	//default:
	//	result.Result = false
	//}
	return
}
// Schema reports whether the configured schema exists on the server. It
// queries information_schema.tables for the schema name and sets
// result.Result to true when the returned name matches the requested schema
// (case-insensitively), false otherwise.
func (tm TableMe) Schema(parameter parDef.Parameter) (result global.Return, err error) {
	var event = "[Schema]"
	var execSql []string
	execSql = append(execSql, fmt.Sprintf("SELECT  table_schema FROM information_schema.tables WHERE TABLE_TYPE = 'BASE TABLE'  AND table_schema = '%v' ", parameter.Object.Schema))
	if result, err = forExecQuerySql(event, parameter, ETL.String, execSql); err != nil || result.Result == nil {
		return
	}
	// BUG FIX: the query returns a schema name, so compare against
	// parameter.Object.Schema (previously compared against Object.Table —
	// copy/paste from the table-existence check).
	result.Result = strings.EqualFold(fmt.Sprintf("%v", result.Result), parameter.Object.Schema)
	return
}
// Comment fetches the table comment (TABLE_COMMENT) of the target table from
// INFORMATION_SCHEMA.TABLES and returns it as the query result.
func (tm TableMe) Comment(parameter parDef.Parameter) (result global.Return, err error) {
	const event = "[Comment]"
	query := fmt.Sprintf("SELECT TABLE_COMMENT as `comment` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%v' AND TABLE_NAME='%v'", parameter.Object.Schema, parameter.Object.Table)
	result, err = forExecQuerySql(event, parameter, ETL.String, []string{query})
	return
}
// Character fetches the character set of the target table, derived from the
// prefix of TABLE_COLLATION (e.g. "utf8mb4" from "utf8mb4_general_ci").
func (tm TableMe) Character(parameter parDef.Parameter) (result global.Return, err error) {
	// BUG FIX: event label was "[Comment]" (copy/paste from Comment).
	var event = "[Character]"
	var execSql []string
	execSql = append(execSql, fmt.Sprintf("SELECT REGEXP_SUBSTR(TABLE_COLLATION, '^[^_]+') AS `character` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%v' AND TABLE_NAME='%v'", parameter.Object.Schema, parameter.Object.Table))
	if result, err = forExecQuerySql(event, parameter, ETL.String, execSql); err != nil || result.Result == nil {
		return
	}
	return
}
// Collation fetches the full collation name (TABLE_COLLATION) of the target
// table from INFORMATION_SCHEMA.TABLES.
func (tm TableMe) Collation(parameter parDef.Parameter) (result global.Return, err error) {
	// BUG FIX: event label was "[Comment]" (copy/paste from Comment).
	var event = "[Collation]"
	var execSql []string
	execSql = append(execSql, fmt.Sprintf("SELECT TABLE_COLLATION as `collation` FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%v' AND TABLE_NAME='%v'", parameter.Object.Schema, parameter.Object.Table))
	if result, err = forExecQuerySql(event, parameter, ETL.String, execSql); err != nil || result.Result == nil {
		return
	}
	return
}
// Constraints returns the CHECK constraints defined on the target table as a
// slice of parDef.Constraints (constraint name plus its check clause body),
// joining TABLE_CONSTRAINTS with CHECK_CONSTRAINTS.
func (tm TableMe) Constraints(parameter parDef.Parameter) (result global.Return, err error) {
	const event = "[Constraints]"
	execSql := []string{fmt.Sprintf("SELECT \n\t\t"+
		"tc.CONSTRAINT_NAME as `name`,\n    "+
		"cc.CHECK_CLAUSE as `body`\n"+
		"FROM \n    "+
		"information_schema.TABLE_CONSTRAINTS AS tc\n"+
		"JOIN \n    "+
		"information_schema.CHECK_CONSTRAINTS AS cc \n    "+
		"ON tc.CONSTRAINT_NAME = cc.CONSTRAINT_NAME\n"+
		"WHERE \n    "+
		"tc.TABLE_SCHEMA = '%v' \n    "+
		"AND tc.TABLE_NAME = '%v'\n    "+
		"AND tc.CONSTRAINT_TYPE = 'CHECK';", parameter.Object.Schema, parameter.Object.Table)}
	if result, err = forExecQuerySql(event, parameter, ETL.SMap, execSql); err != nil {
		err = ref.ErrAddPrintf("GreatDB.Constraints", err)
		return
	}
	if result.Result == nil {
		return
	}
	// Convert each raw row (map with NULLs normalised) into a typed record.
	var constraints []parDef.Constraints
	for _, row := range result.Result.([]map[string]interface{}) {
		var item parDef.Constraints
		if err = ref.MapToStruct(mapNullActive(row), &item); err != nil {
			err = ref.ErrAddPrintf("GreatDB.Constraints", err)
			return
		}
		constraints = append(constraints, item)
	}
	result.Result = constraints
	return
}
// DefaultSchemaCharacter fetches the default character set of the configured
// schema from INFORMATION_SCHEMA.SCHEMATA.
func (tm TableMe) DefaultSchemaCharacter(parameter parDef.Parameter) (result global.Return, err error) {
	const event = "[DefaultSchemaCharacter]"
	query := fmt.Sprintf("SELECT DEFAULT_CHARACTER_SET_NAME as `character` FROM INFORMATION_SCHEMA.SCHEMATA where SCHEMA_NAME='%v'", parameter.Object.Schema)
	result, err = forExecQuerySql(event, parameter, ETL.String, []string{query})
	return
}
// Drop is intended to drop the target table (DROP TABLE IF EXISTS) via
// newExecUpdateSql. The implementation is currently disabled (commented out),
// so this method is a no-op stub returning zero values and a nil error.
func (tm TableMe) Drop(parameter parDef.Parameter) (result global.Return, err error) {
	//var event = "[Drop]"
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf("GreatDB.Drop", errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//result.Sql = fmt.Sprintf("DROP TABLE IF EXISTS `%v`.`%v`", parameter.Object.Schema, parameter.Object.Table)
	//if result.TimeOut, result.Result, err = newExecUpdateSql(ExecSqlInputS{
	//	Ss:           parameter,
	//	Event:        event,
	//	UpdateSqlStr: []string{result.Sql},
	//}); err != nil {
	//	err = ref.ErrAddPrintf("GreatDB.Drop", err)
	//	return
	//}
	return
}

// Rename is intended to move the table to the recover schema/table via
// RENAME TABLE. The implementation is currently disabled (commented out),
// so this method is a no-op stub returning zero values and a nil error.
func (tm TableMe) Rename(parameter parDef.Parameter) (result global.Return, err error) {
	//var event = "[Rename]"
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf("GreatDB.Rename", errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//result.Sql = fmt.Sprintf("RENAME TABLE `%v`.`%v` TO `%v`.`%v`", parameter.Object.Schema, parameter.Object.Table, parameter.Object.RecoverSchema, parameter.Object.RecoverTable)
	//if result.TimeOut, result.Result, err = newExecUpdateSql(ExecSqlInputS{
	//	Ss:           parameter,
	//	Event:        event,
	//	UpdateSqlStr: []string{result.Sql},
	//}); err != nil {
	//	err = ref.ErrAddPrintf("GreatDB.Rename", err)
	//	return
	//}
	return
}
// Recover is intended to swap the live table with its recover copy: create the
// recover schema/table, then exchange the two tables via RENAME TABLE. The
// implementation is currently disabled (commented out), so this method is a
// no-op stub returning zero values and a nil error.
// NOTE(review): the disabled SQL uses "CREATE DATABASE IF EXISTS" —
// presumably "IF NOT EXISTS" was intended; confirm before re-enabling.
func (tm TableMe) Recover(parameter parDef.Parameter) (result global.Return, err error) {
	//var event = "[Recover]"
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf("GreatDB.Recover", errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//var execSql []string
	//execSql = append(execSql, fmt.Sprintf("CREATE DATABASE IF EXISTS `%v`", parameter.Object.RecoverSchema))
	//execSql = append(execSql, fmt.Sprintf("CREATE TABLE `%v`.`%v` like `%v`.`%v`", parameter.Object.RecoverSchema, parameter.Object.RecoverTable, parameter.Object.Schema, parameter.Object.Table))
	//execSql = append(execSql, fmt.Sprintf("RENAME TABLE `%v`.`%v` TO `%v`.`%v`", parameter.Object.Schema, parameter.Object.Table, parameter.Object.RecoverSchema, parameter.Object.RecoverTable))
	//execSql = append(execSql, fmt.Sprintf("RENAME TABLE `%v`.`%v` TO `%v`.`%v`", parameter.Object.RecoverSchema, parameter.Object.RecoverTable, parameter.Object.Schema, parameter.Object.Table))
	//if result.TimeOut, result.Result, err = newExecUpdateSql(ExecSqlInputS{
	//	Ss:           parameter,
	//	Event:        event,
	//	UpdateSqlStr: execSql,
	//}); err != nil {
	//	err = ref.ErrAddPrintf("GreatDB.Recover", err)
	//	return
	//}
	return
}
// Truncate is intended to truncate the whole table ("single") or individual
// partitions (ALTER TABLE ... TRUNCATE PARTITION). The implementation is
// currently disabled (commented out), so this method is a no-op stub
// returning zero values and a nil error.
func (tm TableMe) Truncate(parameter parDef.Parameter) (result global.Return, err error) {
	//var event = "[Truncate]"
	//var execSql []string
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf("GreatDB.Truncate", errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//for _, partName := range parameter.Object.TruncatePartition {
	//	switch partName {
	//	case "single":
	//		execSql = append(execSql, fmt.Sprintf("TRUNCATE TABLE `%v`.`%v`", parameter.Object.Schema, parameter.Object.Table))
	//	default:
	//		execSql = append(execSql, fmt.Sprintf("ALTER TABLE `%v`.`%v` TRUNCATE PARTITION `%v`", parameter.Object.Schema, parameter.Object.Table, partName))
	//	}
	//}
	//if result.TimeOut, result.Result, err = newExecUpdateSql(ExecSqlInputS{
	//	Ss:           parameter,
	//	Event:        event,
	//	UpdateSqlStr: execSql,
	//}); err != nil {
	//	err = ref.ErrAddPrintf("GreatDB.Truncate", err)
	//	return
	//}
	return
}
// PartNameToShardName maps a partition name to a shard name; for this engine
// the table name itself serves as the shard identifier.
func (tm TableMe) PartNameToShardName(parameter parDef.Parameter) (result global.Return, err error) {
	result = global.Return{Result: parameter.Object.Table}
	return
}
// Analyze is intended to run ANALYZE TABLE on the target table (after a USE of
// the schema). The implementation is currently disabled (commented out), so
// this method is a no-op stub returning zero values and a nil error.
func (tm TableMe) Analyze(parameter parDef.Parameter) (result global.Return, err error) {
	//var event = "[Analyze]"
	//var execSql []string
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf("GreatDB.Analyze", errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//execSql = append(execSql, fmt.Sprintf("use `%v`", parameter.Object.Schema))
	//execSql = append(execSql, fmt.Sprintf("analyze table `%v`", parameter.Object.Table))
	//if result.TimeOut, result.Result, err = newExecUpdateSql(ExecSqlInputS{
	//	Ss:           parameter,
	//	Event:        event,
	//	UpdateSqlStr: execSql,
	//}); err != nil {
	//	err = ref.ErrAddPrintf("GreatDB.Analyze", err)
	//	return
	//}
	return
}

// filePoint outPut.FileOperations
// writeFileDDL writes the pending DDL statement (parameter.ExecDDL.Sql) to the
// configured file target instead of executing it against a connection. It
// errors when no file target (FilePoint) is configured or the write fails.
func (tm TableMe) writeFileDDL(parameter parDef.Parameter) (result global.Return, err error) {
	var event = "[writeFileDDL]"
	defer func() {
		if re := recover(); re != nil {
			// IDIOM: fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
			err = ref.ErrAddPrintf(event, fmt.Errorf("defer recover An exception was captured. abnormal is %v ", re))
		}
	}()
	result.Sql = parameter.ExecDDL.Sql
	if parameter.ExecDDL.FilePoint == nil {
		err = ref.ErrAddPrintf(event, errors.New("file point is nil"))
		return
	}
	if err = parameter.ExecDDL.FilePoint.Write("", result.Sql); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	return
}
// connExecDDL is intended to execute the pending DDL statement directly on a
// database connection via newExecUpdateSql. The implementation is currently
// disabled (commented out), so this method is a no-op stub returning zero
// values and a nil error.
func (tm TableMe) connExecDDL(parameter parDef.Parameter) (result global.Return, err error) {
	//var event = "[connExecDDL]"
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf(event, errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//result.Sql = parameter.ExecDDL.Sql
	//if result.TimeOut, result.Result, err = newExecUpdateSql(ExecSqlInputS{
	//	Ss:           parameter,
	//	Event:        event,
	//	UpdateSqlStr: []string{result.Sql},
	//}); err != nil {
	//	err = ref.ErrAddPrintf(event, err)
	//	return
	//}
	return
}
// ExecDDL is intended to dispatch DDL execution by RepairMethod: "table"
// executes on a connection (connExecDDL) and "file" writes to a file
// (writeFileDDL). The implementation is currently disabled (commented out),
// so this method is a no-op stub returning zero values and a nil error.
// NOTE(review): ctx is accepted but unused while the body is disabled.
func (tm TableMe) ExecDDL(ctx context.Context) (result global.Return, err error) {
	//defer func() {
	//	if re := recover(); re != nil {
	//		err = ref.ErrAddPrintf("GreatDB.ExecDDL", errors.New(fmt.Sprintf("defer recover An exception was captured. abnormal is %v ", re)))
	//	}
	//}()
	//switch parameter.Options.RepairMethod {
	//case "table":
	//	return tm.connExecDDL(parameter)
	//case "file":
	//	return tm.writeFileDDL(parameter)
	//default:
	//	err = ref.ErrAddPrintf("GreatDB.ExecDDL", errors.New(fmt.Sprintf("type not matched. curry type is %v", parameter.Options.RepairMethod)))
	//}
	return
}
// getDefaultCharsetNum maps the schema's default character set to a byte
// factor: utf8mb3 -> 3, utf8mb4 -> 4. Any other charset is reported as an
// error.
func (tm TableMe) getDefaultCharsetNum(parameter parDef.Parameter) (result int, err error) {
	var res global.Return
	var event = "[getDefaultCharsetNum]"
	defer func() {
		if re := recover(); re != nil {
			// IDIOM: fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
			err = ref.ErrAddPrintf(event, fmt.Errorf("defer recover An exception was captured. abnormal is %v ", re))
		}
	}()
	if res, err = tm.DefaultSchemaCharacter(parameter); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	switch strings.ToLower(fmt.Sprintf("%v", res.Result)) {
	case "utf8mb3":
		result = 3
	case "utf8mb4":
		result = 4
	default:
		// Include the offending value itself: the Go type alone does not
		// identify which charset failed to match.
		err = ref.ErrAddPrintf(event, fmt.Errorf("type not matched. curry type is %v, value is %v", reflect.TypeOf(res.Result), res.Result))
	}
	return
}
// GetGIPKSwitch reports whether generated invisible primary keys are enabled
// on the server (sql_generate_invisible_primary_key = ON in
// performance_schema.global_variables). A missing variable yields false.
func (tm TableMe) GetGIPKSwitch(parameter parDef.Parameter) (result bool, err error) {
	var event = "[GetGIPKSwitch]"
	var execSql []string
	var res global.Return
	defer func() {
		if re := recover(); re != nil {
			// IDIOM: fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
			err = ref.ErrAddPrintf(event, fmt.Errorf("defer recover An exception was captured. abnormal is %v ", re))
		}
	}()
	execSql = append(execSql, fmt.Sprintf("select VARIABLE_VALUE as `value` from performance_schema.global_variables where Variable_name='%v'", "sql_generate_invisible_primary_key"))
	if res, err = forExecQuerySql(event, parameter, ETL.String, execSql); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	if res.Result == nil {
		return
	}
	result = strings.EqualFold(strings.TrimSpace(fmt.Sprintf("%v", res.Result)), "ON")
	return
}
// addInitGIPKData injects a generated invisible primary key (column
// `my_row_id`, bigint unsigned auto-increment, invisible) into the table
// metadata when the table has neither an auto-increment column nor a primary
// key, mirroring MySQL's GIPK behavior. Metadata that already satisfies
// either condition is returned unchanged.
func addInitGIPKData(metaBase MetaBaseInitResult) (result MetaBaseInitResult, err error) {
	result = metaBase
	var event = "[addInitGIPKData]"
	defer func() {
		if re := recover(); re != nil {
			// IDIOM: fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
			err = ref.ErrAddPrintf(event, fmt.Errorf("defer recover An exception was captured. abnormal is %v ", re))
		}
	}()
	// ROBUSTNESS: an empty columnMeta previously panicked on columnMeta[0]
	// below and was only surfaced via the recover above; fail explicitly.
	if len(metaBase.columnMeta) == 0 {
		err = ref.ErrAddPrintf(event, errors.New("columnMeta is empty"))
		return
	}
	// An existing auto-increment column means a GIPK is not needed.
	for _, c := range metaBase.columnMeta {
		if c.AutoIncrement {
			return
		}
	}
	// An existing primary key means a GIPK is not needed.
	for _, v := range metaBase.indexMeta {
		if v.ColumnKey == "P" {
			return
		}
	}
	result.indexMeta = append(result.indexMeta, parDef.IndexColumnMe{
		Schema:     metaBase.columnMeta[0].DatabaseName,
		Table:      metaBase.columnMeta[0].TableName,
		IndexName:  "PRIMARY",
		ColumnMate: []parDef.IdxSubColumnMe{{ColumnName: "my_row_id", ColumnType: "bigint", IndexSeq: "1"}},
		Null:       "NO",
		ColumnKey:  "P",
	})
	result.columnMeta = append(result.columnMeta, columnMeta{
		DatabaseName:  metaBase.columnMeta[0].DatabaseName,
		TableName:     metaBase.columnMeta[0].TableName,
		ColumnName:    "my_row_id",
		ColumnType:    "bigint unsigned",
		ColumnSeq:     strconv.Itoa(len(result.columnMeta) + 1),
		IsNull:        "NOT NULL",
		ColumnComment: "autoIncrementId",
		Invisible:     true,
		AutoIncrement: true,
	})
	result.columnMetaMap = append(result.columnMetaMap, parDef.ColMetaMapS{
		DatabaseName: metaBase.columnMeta[0].DatabaseName,
		TableName:    metaBase.columnMeta[0].TableName,
		ColumnName:   "my_row_id",
		TypeBelong:   "numerical",
		Type: parDef.ColTypeValueMap{
			MinValue:  -9223372036854775808,
			MaxValue:  9223372036854775807,
			ShowValue: 9223372036854775807,
			UseByte:   8,
			Symbol:    true,
		},
		ColNullMap:       "NO",
		ColAutoIncrement: true,
		Invisible:        true,
		ColumnComment:    "autoIncrementId",
	})
	return
}
// GetGIPKAction returns the metadata augmented with a generated invisible
// primary key when the server's GIPK switch is enabled; otherwise the input
// metadata is returned unchanged.
func (tm TableMe) GetGIPKAction(parameter parDef.Parameter, metaBase MetaBaseInitResult) (res MetaBaseInitResult, err error) {
	var gipSwitch bool
	res = metaBase
	var event = "[GetGIPKAction]"
	defer func() {
		if re := recover(); re != nil {
			// IDIOM: fmt.Errorf replaces errors.New(fmt.Sprintf(...)).
			err = ref.ErrAddPrintf(event, fmt.Errorf("defer recover An exception was captured. abnormal is %v ", re))
		}
	}()
	if gipSwitch, err = tm.GetGIPKSwitch(parameter); err != nil {
		err = ref.ErrAddPrintf(event, err)
		// BUG FIX: previously execution fell through and continued with
		// gipSwitch's zero value despite the error.
		return
	}
	if !gipSwitch {
		return
	}
	if res, err = addInitGIPKData(metaBase); err != nil {
		err = ref.ErrAddPrintf(event, err)
	}
	return
}
// uniqueAddPartition appends any partition columns missing from a unique
// ("U") or primary ("P") index column list; other index kinds are returned
// with their columns unchanged.
func uniqueAddPartition(colKey string, indexCol, partCol []string) (newColumn []string) {
	newColumn = append(newColumn, indexCol...)
	if colKey != "U" && colKey != "P" {
		return
	}
	if missing, _ := ea.CheckSum().Arrcmp(partCol, newColumn); len(missing) > 0 {
		newColumn = append(newColumn, missing...)
	}
	return
}
// modifyIndexColumnAddPartColumn extends every PRIMARY/UNIQUE index with any
// partition columns it is missing (MySQL requires unique keys to include all
// partition columns). Columns added to the PRIMARY key are also queued in
// modifyNotNull so they can be promoted to NOT NULL, and each modification is
// logged in switchRecord.
func modifyIndexColumnAddPartColumn(im MetaBaseInitResult, partitionColumn []string) (res MetaBaseInitResult) {
	var partCol []string
	res = im
	for _, y := range partitionColumn {
		partCol = append(partCol, fmt.Sprintf("`%v`", y))
	}
	// Index name -> backtick-quoted column list; plain ("I") indexes are
	// exempt from the partition-column requirement.
	var indexToColumn = make(map[string][]string)
	for _, v := range im.indexMeta {
		if v.ColumnKey == "I" {
			continue
		}
		var newColumn []string
		for _, vv := range v.ColumnMate {
			newColumn = append(newColumn, fmt.Sprintf("`%v`", vv.ColumnName))
		}
		indexToColumn[v.IndexName] = newColumn
	}
	// Index name -> partition columns that index is missing.
	var indexModifyCol = make(map[string][]string)
	for k, v := range indexToColumn {
		if add, _ := ea.CheckSum().Arrcmp(partCol, v); len(add) > 0 {
			indexModifyCol[k] = append(indexModifyCol[k], add...)
		}
	}
	for k, v := range indexModifyCol {
		var newColumn = indexToColumn[k]
		if strings.EqualFold(k, "PRIMARY") {
			for _, col := range v {
				res.modifyNotNull = append(res.modifyNotNull, strings.ReplaceAll(col, "`", ""))
			}
		}
		newColumn = append(newColumn, v...)
		res.switchRecord = append(res.switchRecord, modifyLogoRecord{Object: "index uniq", Logo: k, OldValue: strings.Join(indexToColumn[k], ","), NewValue: strings.Join(newColumn, ","), Reason: "primary or unique key must Include partition column"})
	}
	for k, v := range im.indexMeta {
		if vv, ok := indexModifyCol[v.IndexName]; ok {
			for _, va := range vv {
				for _, colMeta := range im.columnMeta {
					if strings.EqualFold(strings.ReplaceAll(va, "`", ""), colMeta.ColumnName) {
						// BUG FIX: compute the next IndexSeq from the slice
						// being appended to (res.indexMeta[k]); the old code
						// used len(v.ColumnMate)+1 from the stale range copy,
						// so every added column received the same sequence.
						res.indexMeta[k].ColumnMate = append(res.indexMeta[k].ColumnMate, parDef.IdxSubColumnMe{
							ColumnName: colMeta.ColumnName,
							ColumnType: colMeta.ColumnType,
							ColumnSeq:  colMeta.ColumnSeq,
							IndexSeq:   strconv.Itoa(len(res.indexMeta[k].ColumnMate) + 1),
						})
					}
				}
			}
		}
	}
	return
}
// getIndexByte resolves the byte width of each index column (via
// colTypeMapSwitch) and returns the widths in index-column order.
func getIndexByte(indexCol []string, columnMetaMap []parDef.ColMetaMapS, factor int) (indexLengthDetails []int) {
	for _, name := range indexCol {
		for _, meta := range columnMetaMap {
			if !strings.EqualFold(meta.ColumnName, name) {
				continue
			}
			_, width := colTypeMapSwitch(meta, factor)
			indexLengthDetails = append(indexLengthDetails, width)
		}
	}
	return
}
// getSingleIndexMaxLe computes, for every index, the total byte length of its
// columns and the per-column byte lengths, storing both on the index metadata
// (IndexMaxLength / IndexLengthDetails).
func getSingleIndexMaxLe(metaBase MetaBaseInitResult) (res MetaBaseInitResult) {
	res = metaBase
	for i, idx := range res.indexMeta {
		var total int
		var details []int
		for _, col := range idx.ColumnMate {
			for _, meta := range metaBase.columnMetaMap {
				if !strings.EqualFold(meta.ColumnName, col.ColumnName) {
					continue
				}
				_, width := colTypeMapSwitch(meta, metaBase.factor)
				total += width
				details = append(details, width)
			}
		}
		idx.IndexLengthDetails = details
		idx.IndexMaxLength = total
		res.indexMeta[i] = idx
	}
	return
}
// indexColSubPrefix renders index columns as backtick-quoted identifiers.
// When the total index width crosses the limit, columns wider than the
// average get a prefix length (average bytes / factor characters) appended.
// NOTE(review): the 3702 threshold looks like a transposition of MySQL's
// 3072-byte InnoDB index limit (getSingleIndexColumn derives the average from
// 3072) — confirm before changing.
func indexColSubPrefix(indexCol []string, indexColByte []int, factor int) (res []string) {
	// ROBUSTNESS: an empty byte list previously caused a division by zero.
	if len(indexColByte) == 0 {
		return
	}
	var maxLength int
	for _, v := range indexColByte {
		maxLength += v
	}
	avgLength := maxLength / len(indexColByte)
	if maxLength < 3702 {
		for _, v := range indexCol {
			res = append(res, fmt.Sprintf("`%v`", v))
		}
		return
	}
	for k, v := range indexColByte {
		if v > avgLength {
			res = append(res, fmt.Sprintf("`%v`(%v)", indexCol[k], avgLength/factor))
		} else {
			res = append(res, fmt.Sprintf("`%v`", indexCol[k]))
		}
	}
	return
}
// getSingleIndexColumn applies prefix lengths (SubPart) to the columns of any
// index whose total byte length exceeds the limit: columns wider than the
// per-column average (3072 / column count) are truncated to the average,
// expressed in characters (bytes / factor).
// NOTE(review): the limit check uses 3702 while the average is derived from
// 3072 (MySQL's InnoDB index-length limit) — 3702 looks like a transposition;
// confirm before changing.
func getSingleIndexColumn(metaBase MetaBaseInitResult) (res MetaBaseInitResult) {
	res = metaBase
	for k, v := range metaBase.indexMeta {
		var avgLength int
		if len(v.ColumnMate) > 0 {
			avgLength = 3072 / len(v.ColumnMate)
		}
		if v.IndexMaxLength > 3702 {
			// Rebuild the column list, attaching SubPart (prefix length in
			// characters) to the over-wide columns only.
			var q = make([]parDef.IdxSubColumnMe, len(v.ColumnMate))
			for dk, dv := range v.IndexLengthDetails {
				if dv > avgLength {
					p := v.ColumnMate[dk]
					p.SubPart = avgLength / metaBase.factor
					q[dk] = p
				} else {
					q[dk] = v.ColumnMate[dk]
				}
			}
			v.ColumnMate = q
			res.indexMeta[k] = v
		}
	}
	return
}
// createIndexSql renders the KEY / UNIQUE KEY / PRIMARY KEY clauses for all
// index metadata into res.createIndexSql, and records in res.indexColumnUniq
// how many indexes reference each column.
func createIndexSql(metaBase MetaBaseInitResult) (res MetaBaseInitResult) {
	res = metaBase
	var clauses []string
	usage := make(map[string]int)
	for _, idx := range metaBase.indexMeta {
		var cols []string
		for _, c := range idx.ColumnMate {
			rendered := fmt.Sprintf("`%v`", c.ColumnName)
			if c.SubPart > 0 {
				// Prefix index: append the prefix length.
				rendered = fmt.Sprintf("`%v`(%v)", c.ColumnName, c.SubPart)
			}
			usage[c.ColumnName]++
			cols = append(cols, rendered)
		}
		joined := strings.Join(cols, ",")
		switch idx.ColumnKey {
		case "I":
			clauses = append(clauses, fmt.Sprintf("KEY `%v` (%v)", idx.IndexName, joined))
		case "U":
			name := idx.IndexName
			if strings.EqualFold(name, "primary") {
				// A unique index must not be named "PRIMARY"; suffix it.
				name = fmt.Sprintf("%v_uni", idx.IndexName)
			}
			clauses = append(clauses, fmt.Sprintf("UNIQUE KEY `%v` (%v)", name, joined))
		case "P":
			clauses = append(clauses, fmt.Sprintf("PRIMARY KEY `%v` (%v)", idx.IndexName, joined))
		}
	}
	res.createIndexSql = strings.Join(clauses, ",\n")
	res.indexColumnUniq = usage
	return
}
// getAllPartCol returns the union of partition and sub-partition columns,
// de-duplicated while preserving first-seen order.
func getAllPartCol(metaBase MetaBaseInitResult) (col []string) {
	seen := make(map[string]int)
	appendUniq := func(names []string) {
		for _, name := range names {
			if _, dup := seen[name]; dup {
				continue
			}
			col = append(col, name)
			seen[name]++
		}
	}
	appendUniq(metaBase.partMe.PartCol)
	appendUniq(metaBase.partMe.SubPartCol)
	return
}
// CreateIndexBody runs the index-construction pipeline over the table
// metadata: 1) extend PRIMARY/UNIQUE indexes with any missing partition
// columns, 2) compute per-index byte lengths, 3) apply prefix-length
// truncation where an index is over-long, 4) render the final index DDL
// fragment (createIndexSql) and per-column index usage counts.
func CreateIndexBody(metaBase MetaBaseInitResult) (result MetaBaseInitResult) {
	//im, _ := s.TableMeta.IndexMeta["source"]
	result = metaBase
	// Index names and their associated index-type sets.
	result = modifyIndexColumnAddPartColumn(result, getAllPartCol(result))
	//result.recover = append(result.recover, columnRecover...)
	// Maximum byte length per index.
	result = getSingleIndexMaxLe(result)
	//result.indexColumnGarth = indexOptimizerResult.indexColumnGarth
	// Per-index length-limit check, prefix-index handling, and rework of
	// over-long indexes.
	result = getSingleIndexColumn(result)
	//result.recover = append(result.recover, newColumnRecover...)
	result = createIndexSql(result)
	return
}
// newColumnTypeOptions resolves the rendered column type, applying any type
// rewrite (varchar->text, decimal->bigint, ...) recorded for this column in
// modifyText, and emitting a modification record explaining the rewrite.
func newColumnTypeOptions(a parDef.ColMetaMapS, factor int, modifyText []typeModifyRecord) (rR []modifyLogoRecord, ch string) {
	ch, _ = colTypeMapSwitch(a, factor)
	for _, mod := range modifyText {
		if !strings.EqualFold(mod.columnName, a.ColumnName) {
			continue
		}
		ch = mod.newType
		switch a.TypeBelong {
		case "char", "varchar":
			// Rewritten because the projected row length exceeds the limit.
			rR = append(rR, modifyLogoRecord{
				Object:   "table size",
				Logo:     a.ColumnName,
				OldValue: mod.oldType,
				NewValue: mod.newType,
				Reason:   "table length >65527",
			})
		case "decimal":
			// Rewritten because decimal cannot serve as a partition column.
			rR = append(rR, modifyLogoRecord{
				Object:   "partition column",
				Logo:     a.ColumnName,
				OldValue: mod.oldType,
				NewValue: mod.newType,
				Reason:   "partition column not is decimal",
			})
		}
	}
	return
}
// newColumnNullOptions decides the NULL/NOT NULL clause for a column. Columns
// already NOT NULL keep it; a nullable column forced into the primary key
// (listed in notNull) is promoted to NOT NULL with a modification record.
func newColumnNullOptions(a parDef.ColMetaMapS, notNull []string) (rR []modifyLogoRecord, ch string) {
	if strings.EqualFold(a.ColNullMap, "NO") {
		ch = " NOT NULL "
		return
	}
	ch = " NULL "
	for _, name := range notNull {
		name = strings.ReplaceAll(name, "`", "")
		if !strings.EqualFold(a.ColumnName, name) {
			continue
		}
		ch = " NOT NULL "
		rR = append(rR, modifyLogoRecord{
			Object:   "column null",
			Logo:     a.ColumnName,
			OldValue: "NULL",
			NewValue: ch,
			Reason:   "primary key must is not null",
		})
	}
	return
}
// newCharacterOptions renders a CHARACTER SET clause for a column, or an
// empty string when no charset applies (empty input or the literal "NULL").
func newCharacterOptions(a string) (ch string) {
	if a == "" || strings.EqualFold(a, "NULL") {
		return
	}
	return fmt.Sprintf(" CHARACTER SET %v ", a)
}
// newCollateOptions renders a COLLATE clause for a column, or an empty string
// when no collation applies (empty input or the literal "NULL").
func newCollateOptions(a string) (ch string) {
	if a == "" || strings.EqualFold(a, "NULL") {
		return
	}
	return fmt.Sprintf(" COLLATE %v ", a)
}
// newDefaultOptions renders the DEFAULT clause for a column (empty when no
// default applies or the default must be dropped) and a record describing a
// dropped default together with the reason.
func newDefaultOptions(a parDef.ColMetaMapS, TypeConstraint, nullConstraint string) (recoverRecord modifyLogoRecord, ch string) {
	recoverRecord = modifyLogoRecord{
		Object:   "column default",
		Logo:     a.ColumnName,
		OldValue: a.ColDefaultMap,
		NewValue: "",
	}
	// No default to consider at all.
	if len(a.ColDefaultMap) == 0 && !strings.EqualFold(a.Type.ColTypeMap, "''") {
		return
	}
	switch {
	case strings.EqualFold(TypeConstraint, "text"):
		// text columns cannot carry a DEFAULT value.
		recoverRecord.Reason = "text date Type not use default value"
	case strings.Contains(nullConstraint, "NOT NULL") && strings.EqualFold(defaultValueMapSwitch(TypeConstraint, a.TypeBelong, a.ColDefaultMap), "NULL"):
		// A NOT NULL column cannot default to NULL.
		recoverRecord.Reason = "not null Constraint not use default null"
	default:
		ch = fmt.Sprintf("DEFAULT %v", defaultValueMapSwitch(TypeConstraint, a.TypeBelong, a.ColDefaultMap))
	}
	return
}
// tableRowsSize totals the byte widths of all columns and groups column names
// by width: byteOrder lists the distinct widths sorted descending, byteMerge
// maps each width to the columns that have it. nullSum is currently always 0
// (the nullable-column count is commented out in the original logic).
func tableRowsSize(s []parDef.ColMetaMapS, factor int) (maxSize, nullSum, colSum int, byteOrder []int, byteMerge map[int][]string) {
	byteMerge = make(map[int][]string)
	for _, col := range s {
		_, width := colTypeMapSwitch(col, factor)
		maxSize += width
		colSum++
		if _, seen := byteMerge[width]; !seen {
			byteOrder = append(byteOrder, width)
		}
		byteMerge[width] = append(byteMerge[width], col.ColumnName)
	}
	// Distinct widths, largest first.
	sort.Slice(byteOrder, func(i, j int) bool {
		return byteOrder[i] > byteOrder[j]
	})
	return
}

/*
	计算行长度
	行头长度计算指导原则
	列的数量：
		对于每 8 列，通常需要 1 字节来表示 NULL 位图。
	NULL 值的存在：
		如果某些列允许 NULL 值，行头长度可能会增加。
	一般情况下，如果有 NULL 值，行头长度会增加到 2 到 5 字节。
	行格式：
		COMPACT、REDUNDANT、DYNAMIC 和 COMPRESSED 行格式会影响行头的大小：
		COMPACT：较小的行头，通常为 1 到 2 字节（无 NULL 列时）。
		DYNAMIC：可能需要 4 到 6 字节。
*/
/*
	null值标识位，用bit来表示，一个bit 存储0、1，每个列根据存储
*/
//table 头部占用字节数
/*
	1、记录头信息(必须)
	记录头信息固定占用5个字节也就是40位，不同位代表不同信息，主要有：
		delete_mask 标记该记录是否被删除
		record_type 表示当前记录的类型
		0表示普通记录，1表示B+树非叶子节点记录，2表示最小记录，3表示最大记录
		next_record 表示下一条记录的相对位置

	2、隐藏列(必须)
	隐藏列中的信息因为与事务和主键有关，所以很重要，总共占用19个字节，有三列：
		row_id (不必须) 替补主键id
		trx_id 事务id
		roll_pointer 回滚指针
		这里需要提一下InnoDB表对主键的生成策略：
		优先使用用户自定义主键作为主键，如果用户没有定义主键，则选取一个Unique键作为主键，如果表中连Unique键都没有定义的话，则InnoDB会为表默认添加一个名为row_id的隐藏列作为主键。
*/
// varcharToText records varchar->text rewrites until the projected row size
// drops below MySQL's 65527-byte row limit. Widths are visited largest first
// (byteOrder is sorted descending), and columns used in any index
// (indexColumnUniq) are skipped because converting them would break the
// index definition.
func varcharToText(metaBase MetaBaseInitResult) (modifyText []typeModifyRecord) {
	maxSize, _, colSum, byteOrder, byteMerge := tableRowsSize(metaBase.columnMetaMap, metaBase.factor)
	nullSize := tableRowsNullSize(colSum)
	maxSize += nullSize
	if maxSize > 65527 {
		tmpMaxSize := maxSize
	ll:
		for _, v := range byteOrder {
			for i := 0; i < len(byteMerge[v]); i++ {
				// Stop as soon as the projected size fits.
				if tmpMaxSize < 65527 {
					break ll
				}
				if _, ok := metaBase.indexColumnUniq[byteMerge[v][i]]; !ok {
					modifyText = append(modifyText, typeModifyRecord{
						columnName: byteMerge[v][i],
						oldType:    "varchar",
						newType:    "text",
					})
					// A TEXT column stores only an external pointer in-row.
					tmpMaxSize = tmpMaxSize - v + 12 /* TEXT uses a 12-byte external pointer */ + 10
				}
			}
		}
	}
	return
}
// decimalToBigint records a decimal->bigint rewrite for every partition or
// sub-partition column whose type is decimal (decimal cannot be used as a
// partition column).
func decimalToBigint(metaBase MetaBaseInitResult) (modifyText []typeModifyRecord) {
	partCols := append(append([]string{}, metaBase.partMe.PartCol...), metaBase.partMe.SubPartCol...)
	for _, name := range partCols {
		for _, col := range metaBase.columnMetaMap {
			if !strings.EqualFold(col.ColumnName, name) || !strings.EqualFold(col.TypeBelong, "decimal") {
				continue
			}
			modifyText = append(modifyText, typeModifyRecord{
				columnName: name,
				oldType:    "decimal",
				newType:    "bigint",
			})
		}
	}
	return
}
// charToText records a char->text rewrite for every CHAR column whose
// declared width exceeds 250.
func charToText(metaBase MetaBaseInitResult) (modifyText []typeModifyRecord) {
	for _, col := range metaBase.columnMetaMap {
		if !strings.EqualFold(col.Type.ColTypeMap, "CHAR(show)") || col.Type.ShowValue <= 250 {
			continue
		}
		modifyText = append(modifyText, typeModifyRecord{
			columnName: col.ColumnName,
			oldType:    "char",
			newType:    "text",
		})
	}
	return
}
// modifyTypeFunc collects all column-type rewrites required for this table:
// varchar->text (row too wide), decimal->bigint (decimal partition column),
// and char->text (char too wide).
func modifyTypeFunc(metaBase MetaBaseInitResult) (modifyText []typeModifyRecord) {
	for _, batch := range [][]typeModifyRecord{
		varcharToText(metaBase),
		decimalToBigint(metaBase),
		charToText(metaBase),
	} {
		modifyText = append(modifyText, batch...)
	}
	return
}
// getColumnMetaMap looks up the type-map metadata for a column by name
// (case-insensitive); the zero value is returned when no column matches.
func getColumnMetaMap(column string, metaBase MetaBaseInitResult) (result parDef.ColMetaMapS) {
	for _, meta := range metaBase.columnMetaMap {
		if strings.EqualFold(column, meta.ColumnName) {
			result = meta
			return
		}
	}
	return
}
// createColumnBody renders the CREATE TABLE statement for the target table:
// header, one clause per column (type, charset, collation, nullability,
// default, auto-increment, invisibility, comment), the optional index body,
// and the table-level charset/collation/comment options. Every rewrite
// applied along the way is appended to res.switchRecord.
func (tm TableMe) createColumnBody(parameter parDef.Parameter, metaBase MetaBaseInitResult) (res MetaBaseInitResult) {
	var creatText []string
	res = metaBase
	var createTableHead = fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%v`.`%v` (\n", parameter.Object.Schema, parameter.Object.Table)
	// No columns: nothing to render.
	if len(res.columnMeta) == 0 {
		return
	}
	// Column-type rewrites required by row-size / partition constraints.
	modifyTypeColumn := modifyTypeFunc(res)
	for _, a := range res.columnMeta {
		b := getColumnMetaMap(a.ColumnName, res)
		rR1, TypeConstraint := newColumnTypeOptions(b, res.factor, modifyTypeColumn)
		res.switchRecord = append(res.switchRecord, rR1...)
		rR2, nullConstraint := newColumnNullOptions(b, res.modifyNotNull)
		res.switchRecord = append(res.switchRecord, rR2...)
		rR3, defaultValue := newDefaultOptions(b, TypeConstraint, nullConstraint)
		res.switchRecord = append(res.switchRecord, rR3)
		creatText = append(creatText, fmt.Sprintf("`%v` %v %v %v %v %v %v %v %v ", a.ColumnName,
			TypeConstraint,
			newCharacterOptions(b.Charset),
			newCollateOptions(b.CollationName),
			nullConstraint,
			defaultValue,
			columnAutoIncrementOptions(a.AutoIncrement),
			columnInvisibleOptions(a.Invisible),
			generalComment(" comment ", a.ColumnComment)))
	}
	// Statement without indexes; when an index body exists it is spliced in
	// before the closing parenthesis instead.
	res.createTableSql = fmt.Sprintf("%v %v \n) %v %v %v ", createTableHead, strings.Join(creatText, fmt.Sprintf(",\t\n")), commentAction("DEFAULT CHARSET= ", res.Character), commentAction("COLLATE= ", res.Collate), commentAction("COMMENT ", res.Comment))
	if len(res.createIndexSql) > 0 {
		res.createTableSql = fmt.Sprintf("%v %v,\n %v \n) %v %v %v", createTableHead, strings.Join(creatText, fmt.Sprintf(",\t\n")), res.createIndexSql, commentAction("DEFAULT CHARSET= ", res.Character), commentAction("COLLATE= ", res.Collate), commentAction("COMMENT ", res.Comment))
	}
	return
}
// columnMetaBaseInit converts a raw query result (slice of string-keyed maps)
// into typed columnMeta records. A nil meta yields a nil slice and no error;
// an unexpected result shape yields an explicit error.
func columnMetaBaseInit(meta any) (columnMetaSG []columnMeta, err error) {
	var event = "[columnMetaBaseInit]"
	if meta == nil {
		return
	}
	// ROBUSTNESS: comma-ok assertion instead of a bare one that would panic
	// (uncaught here — no recover) on an unexpected result shape.
	rows, ok := meta.([]map[string]any)
	if !ok {
		err = ref.ErrAddPrintf(event, fmt.Errorf("unexpected meta type %T", meta))
		return
	}
	for _, row := range rows {
		var col columnMeta
		if err = ref.MapToStruct(row, &col); err != nil {
			err = ref.ErrAddPrintf(event, err)
			return
		}
		columnMetaSG = append(columnMetaSG, col)
	}
	return
}
// generalPartRulesSql renders one PARTITION/SUBPARTITION rule clause for a
// RANGE (VALUES LESS THAN) or LIST (VALUES IN) method; any other method
// yields an empty string.
func generalPartRulesSql(prefix, meth string, data parDef.PartRule) (res string) {
	var partName string
	switch prefix {
	case "PARTITION":
		partName = data.Name
	case "SUBPARTITION":
		partName = data.SubName
	}
	switch strings.ToUpper(meth) {
	case "RANGE":
		return fmt.Sprintf("%v `%v` VALUES LESS THAN (%v)", prefix, partName, data.Rules)
	case "LIST":
		return fmt.Sprintf("%v `%v` VALUES IN (%v)", prefix, partName, data.Rules)
	}
	return
}
// partMethData returns the partition method, normalising the sub-partition
// method by the data type of the sub-partition column(s): string/time types
// force KEY, integer types force HASH. Column expressions such as
// "month(col)" are unwrapped to the bare column name before lookup.
func partMethData(metaBase MetaBaseInitResult) (meth parDef.PartMeth) {
	meth = metaBase.partMe.Meth
	if strings.EqualFold(meth.Sub, "NULL") {
		return
	}
	for _, v := range subPartColData(metaBase) {
		// BUG FIX: strip a wrapping function call to get the column name.
		// The old slicing (v[n:] then col[n:]) kept the parentheses — the
		// result was ")" and could never match a column name.
		col := v
		if n := strings.Index(col, "("); n != -1 {
			col = col[n+1:]
		}
		if n := strings.Index(col, ")"); n != -1 {
			col = col[:n]
		}
		var p string
		for _, co := range metaBase.columnMetaMap {
			if strings.EqualFold(col, co.ColumnName) {
				p = co.TypeBelong
			}
		}
		switch p {
		case "text", "char", "varchar", "time":
			meth.Sub = "KEY"
		case "int", "bigint":
			meth.Sub = "HASH"
		}
	}
	return
}
// partColData returns the top-level partition columns.
func partColData(metaBase MetaBaseInitResult) (partColumn []string) {
	partColumn = metaBase.partMe.PartCol
	return
}

// subPartColData returns the sub-partition columns.
func subPartColData(metaBase MetaBaseInitResult) (partColumn []string) {
	partColumn = metaBase.partMe.SubPartCol
	return
}

// partRulesData returns the top-level partition rule descriptors.
func partRulesData(metaBase MetaBaseInitResult) (partRules []parDef.PartRule) {
	partRules = metaBase.partMe.RuleDesc
	return
}

// subPartRulesData returns the sub-partition rule descriptors.
func subPartRulesData(metaBase MetaBaseInitResult) (partRules []parDef.PartRule) {
	partRules = metaBase.partMe.SubPartDesc
	return
}
// getHashKeyPartMeth picks KEY when the first partition column found in the
// column metadata is a string type (varchar/char) and HASH otherwise; when no
// partition column matches, the caller's oldMeth is kept.
func getHashKeyPartMeth(metaBase MetaBaseInitResult, partCol []string, oldMeth string) (meth string) {
	for _, name := range partCol {
		for _, meta := range metaBase.columnMetaMap {
			if !strings.EqualFold(name, meta.ColumnName) {
				continue
			}
			if t := strings.ToLower(meta.TypeBelong); t == "varchar" || t == "char" {
				return "KEY"
			}
			return "HASH"
		}
	}
	return oldMeth
}
// CreatePartBody builds the "PARTITION BY ..." clause of a CREATE TABLE
// statement from the partition metadata in metaBase. err is currently always
// nil but kept so the signature matches the other builders.
//
// Fixes over the original: the SUBPARTITIONS count (len(subPartR)/len(partR))
// is now guarded against a division-by-zero panic when no partition rules
// exist, and the HASH/KEY shortcut is hoisted out of the rules loop where it
// only ever ran on the first iteration.
func CreatePartBody(metaBase MetaBaseInitResult) (createPartBody string, err error) {
	var meth = partMethData(metaBase)
	var partR = partRulesData(metaBase)
	var subPartR = subPartRulesData(metaBase)
	if !strings.EqualFold(meth.First, "NULL") {
		switch meth.First {
		case "KEY", "HASH":
			createPartBody = fmt.Sprintf("PARTITION BY %v (`%v`)\n", getHashKeyPartMeth(metaBase, metaBase.partMe.PartCol, meth.First), strings.Join(partColData(metaBase), "`,`"))
		case "RANGE", "LIST":
			// COLUMNS form so string/date typed partition keys are accepted.
			createPartBody = fmt.Sprintf("PARTITION BY %v COLUMNS (`%v`)\n", meth.First, strings.Join(partColData(metaBase), "`,`"))
		}
	}
	if !strings.EqualFold(meth.Sub, "NULL") && !strings.EqualFold(meth.Sub, "NONE") && len(meth.Sub) > 0 && len(partR) > 0 {
		// MySQL wants the per-partition sub-partition count, hence the
		// division; len(partR) > 0 is checked above to avoid a panic.
		createPartBody = fmt.Sprintf("%v SUBPARTITION BY %v (`%v`) \n SUBPARTITIONS %v \n", createPartBody, getHashKeyPartMeth(metaBase, metaBase.partMe.SubPartCol, meth.Sub), strings.Join(subPartColData(metaBase), "`,`"), len(subPartR)/len(partR))
	}
	if len(partR) > 0 {
		switch meth.First {
		case "HASH", "KEY":
			// HASH/KEY partitions only need a count, no per-partition rules.
			createPartBody = fmt.Sprintf("%v \n PARTITIONS %v \n", createPartBody, len(partR))
			return
		}
	}
	var rules []string
	for _, v := range partR {
		rules = append(rules, generalPartRulesSql("PARTITION", meth.First, v))
	}
	if len(createPartBody) > 0 && len(rules) > 0 {
		createPartBody = fmt.Sprintf("%v (\n %v)\n", createPartBody, strings.Join(rules, ",\n"))
	}
	return
}

/*
CreateGeneral
遇到的问题
1: 索引长度超限（767），必要时可使用前缀索引或减少数据类型长度   已解决
2：行长度超限（65535），超过优先转varchar最大的，逐层依次降低  已解决
3：partition column 不能有comment，create table不受限制 alter comment 受限制   已解决
4：partition column type 是decimal，需要转换为float    已解决
5：partition column type 是varchar,datetime，需要增加partition by range columns  已解决
6：表存在主键索引或唯一索引时，需要包含分区列     已解决
7：range 或list 分区 values less than (”) 存在单引号且数据类型为int类型，转换失败  已解决
8：mysql体系不允许大字段有blobText，json，longtext 有默认值问题  已解决
9:  comment 中包含单引号或者;号问题     已解决
10：bit(0) 不支持非法Error 3013 (HY000): Invalid size
11: 排除行长度超限时将索引列转换为text
12: mysql 开启sql_generate_invisible_primary_key后，创建无主键分区表报doesn't yet support 'generating invisible primary key for the partitioned tables'
*/
// CreateGeneral assembles the complete CREATE TABLE statement (column body
// with indexes, plus partition body) for the table described in
// parameter.Meta, and returns it in result.Result.
func (tm TableMe) CreateGeneral(parameter parDef.Parameter) (result global.Return, err error) {
	const event = "CreateGeneral"
	var metaBase MetaBaseInitResult
	if metaBase, err = metaBaseInit(parameter.Meta); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	if metaBase.factor, err = tm.getDefaultCharsetNum(parameter); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	if metaBase, err = tm.GetGIPKAction(parameter, metaBase); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	var partBody string
	if partBody, err = CreatePartBody(metaBase); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	// Index body is folded into the column body before rendering the final SQL.
	withIndexes := tm.createColumnBody(parameter, CreateIndexBody(metaBase))
	result.Result = fmt.Sprintf("%v %v ;", withIndexes.createTableSql, partBody)
	return
}
// alterDropIndex renders one " DROP INDEX `name` " fragment per entry of par,
// which is expected to be a map keyed by "indexName@kind"; the "@kind" suffix
// is stripped to recover the index name. Any other dynamic type of par yields
// nil. (Idiom: a single comma-ok assertion replaces the original type switch
// that re-asserted par on every access.)
func (tm TableMe) alterDropIndex(par any) (result []string) {
	indexes, ok := par.(map[string][]string)
	if !ok {
		return
	}
	for k := range indexes {
		name := k
		if n := strings.LastIndex(k, "@"); n != -1 {
			name = k[:n]
		}
		result = append(result, fmt.Sprintf(" DROP INDEX `%v` ", name))
	}
	return
}
// alterAddIndex renders "ADD ..." index fragments for each entry of par,
// expected to be map["indexName@kind"] -> column list, where kind is one of
// I (plain index), P (primary key) or U (unique index). Columns are passed
// through uniqueAddPartition/getIndexByte/indexColSubPrefix — presumably to
// include partition columns and apply prefix lengths where the index would
// exceed the byte limit (see the issue list above CreateGeneral).
//
// BUG fix: the primary-key case emitted "ADD PRIMARY `PRIMARY` (...)", which
// is not valid MySQL; the correct clause is "ADD PRIMARY KEY (...)" (the key
// name PRIMARY is implicit), matching printIndexSql below.
func (tm TableMe) alterAddIndex(par any, colMap []parDef.ColMetaMapS, partCol []string, factor int) (result []string) {
	indexes, ok := par.(map[string][]string)
	if !ok {
		return
	}
	for k, cols := range indexes {
		addIndexName := k
		var colKey string
		if n := strings.LastIndex(k, "@"); n != -1 {
			addIndexName = k[:n]
			colKey = k[n+1:]
		}
		newColumn := uniqueAddPartition(colKey, cols, partCol)
		colByte := getIndexByte(newColumn, colMap, factor)
		finalColumn := indexColSubPrefix(newColumn, colByte, factor)
		var addIndex string
		switch colKey {
		case "I":
			addIndex = fmt.Sprintf(" ADD INDEX `%v` (%v)", addIndexName, strings.Join(finalColumn, ","))
		case "P":
			addIndex = fmt.Sprintf(" ADD PRIMARY KEY (%v)", strings.Join(finalColumn, ","))
		case "U":
			addIndex = fmt.Sprintf(" ADD UNIQUE INDEX `%v` (%v)", addIndexName, strings.Join(finalColumn, ","))
		}
		result = append(result, addIndex)
	}
	return
}
// alterIndex builds ALTER TABLE statements for the index differences listed
// under parameter.Meta["indexComparison"] ("add"/"del" maps). With
// AlterDDL.Rule == "many" all changes are folded into one statement,
// otherwise one statement per change is emitted.
// (Idiom: comma-ok assertions replace the type switch + re-assertion and the
// staticcheck-S1005 `v, _ := m[k]` map reads.)
func (tm TableMe) alterIndex(parameter parDef.Parameter, metaBase MetaBaseInitResult) (result []string) {
	if parameter.Meta == nil || parameter.Meta.(map[string]any)["indexComparison"] == nil {
		return
	}
	var indexModify []string
	alterIndexPrefix := fmt.Sprintf("ALTER TABLE `%v`.`%v` ", parameter.Object.Schema, parameter.Object.Table)
	indexMe := parameter.Meta.(map[string]any)["indexComparison"]
	// Both partition and sub-partition columns are handed to alterAddIndex
	// (its uniqueAddPartition step appears to extend unique/primary keys with
	// them — confirm against that helper).
	var partCol []string
	partCol = append(partCol, partColData(metaBase)...)
	partCol = append(partCol, subPartColData(metaBase)...)
	if cmp, ok := indexMe.(map[string]any); ok {
		if add := cmp["add"]; add != nil {
			indexModify = append(indexModify, tm.alterAddIndex(add, metaBase.columnMetaMap, partCol, metaBase.factor)...)
		}
		if del := cmp["del"]; del != nil {
			indexModify = append(indexModify, tm.alterDropIndex(del)...)
		}
	}
	switch parameter.Options.AlterDDL.Rule {
	case "many":
		if len(indexModify) > 0 {
			result = append(result, fmt.Sprintf("%v %v;", alterIndexPrefix, strings.Join(indexModify, ",")))
		}
	default:
		for _, v := range indexModify {
			result = append(result, fmt.Sprintf("%v %v;", alterIndexPrefix, v))
		}
	}
	return
}
// alterDropCol renders one " DROP COLUMN `name` " fragment per column-meta
// entry of par; any dynamic type other than []parDef.ColMetaMapS yields nil.
// (Idiom: comma-ok assertion instead of a type switch that re-asserts par.)
func (tm TableMe) alterDropCol(par any) (result []string) {
	cols, ok := par.([]parDef.ColMetaMapS)
	if !ok {
		return
	}
	for _, col := range cols {
		result = append(result, fmt.Sprintf(" DROP COLUMN `%v` ", col.ColumnName))
	}
	return
}
// charsetOptions filters a column charset value: the literal "NULL" (any
// case) means "no charset recorded" and maps to the empty string; any other
// value is passed through unchanged.
func charsetOptions(charset string) (newCharset string) {
	if strings.EqualFold(charset, "NULL") {
		return ""
	}
	return charset
}
// collationOptions filters a column collation value: the literal "NULL"
// (any case) maps to the empty string; any other value is passed through
// unchanged. Mirrors charsetOptions.
func collationOptions(charset string) (newCharset string) {
	if strings.EqualFold(charset, "NULL") {
		return ""
	}
	return charset
}
// alterColPosition maps the requested column position to its MySQL clause:
// "first" -> "FIRST", "end" -> "" (append at the table end, the default),
// anything else is treated as the name of the column to insert after.
func alterColPosition(position string) (newPosition string) {
	switch position {
	case "first":
		// S1039/vet: Sprintf with a constant string is just the string.
		return "FIRST"
	case "end":
		return ""
	default:
		return fmt.Sprintf("AFTER `%v` ", position)
	}
}
// alterAddCol renders one "ADD COLUMN ..." fragment per column-meta entry of
// par (type, nullability, charset/collation, default, auto-increment,
// visibility, comment and position). Any dynamic type other than
// []parDef.ColMetaMapS yields nil.
// (Idiom: comma-ok assertion instead of a type switch that re-asserts par.)
func (tm TableMe) alterAddCol(par any) (result []string) {
	cols, ok := par.([]parDef.ColMetaMapS)
	if !ok {
		return
	}
	for _, v := range cols {
		TypeConstraint, _ := colTypeMapSwitch(v, 0)
		_, nullConstraint := newColumnNullOptions(v, nil)
		// Default depends on the resolved type and nullability (e.g. big
		// text types cannot carry defaults — see issue 8 above).
		_, defaultValue := newDefaultOptions(v, TypeConstraint, nullConstraint)
		result = append(result, fmt.Sprintf("ADD COLUMN `%v` %v %v "+
			"%v %v %v %v "+
			"%v %v %v",
			v.ColumnName, TypeConstraint, nullConstraint,
			charsetOptions(v.Charset), collationOptions(v.CollationName), defaultValue,
			columnAutoIncrementOptions(v.ColAutoIncrement), columnInvisibleOptions(v.Invisible), generalComment(" comment ", v.ColumnComment),
			alterColPosition(v.BeforeColName),
		))
	}
	return
}
// AlterColumn builds ALTER TABLE statements for the column differences listed
// under parameter.Meta["colComparison"] ("add"/"del"). With AlterDDL.Rule ==
// "many" all changes are merged into one statement, otherwise one statement
// per change is emitted.
// (Cleanups: S1005 `v, _ := m[k]` map reads removed, type switch replaced
// with a comma-ok assertion, and the local prefix renamed — it was misleadingly
// called alterIndexPrefix in a column function.)
func (tm TableMe) AlterColumn(parameter parDef.Parameter) (result []string) {
	if parameter.Meta == nil || parameter.Meta.(map[string]any)["colComparison"] == nil {
		return
	}
	var colModify []string
	alterColPrefix := fmt.Sprintf("ALTER TABLE `%v`.`%v` ", parameter.Object.Schema, parameter.Object.Table)
	colMe := parameter.Meta.(map[string]any)["colComparison"]
	if cmp, ok := colMe.(map[string]any); ok {
		if add := cmp["add"]; add != nil {
			colModify = append(colModify, tm.alterAddCol(add)...)
		}
		if del := cmp["del"]; del != nil {
			colModify = append(colModify, tm.alterDropCol(del)...)
		}
	}
	switch parameter.Options.AlterDDL.Rule {
	case "many":
		if len(colModify) > 0 {
			result = append(result, fmt.Sprintf("%v %v;", alterColPrefix, strings.Join(colModify, ",")))
		}
	default:
		for _, v := range colModify {
			result = append(result, fmt.Sprintf("%v %v;", alterColPrefix, v))
		}
	}
	return
}
// AlterGeneral produces the combined ALTER statements (column changes first,
// then index changes) for the comparison data in parameter.Meta and joins
// them with newlines into result.Result.
func (tm TableMe) AlterGeneral(parameter parDef.Parameter) (result global.Return, err error) {
	const event = "[AlterGeneral]"
	var metaBase MetaBaseInitResult
	if metaBase, err = metaBaseInit(parameter.Meta); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	if metaBase.factor, err = tm.getDefaultCharsetNum(parameter); err != nil {
		err = ref.ErrAddPrintf(event, err)
		return
	}
	statements := append(tm.AlterColumn(parameter), tm.alterIndex(parameter, metaBase)...)
	if len(statements) > 0 {
		result.Result = strings.Join(statements, "\n")
	}
	return
}
// printColumnBody serializes every column-meta entry of metaBase to a JSON
// string, one per element.
func printColumnBody(metaBase MetaBaseInitResult) (res []string) {
	for _, meta := range metaBase.columnMeta {
		res = append(res, ref.AnyToJsonString(meta))
	}
	return
}
// printIndexBody serializes every index-meta entry of metaBase to a JSON
// string, one per element.
func printIndexBody(metaBase MetaBaseInitResult) (res []string) {
	for _, meta := range metaBase.indexMeta {
		res = append(res, ref.AnyToJsonString(meta))
	}
	return
}
// printPartBody renders a human-readable summary of the partition metadata:
// methods, partition columns, and every (sub-)partition rule as JSON.
func printPartBody(metaBase MetaBaseInitResult) (res []string) {
	// partMethData walks the sub-partition columns to resolve the method;
	// call it once instead of once per formatted field.
	meth := partMethData(metaBase)
	res = append(res, fmt.Sprintf("part meth:%v,sub part meth:%v", meth.First, meth.Sub))
	res = append(res, fmt.Sprintf("part column:%v,sub part column:%v", partColData(metaBase), subPartColData(metaBase)))
	for _, v := range partRulesData(metaBase) {
		res = append(res, fmt.Sprintf("part rules:%v", ref.AnyToJsonString(v)))
	}
	for _, v := range subPartRulesData(metaBase) {
		res = append(res, fmt.Sprintf("sub part rules:%v", ref.AnyToJsonString(v)))
	}
	return
}
// printCreateSingleColumnSql renders one column-definition line for the
// preview CREATE TABLE output. The type suffix depends on the column kind:
// char-like columns (CharLength > 0) append DataLength, DECIMAL appends
// precision and scale, and a fixed set of Oracle-style types append
// precision only.
// NOTE(review): ColumnDefault and ColumnComment are interpolated verbatim
// with no quoting — assumed to be pre-formatted upstream; confirm.
func printCreateSingleColumnSql(v columnMeta) (res string) {
	var null string
	switch v.IsNull {
	case "Y":
		null = "NULL"
	case "N":
		null = "NOT NULL"
	}
	dataType := fmt.Sprintf("%v", v.DataType)
	if v.CharLength > 0 {
		if v.DataLength > 0 {
			dataType = fmt.Sprintf("%v(%v)", v.DataType, v.DataLength)
		}
	} else {
		switch strings.ToUpper(v.ColumnType) {
		case "DECIMAL":
			if v.DataPrecision > 0 {
				dataType = fmt.Sprintf("%v(%v,%v)", v.DataType, v.DataPrecision, v.DataScale)
			}
		case "FLOAT", "ROWID", "UROWID", "DATE", "TIMESTAMP", "TIMESTAMP WITH TIME ZONE", "TIMESTAMP WITH LOCAL TIME ZONE", "CLOB", "NCLOB", "LONG", "BLOB", "LONG RAW", "RAW":
			if v.DataPrecision > 0 {
				dataType = fmt.Sprintf("%v(%v)", v.DataType, v.DataPrecision)
			}
		}
	}
	var comment string
	if len(v.ColumnComment) > 0 {
		comment = fmt.Sprintf(" comment %v ", v.ColumnComment)
	}
	return fmt.Sprintf("\"%v\" %v %v DEFAULT %v %v", v.ColumnName, dataType, null, v.ColumnDefault, comment)
}
// printCreatePartSql builds the PARTITION BY clause for the preview CREATE
// TABLE output, including per-partition (and nested sub-partition) rule
// lists for RANGE/LIST methods.
//
// Fixes over the original: `subPartSql = append(subPartSql)` was a no-op that
// silently dropped every rendered sub-partition rule — the rule is now
// actually appended; the SUBPARTITIONS division is guarded against
// len(partRules) == 0; and partMethData (which walks columns) is called once
// instead of on every use.
func printCreatePartSql(metaBase MetaBaseInitResult) (createPartBody string) {
	var crePartRule []string
	meth := partMethData(metaBase)
	partRules := partRulesData(metaBase)
	subPartRules := subPartRulesData(metaBase)

	if !strings.EqualFold(meth.First, "NULL") && !strings.EqualFold(meth.First, "NONE") {
		createPartBody = fmt.Sprintf("PARTITION BY %v (`%v`)", meth.First, strings.Join(partColData(metaBase), "\",\""))
	}
	if !strings.EqualFold(meth.Sub, "NULL") && !strings.EqualFold(meth.Sub, "NONE") {
		switch meth.Sub {
		case "HASH", "KEY":
			if len(partRules) > 0 { // guard: avoid division by zero
				createPartBody = fmt.Sprintf("%v SUBPARTITION BY %v (`%v`) \n SUBPARTITIONS %v \n", createPartBody, meth.Sub, strings.Join(subPartColData(metaBase), "`,`"), len(subPartRules)/len(partRules))
			}
		default:
			createPartBody = fmt.Sprintf("%v\nSUBPARTITION BY %v (`%v`)", createPartBody, meth.Sub, strings.Join(subPartColData(metaBase), "\",\""))
		}
	}
	switch meth.First {
	case "HASH", "KEY":
		// HASH/KEY need only a partition count, no rule list.
		crePartRule = append(crePartRule, fmt.Sprintf("PARTITIONS %v ", len(partRules)))
		createPartBody = fmt.Sprintf("%v \n %v\n", createPartBody, strings.Join(crePartRule, ",\n"))
		return
	default:
		for _, v := range partRules {
			// Collect the sub-partition rules belonging to this partition.
			var subPartSql []string
			for _, vv := range subPartRules {
				if strings.EqualFold(v.Name, vv.Name) {
					if pq := generalPartRulesSql("SUBPARTITION", meth.Sub, vv); len(pq) > 0 {
						subPartSql = append(subPartSql, pq)
					}
				}
			}
			crePartRule = append(crePartRule, fmt.Sprintf("%v %v", generalPartRulesSql("PARTITION", meth.First, v), func() string {
				if len(subPartSql) > 0 {
					return fmt.Sprintf("(\n%v\n)", strings.Join(subPartSql, ",\n"))
				}
				return ""
			}()))
		}
	}
	if len(createPartBody) > 0 && len(crePartRule) > 0 {
		createPartBody = fmt.Sprintf("%v (\n%v\n)", createPartBody, strings.Join(crePartRule, ",\n"))
	}
	return
}
// printCreateColumnSql assembles the preview CREATE TABLE statement for
// schema.table from the per-column definition lines plus the partition
// clause, returned as a single-element slice.
func printCreateColumnSql(schema, table string, metaBase MetaBaseInitResult) (creTabCol []string) {
	var columnLines []string
	for _, col := range metaBase.columnMeta {
		columnLines = append(columnLines, printCreateSingleColumnSql(col))
	}
	stmt := fmt.Sprintf("\nCREATE TABLE `%v`.`%v` (\n%v\n) \n%v;", schema, table, strings.Join(columnLines, ",\n"), printCreatePartSql(metaBase))
	return append(creTabCol, stmt)
}
// printIndexSql renders the DDL statement re-creating one index on
// schema.table: primary keys via ALTER TABLE, unique/plain indexes via
// CREATE [UNIQUE] INDEX.
//
// BUG fix: the unique case joined columns with `","` (double quotes) while
// wrapping the list in backticks, producing `col1","col2`. All three cases
// now join with "`,`" so every column is individually backtick-quoted
// (single-column output is unchanged).
func printIndexSql(schema, table string, v parDef.IndexColumnMe) (res string) {
	var indexCol []string
	for _, vv := range v.ColumnMate {
		indexCol = append(indexCol, fmt.Sprintf("%v", vv.ColumnName))
	}
	cols := strings.Join(indexCol, "`,`")
	switch v.ColumnKey {
	case "P":
		res = fmt.Sprintf("ALTER TABLE `%v`.`%v` ADD PRIMARY KEY (`%v`);", schema, table, cols)
	case "U":
		res = fmt.Sprintf("CREATE UNIQUE INDEX `%v` ON `%v`.`%v`(`%v`);", v.IndexName, schema, table, cols)
	case "I":
		res = fmt.Sprintf("CREATE INDEX `%v` ON `%v`.`%v`(`%v`);", v.IndexName, schema, table, cols)
	}
	return
}
// printCreateIndexSql renders one index-creation statement per index in
// metaBase for schema.table.
func printCreateIndexSql(schema, table string, metaBase MetaBaseInitResult) (creTabIndex []string) {
	for _, idx := range metaBase.indexMeta {
		creTabIndex = append(creTabIndex, printIndexSql(schema, table, idx))
	}
	return
}
// CreatePrint renders a full textual preview of the table described in
// parameter.Meta: the raw column/index/partition metadata followed by the
// generated CREATE TABLE and index DDL, joined with newlines into
// result.Result.
func (tm TableMe) CreatePrint(parameter parDef.Parameter) (result global.Return, err error) {
	var metaBase MetaBaseInitResult
	if metaBase, err = metaBaseInit(parameter.Meta); err != nil {
		err = ref.ErrAddPrintf("CreatePrint", err)
		return
	}
	schema, table := parameter.Object.Schema, parameter.Object.Table
	var lines []string
	lines = append(lines, printColumnBody(metaBase)...)
	lines = append(lines, printIndexBody(metaBase)...)
	lines = append(lines, printPartBody(metaBase)...)
	lines = append(lines, printCreateColumnSql(schema, table, metaBase)...)
	lines = append(lines, printCreateIndexSql(schema, table, metaBase)...)
	result.Result = strings.Join(lines, "\n")
	return
}
