package Gr

import (
	"encoding/json"
	"errors"
	"fmt"
	"db2s/ETL"
	ea "db2s/encryptionAlgorithm"
	"db2s/global"
	"sort"
	"strconv"
	"strings"
)

// TableColumn namespaces the MySQL-flavoured column/DDL helper methods in
// this file; the receiver carries no state.
type TableColumn struct {
}

// nilStrconvNull maps the internal placeholder tokens "<null>" and "<entry>"
// to their SQL literal equivalents ("NULL" and "''"); every other value is
// passed through unchanged.
func (cs TableColumn) nilStrconvNull(s string) string {
	if s == "<null>" {
		return "NULL"
	}
	if s == "<entry>" {
		return "''"
	}
	return s
}

// GetColumnType classifies every filtered column as "int", "string" or
// "datetime" based on its declared (Oracle-style) data type, and collects the
// matching IsNull flags in parallel. The synthetic "_hidden_pk_" column is
// always reported as a single non-null int.
//
// The concrete return value is always a global.GetIndexColumnProperty; the
// `any` result type is kept for interface compatibility with callers.
func (cs TableColumn) GetColumnType(s global.GetColumnTypeInput) (result any, err error) {
	var typeGather, nullGather []string
	for _, filter := range s.ColumnFilter {
		if strings.EqualFold(filter, "_hidden_pk_") {
			return global.GetIndexColumnProperty{
				NullGather: []string{"NO"},
				TypeGather: []string{"int"},
			}, nil
		}
		for _, col := range s.TableColumn {
			if !strings.EqualFold(col.ColumnName, filter) {
				continue
			}
			// Strip a length/precision suffix such as "(10,2)" before matching
			// (the original used an inline anonymous function for this).
			baseType := col.DataType
			if n := strings.Index(baseType, "("); n != -1 {
				baseType = baseType[:n]
			}
			switch strings.ToUpper(baseType) {
			case "INT", "BIGINT", "NUMBER", "FLOAT", "INTEGER", "SMALLINT", "REAL", "BINARY_DOUBLE", "BINARY_FLOAT", "BINARY_INTEGER", "PLS_INTEGER", "NATURAL", "NATURALN", "POSITIVE", "POSITIVEN", "SIGNTYPE", "SIMPLE_INTEGER", "DECIMAL", "DEC", "NUMERIC":
				typeGather = append(typeGather, "int")
			case "VARCHAR2", "CHAR", "NCHAR", "ROWID", "VARCHAR", "NVARCHAR2":
				typeGather = append(typeGather, "string")
			case "SYSDATE", "DATE", "TIMESTAMP", "TIMESTAMP WITH TIME ZONE", "TIMESTAMP WITH LOCAL TIME ZONE":
				typeGather = append(typeGather, "datetime")
			default:
				// Unknown types are treated as strings.
				typeGather = append(typeGather, "string")
			}
			nullGather = append(nullGather, col.IsNull)
		}
	}
	return global.GetIndexColumnProperty{
		NullGather: nullGather,
		TypeGather: typeGather,
	}, nil
}

// All fetches the column metadata of the table being verified (name, ordinal
// position, type, charset, default, auto-increment flag, ...) from MySQL's
// information_schema and unmarshals every result row into a
// global.TableMeta via a JSON round-trip.
func (cs TableColumn) All(s global.GetColumnTypeInput) (result global.AllReturnResult, err error) {
	const event = "[clusterAll]"
	var rows any
	result.BaseResult.Sql = fmt.Sprintf("select table_Schema as databaseName,table_name as tableName,COLUMN_NAME as columnName,DATA_TYPE as columnType,COLUMN_TYPE as dataType,NUMERIC_PRECISION as dataPrecision,NUMERIC_SCALE as dataScale,ORDINAL_POSITION as columnSeq,IS_NULLABLE as isNull,CHARACTER_SET_NAME as charset,COLLATION_NAME as collationName,COLUMN_COMMENT as columnComment,REPLACE(REPLACE(COLUMN_DEFAULT, 'CHARACTER SET utf8mb4', ''), 'CHARACTER SET utf8', '') as columnDefault,EXTRA as autoIncrement  from information_Schema.columns where table_schema in ('%v') and table_name in ('%v') order by ORDINAL_POSITION", s.TableInfo.Schema, s.TableInfo.Table)
	result.BaseResult.TimeOut, rows, err = ExecSelectSql(ExecSqlInputS{
		Ss:           s,
		Event:        event,
		SelectSqlStr: result.BaseResult.Sql,
		Expect:       ETL.SMap,
	})
	if err != nil {
		err = errors.New(fmt.Sprintf("%v %v", event, err))
		return
	}
	if rows == nil {
		return
	}
	for _, row := range rows.([]map[string]interface{}) {
		// Stringify every value so the JSON round-trip below is uniform.
		for key, val := range row {
			row[key] = fmt.Sprintf("%v", val)
		}
		// EXTRA column: "auto_increment" (after placeholder normalization)
		// becomes a real boolean.
		if raw, ok := row["autoIncrement"]; ok {
			row["autoIncrement"] = strings.ToLower(cs.nilStrconvNull(fmt.Sprintf("%v", raw))) == "auto_increment"
		}
		var jsonData []byte
		if jsonData, err = json.Marshal(row); err != nil {
			err = errors.New(fmt.Sprintf("%v []map[string]any strconv json fail. Execution process{strconv data:%v error:%v}", event, row, err))
			return
		}
		var meta = global.TableMeta{}
		if err = json.Unmarshal(jsonData, &meta); err != nil {
			err = errors.New(fmt.Sprintf("%v []byte json unmarshal struct fail. Execution process{strconv data:%v error:%v}", event, string(jsonData), err))
			return
		}
		result.Result = append(result.Result, cs.ColumnTypeRedefine(meta))
	}
	return
}
// DropColumnSql renders the ALTER TABLE ... DROP COLUMN statement for the
// column named in the DDL request.
func (cs TableColumn) DropColumnSql(s global.GetColumnTypeInput) (any, error) {
	dropSql := fmt.Sprintf("alter table `%v`.`%v` drop column `%v`;", s.TableInfo.Schema, s.TableInfo.Table, s.DdlColumn.Name)
	return global.AlterSqlResult{Sql: dropSql}, nil
}
// getCharset resolves the schema's default character set to a byte-width
// factor: 3 for utf8mb3, 4 for utf8mb4, and 0 for anything else (including
// a missing result; on query failure the error is returned with res == 0).
func getCharset(s global.GetColumnTypeInput) (res int, err error) {
	const event = "[clusterGetCharset]"
	query := fmt.Sprintf("SELECT DEFAULT_CHARACTER_SET_NAME as `character` FROM INFORMATION_SCHEMA.SCHEMATA where SCHEMA_NAME='%v'", s.TableInfo.Schema)
	var raw any
	_, raw, err = ExecSelectSql(ExecSqlInputS{
		Ss:           s,
		Event:        event,
		SelectSqlStr: query,
		Expect:       ETL.Map,
	})
	if err != nil {
		err = errors.New(fmt.Sprintf("%v %v", event, err))
		return
	}
	if raw == nil {
		return
	}
	charset, ok := raw.(map[string]interface{})["character"]
	if !ok {
		return
	}
	switch strings.ToLower(fmt.Sprintf("%v", charset)) {
	case "utf8mb3":
		res = 3
	case "utf8mb4":
		res = 4
	}
	return
}

// AddColumnSql builds the ALTER TABLE ... ADD COLUMN statement for the
// requested column: converted type, NULL/NOT NULL constraint, optional
// DEFAULT clause and a sanitized COMMENT clause. Charset/collation slots are
// intentionally left empty here.
func (cs TableColumn) AddColumnSql(s global.GetColumnTypeInput) (any, error) {
	var (
		nullConstraint = " NULL " // plain constant; the original wrapped it in a needless fmt.Sprintf
		ChartSetValue  string
		CollationValue string
		DefaultValue   string
		factor         = 1
	)
	// Convert the source column type to its MySQL form (length factor 1).
	TypeConstraint, _ := TypeConvert(s.DdlColumn.SMeta, factor)
	if strings.EqualFold(s.DdlColumn.SMeta.IsNull, "NO") {
		nullConstraint = " NOT NULL "
	}
	// The original also checked EqualFold(ColumnDefault, "''"), which was
	// redundant: "''" already has len > 0.
	if len(s.DdlColumn.SMeta.ColumnDefault) > 0 {
		DefaultValue = fmt.Sprintf("DEFAULT %v", s.DdlColumn.SMeta.ColumnDefault)
	}
	return global.AlterSqlResult{
		Sql: fmt.Sprintf("alter table `%v`.`%v` add column `%v` %v %v %v %v %v %v;", s.TableInfo.Schema, s.TableInfo.Table, s.DdlColumn.Name, TypeConstraint, nullConstraint, ChartSetValue, CollationValue, DefaultValue, commentAction("COMMENT ", s.DdlColumn.SMeta.ColumnComment)),
	}, nil
}

// ModifyColumnSql builds the ALTER TABLE ... MODIFY COLUMN statement for a
// column-change request, reconciling differences caused by the heterogeneous
// source database:
//  1. NULL vs NOT NULL differences forced by primary-key (and partition key)
//     membership;
//  2. default-value differences that follow from the null constraint.
// When "null" is the only attribute flagged as changed, the ALTER statement
// is suppressed (empty Sql) and only the Record explanation is returned.
func (cs TableColumn) ModifyColumnSql(s global.GetColumnTypeInput) (any, error) {
	var (
		TypeConstraint string
		nullConstraint = " NULL " // constant; no fmt.Sprintf needed
		ChartSetValue  string
		CollationValue string
		DefaultValue   string
		factor         = 1
		record         string
		alterSql       string
	)
	// Count which column attributes were flagged as changed.
	var abGather = make(map[string]int)
	attrs, err := global.StructSubJsonNameToMap(*s.DdlColumn.Attributes)
	if err != nil {
		return nil, err
	}
	for k, v := range attrs {
		if v.(bool) {
			abGather[k]++
		}
	}
	// Type changed: convert the source type; otherwise keep the destination type.
	if s.DdlColumn.Attributes.Type {
		TypeConstraint, _ = TypeConvert(s.DdlColumn.SMeta, factor)
	} else {
		TypeConstraint = s.DdlColumn.DMeta.ColumnType
	}
	// Membership of the column in the partition key / primary key column sets.
	partTrue := strings.Contains(strings.ToLower(strings.Join(s.DdlColumn.PartColumn, ",")), strings.ToLower(s.DdlColumn.Name))
	priTrue := strings.Contains(strings.ToLower(strings.Join(s.DdlColumn.PriColumn, ",")), strings.ToLower(s.DdlColumn.Name))
	if strings.EqualFold(s.DdlColumn.SMeta.IsNull, "YES") && s.DdlColumn.Attributes.Null {
		// Source says nullable, but key membership forces NOT NULL.
		if partTrue && priTrue {
			record = "primary key include partition column must not null"
		} else if priTrue && !partTrue {
			record = "primary key must not null"
		}
		nullConstraint = " NOT NULL "
	} else if strings.EqualFold(s.DdlColumn.SMeta.IsNull, "NO") && s.DdlColumn.Attributes.Null {
		nullConstraint = " NOT NULL "
	}
	// The original also checked EqualFold(ColumnDefault, "''"), redundant with len > 0.
	if len(s.DdlColumn.SMeta.ColumnDefault) > 0 {
		DefaultValue = fmt.Sprintf(" DEFAULT %v", DefaultValueConvert(s.DdlColumn.DMeta.ColumnType, s.DdlColumn.SMeta.ConvertColumnType.Type, s.DdlColumn.SMeta.ConvertDefaultValue))
	} else if strings.EqualFold(strings.TrimSpace(nullConstraint), "NOT NULL") {
		// NOT NULL columns without an explicit default get an empty-string default.
		DefaultValue = " DEFAULT '' "
	}
	alterSql = fmt.Sprintf("alter table `%v`.`%v` modify column `%v` %v %v %v %v %v %v;", s.TableInfo.Schema, s.TableInfo.Table, s.DdlColumn.Name, TypeConstraint, nullConstraint, ChartSetValue, CollationValue, DefaultValue, commentAction("COMMENT ", s.DdlColumn.SMeta.ColumnComment))
	// Only the null attribute changed: drop the statement, keep the record.
	if _, ok := abGather["null"]; ok && len(abGather) == 1 {
		alterSql = ""
	}
	return global.AlterSqlResult{
		Record: record,
		Sql:    alterSql,
	}, nil
}

// createTableSqlPartitionBody renders the PARTITION BY fragment of the
// CREATE TABLE statement from the "source" partition metadata. It returns:
//   - record: heterogeneous-conversion notes produced while adapting the
//     partition definition to MySQL's rules;
//   - partitionColumn: the partition column names (later used to extend
//     unique/primary indexes);
//   - partitionTableSql: the rendered fragment, empty for unpartitioned tables.
func createTableSqlPartitionBody(s *global.GetColumnTypeInput) (record []global.HeterogeneousConvertRecord, partitionColumn []string, partitionTableSql string) {
	var (
		partBodyColumn                            = make(map[string][]string)
		partColumnType                            = make(map[string]string)
		pm                                        []global.TablePartitionConfig
		partBodyColumnMap                         = make(map[string][]string)
		partBodyColumnHead, subpartBodyColumnHead string
		partitionRules                            []string
		partMeth                                  string
		partSumMax                                int
	)
	// Bail out early when the table carries no "source" partition metadata.
	if _, ok := s.TableMeta.PartitionMeta["source"]; !ok || len(s.TableMeta.PartitionMeta) == 0 {
		return
	}
	pm = s.TableMeta.PartitionMeta["source"]
	if len(pm) == 0 {
		return
	}
	// Collect the first-level ("first") and sub-partition ("sub") column names.
	if len(pm[0].FirstPartition.ColumnName) > 0 {
		partBodyColumnMap["first"] = pm[0].FirstPartition.ColumnName
	}
	if len(pm[0].SubPartMeta) > 0 {
		partBodyColumnMap["sub"] = pm[0].SubPartMeta[0].ColumnName
	}
	// Maximum hash sub-partition count across all first-level partitions.
	partSumMax = subPartSumMax(s.TableMeta.PartitionMeta["source"])
	// Resolve first/sub partition columns against the column metadata and
	// note whether a function wraps the column.
	for _, w := range s.TableMeta.ColumnMeta["source"] {
		for k, v := range partBodyColumnMap {
			for _, p := range v {
				if strings.EqualFold(w.ColumnName, p) {
					partitionColumn = append(partitionColumn, w.ColumnName)
					partColumnType[p] = w.ConvertColumnType.Type
					switch {
					case strings.EqualFold(w.ConvertColumnType.Type, "decimal"):
						// Decimal partition keys are not allowed; record the
						// demotion to an integer type.
						partBodyColumn[k] = append(partBodyColumn[k], fmt.Sprintf("`%v`", p))
						record = append(record, global.HeterogeneousConvertRecord{
							Object:   "partition column",
							Logo:     "column dateType convert",
							OldValue: w.ColumnType,
							NewValue: fmt.Sprintf("bigint"),
							Reason:   "partition column data type decimal must is int",
						})
						// NOTE(review): w is the range-loop copy, so these two
						// writes never reach s.TableMeta.ColumnMeta — confirm
						// whether the write-back was intended.
						w.ConvertColumnType.DataLength = 8
						w.ConvertColumnType.Type = "int"
					case len(pm[0].FirstPartition.Func) > 0: // the column is wrapped in a function
						partBodyColumn[k] = append(partBodyColumn[k], fmt.Sprintf("%v(`%v`)", pm[0].FirstPartition.Func, p))
						record = append(record, global.HeterogeneousConvertRecord{
							Object:   "partition column",
							Logo:     "func convert",
							OldValue: fmt.Sprintf("%v(%v)", pm[0].FirstPartition.Func, p),
							NewValue: fmt.Sprintf("%v(`%v`)", pm[0].FirstPartition.Func, p),
							Reason:   "partition column data type func must Convert",
						})
					default:
						partBodyColumn[k] = append(partBodyColumn[k], fmt.Sprintf("`%v`", p))
					}
				}
			}
		}
	}
	// Render the first-level partition key list.
	if columnX, ok := partBodyColumn["first"]; ok {
		if len(columnX) > 1 {
			// Multi-column keys require the COLUMNS (...) form.
			partBodyColumnHead = fmt.Sprintf("columns (%v)", strings.Join(columnX, ","))
			record = append(record, global.HeterogeneousConvertRecord{
				Object:   "partition column",
				Logo:     "multiple column",
				OldValue: strings.Join(columnX, ","),
				NewValue: fmt.Sprintf("columns (%v)", strings.Join(columnX, ",")),
				Reason:   "partition multiple columns conversion",
			})
		} else if len(columnX) == 1 {
			for k, v := range partColumnType {
				if strings.Contains(columnX[0], k) {
					switch v {
					case "char", "varchar", "time", "timestamp":
						// Single non-integer keys also need the COLUMNS (...) form.
						partBodyColumnHead = fmt.Sprintf("columns (%v)", columnX[0])
						record = append(record, global.HeterogeneousConvertRecord{
							Object:   "partition column",
							Logo:     "partition column type",
							OldValue: columnX[0],
							NewValue: fmt.Sprintf("columns (%v)", columnX[0]),
							Reason:   "partition column data type non-int conversion",
						})
					default:
						partBodyColumnHead = fmt.Sprintf("(%v)", columnX[0])
					}
				}
			}
		}
	}
	// Render the sub-partition key list plus its SUBPARTITION BY header.
	if columnX, ok := partBodyColumn["sub"]; ok {
		if len(columnX) > 1 {
			subpartBodyColumnHead = fmt.Sprintf("columns (%v)", strings.Join(columnX, ","))
		} else if len(columnX) == 1 {
			for k, v := range partColumnType {
				if strings.Contains(columnX[0], k) {
					switch v {
					//case "char", "varchar", "time", "timestamp":
					//	subpartBodyColumnHead = fmt.Sprintf("columns (%v)", columnX[0])
					default:
						subpartBodyColumnHead = fmt.Sprintf("(%v)", columnX[0])
					}
				}
			}
		}
		subpartBodyColumnHead = fmt.Sprintf("SUBPARTITION BY %v%v SUBPARTITIONS %v", s.TableMeta.PartitionMeta["source"][0].SubPartMeta[0].Meth, subpartBodyColumnHead, partSumMax)
	}
	// Render per-partition rules. HASH/KEY only need a partition count, so
	// the loop exits after the first entry for those methods.
forLoop:
	for k, v := range pm {
		if k == 0 {
			partMeth = strings.ToUpper(v.FirstPartition.Meth)
		}
		switch strings.ToUpper(v.FirstPartition.Meth) {
		case "RANGE":
			var subRulesGather []string
			pr := fmt.Sprintf("PARTITION `%v` VALUES LESS THAN (%v)", v.FirstPartition.Name, v.FirstPartition.Rules)
			switch v.SubPartMeth {
			case "HASH":
				for _, w := range v.SubPartMeta {
					subRulesGather = append(subRulesGather, fmt.Sprintf("SUBPARTITION `%v`", w.Name))
				}
				// Pad missing hash sub-partitions up to partSumMax so every
				// first-level partition has the same sub-partition count.
				if len(v.SubPartMeta) < partSumMax {
					for k := 0; k < partSumMax-len(v.SubPartMeta); k++ {
						subRulesGather = append(subRulesGather, fmt.Sprintf("SUBPARTITION `add_sub_%v`", k))
						record = append(record, global.HeterogeneousConvertRecord{
							Object:   "sub partition",
							Logo:     "miss partition",
							OldValue: "",
							NewValue: fmt.Sprintf("SUBPARTITION `add_sub_%v`", k),
							Reason:   "sub partition sum is missing and is less than the defined quantity",
						})
					}
				}
			}
			if len(subRulesGather) > 0 {
				pr = fmt.Sprintf("%v(\n\t%v\n\t)", pr, strings.Join(subRulesGather, ",\n\t"))
			}
			partitionRules = append(partitionRules, pr)
		case "LIST":
			partitionRules = append(partitionRules, fmt.Sprintf("PARTITION `%v` VALUES IN (%v)", v.FirstPartition.Name, v.FirstPartition.Rules))
		case "HASH":
			partitionRules = append(partitionRules, fmt.Sprintf("PARTITIONS %v ", len(pm)))
			break forLoop
		case "KEY":
			partitionRules = append(partitionRules, fmt.Sprintf("PARTITION %v ", len(pm)))
			break forLoop
		}
	}
	// Assemble the final fragment; RANGE/LIST wrap their rule list in parens.
	if len(partitionRules) > 0 {
		switch partMeth {
		case "HASH", "KEY":
			partitionTableSql = fmt.Sprintf("PARTITION BY %v %v %v \n %v \n", pm[0].FirstPartition.Meth, partBodyColumnHead, subpartBodyColumnHead, strings.Join(partitionRules, ",\n"))
		case "RANGE", "LIST":
			partitionTableSql = fmt.Sprintf("PARTITION BY %v %v %v (\n %v \n)", pm[0].FirstPartition.Meth, partBodyColumnHead, subpartBodyColumnHead, strings.Join(partitionRules, ",\n"))
		}
	}
	return
}
// createIndexQuit reports whether index generation should be skipped:
// true when the table has no index metadata at all, or the "source" entry
// is missing or carries no index columns.
func createIndexQuit(s global.GetColumnTypeInput) bool {
	if len(s.TableMeta.IndexMeta) == 0 {
		return true
	}
	im, ok := s.TableMeta.IndexMeta["source"]
	return !ok || len(im.IndexColumn) == 0
}
// newIndexColumnAddPartColumn renders every index column as a backtick-quoted
// identifier (with an optional prefix length taken from SubPart) and, for
// unique/primary indexes, appends any partition columns that are missing —
// on partitioned tables unique and primary keys must include the partition
// columns. Each forced extension is reported as a conversion record.
//
// The second result was renamed from `recover`, which shadowed the
// predeclared recover() built-in.
func newIndexColumnAddPartColumn(im global.IndexMerge, partitionColumn []string) (newIndexColumn map[string][]string, records []global.HeterogeneousConvertRecord) {
	newIndexColumn = make(map[string][]string)
	for k, v := range im.IndexColumn {
		t, ok1 := im.IndexType[k]
		if !ok1 {
			// Index without a known type: skipped entirely (as before).
			continue
		}
		var indexColumn1 []string
		for _, w := range v {
			// Prefix index: keep the sub-part length when it parses as an int.
			column := fmt.Sprintf("`%v`", w.ColumnName)
			if _, err := strconv.Atoi(w.SubPart); err == nil {
				column = fmt.Sprintf("`%v`(%v)", w.ColumnName, w.SubPart)
			}
			indexColumn1 = append(indexColumn1, column)
		}
		var partitionColumn1 []string
		for _, y := range partitionColumn {
			partitionColumn1 = append(partitionColumn1, fmt.Sprintf("`%v`", y))
		}
		switch t {
		case "uni", "pri":
			oldIndexColumn := indexColumn1
			// Arrcmp yields the partition columns not already in the index.
			add, _ := ea.CheckSum().Arrcmp(partitionColumn1, indexColumn1)
			indexColumn1 = append(indexColumn1, add...)
			if len(add) > 0 {
				records = append(records, global.HeterogeneousConvertRecord{
					Object:   "index uniq",
					Logo:     k,
					OldValue: strings.Join(oldIndexColumn, ","),
					NewValue: strings.Join(indexColumn1, ","),
					Reason:   "primary or unique key must Include partition column",
				})
			}
		}
		newIndexColumn[k] = indexColumn1
	}
	return
}
// getSingleIndexMaxLength computes, per index: the byte length of every
// member column (via TypeConvert with the charset factor), the matching
// column metadata, and the total index length used for the 3072-byte limit
// check. It also counts, per column, how many indexes reference it
// (indexColumnGarth).
func getSingleIndexMaxLength(s global.GetColumnTypeInput, newIndexColumn map[string][]string, factor int) (res getSingleIndexMaxLengthResult) {
	res = getSingleIndexMaxLengthResult{
		newIndexColumnMeta: make(map[string][]global.TableMeta),
		indexLength:        make(map[string][]int),
		indexColumnGarth:   make(map[string]int),
		indexMaxLength:     make(map[string]int),
	}
	for k, v := range newIndexColumn {
		var newIndexLength int
		for _, w := range v {
			// Strip a prefix-index length such as "`col`(10)" down to the bare column.
			if strings.Contains(w, "(") && strings.HasSuffix(w, ")") {
				if n := strings.Index(w, "("); n != -1 {
					w = w[:n]
				}
			}
			w = strings.ReplaceAll(w, "`", "")
			res.indexColumnGarth[w]++
			for _, l := range s.TableMeta.ColumnMeta["source"] {
				if strings.EqualFold(l.ColumnName, w) {
					_, p := TypeConvert(l, factor)
					newIndexLength += p
					// append on a missing map key works on the nil value, so
					// the original's explicit exists/not-exists branches were
					// redundant.
					res.newIndexColumnMeta[k] = append(res.newIndexColumnMeta[k], l)
					res.indexLength[k] = append(res.indexLength[k], p)
				}
			}
		}
		res.indexMaxLength[k] = newIndexLength
	}
	return
}
// getNewSingleIndexColumn enforces MySQL's 3072-byte single-index limit.
// For an index whose total length exceeds the limit, every member column
// longer than the per-column budget (3072 / member count) is rewritten as a
// prefix index of budget/factor characters, and a conversion record is
// emitted. Indexes within the limit are copied through unchanged.
//
// columnGather maps indexName -> rendered column expressions. The second
// result was renamed from `recover`, which shadowed the predeclared built-in.
func getNewSingleIndexColumn(indexOptimizerResult getSingleIndexMaxLengthResult, newIndexColumn map[string][]string, factor int) (columnGather map[string][]string, records []global.HeterogeneousConvertRecord) {
	columnGather = make(map[string][]string)
	for k, v := range indexOptimizerResult.indexMaxLength {
		// Per-column byte budget for this index.
		var avgLength int
		if len(indexOptimizerResult.indexLength[k]) > 0 {
			avgLength = 3072 / len(indexOptimizerResult.indexLength[k])
		}
		if v > 3072 {
			var newColumnGarth []string
			for n, m := range indexOptimizerResult.indexLength[k] {
				if m > avgLength {
					// Drop any existing prefix length, then re-apply the budgeted one.
					w := newIndexColumn[k][n]
					if strings.Contains(w, "(") && strings.HasSuffix(w, ")") {
						if nl := strings.Index(w, "("); nl != -1 {
							w = w[:nl]
						}
					}
					newColumnGarth = append(newColumnGarth, fmt.Sprintf("%v(%v)", w, avgLength/factor))
				} else {
					newColumnGarth = append(newColumnGarth, newIndexColumn[k][n])
				}
			}
			records = append(records, global.HeterogeneousConvertRecord{
				Object:   "index size",
				Logo:     k,
				OldValue: strings.Join(newIndexColumn[k], ","),
				NewValue: strings.Join(newColumnGarth, ","),
				Reason:   "index length >3072",
			})
			columnGather[k] = newColumnGarth
		} else {
			// Within limits: copy through (the fmt.Sprintf("%v", string)
			// round-trip of the original was a no-op and has been removed).
			var newColumnGarth []string
			for n := range indexOptimizerResult.indexLength[k] {
				newColumnGarth = append(newColumnGarth, newIndexColumn[k][n])
			}
			columnGather[k] = newColumnGarth
		}
	}
	return
}
// generalIndexSql renders the KEY / UNIQUE KEY / PRIMARY KEY clauses of a
// CREATE TABLE statement from the per-index column lists. For the primary
// key it also returns the member columns that must be forced NOT NULL. A
// unique index literally named "primary" is renamed "primary_uni" so it
// cannot clash with the primary-key clause.
func generalIndexSql(im global.IndexMerge, columnGather map[string][]string, newIndexColumn map[string][]string) (res string, notNull []string) {
	var clauses []string
	for name, cols := range columnGather {
		joined := strings.Join(cols, ",")
		switch im.IndexType[name] {
		case "mul":
			clauses = append(clauses, fmt.Sprintf("KEY `%v` (%v)", name, joined))
		case "uni":
			uniName := name
			if strings.EqualFold(name, "primary") {
				uniName = fmt.Sprintf("%v_uni", name)
			}
			clauses = append(clauses, fmt.Sprintf("UNIQUE KEY `%v` (%v)", uniName, joined))
		case "pri":
			notNull = newIndexColumn[name]
			clauses = append(clauses, fmt.Sprintf("PRIMARY KEY `%v` (%v)", name, joined))
		}
	}
	res = strings.Join(clauses, ",\n")
	return
}
// CreateIndex drives index-DDL generation for the target table: it extends
// unique/primary indexes with missing partition columns, measures every
// index against the 3072-byte limit (shortening over-long members into
// prefix indexes), and renders the final KEY/UNIQUE/PRIMARY clauses together
// with the columns a primary key forces to NOT NULL.
func CreateIndex(s global.GetColumnTypeInput, partitionColumn []string, factor int) (result CreateIndexReturnResult) {
	result.indexColumnGarth = make(map[string]int)
	if createIndexQuit(s) {
		return
	}
	// Map reads return the zero value when missing; the original's `, _` blank
	// assignment was unnecessary.
	im := s.TableMeta.IndexMeta["source"]
	// Index name -> rendered columns, with partition columns forced into uni/pri keys.
	newIndexColumn, columnRecover := newIndexColumnAddPartColumn(im, partitionColumn)
	result.recover = append(result.recover, columnRecover...)
	// Per-index byte lengths feeding the maximum-length check.
	indexOptimizerResult := getSingleIndexMaxLength(s, newIndexColumn, factor)
	result.indexColumnGarth = indexOptimizerResult.indexColumnGarth
	// Enforce the single-index cap; over-long members become prefix indexes.
	columnGather, newColumnRecover := getNewSingleIndexColumn(indexOptimizerResult, newIndexColumn, factor)
	result.recover = append(result.recover, newColumnRecover...)
	result.indexSql, result.notNull = generalIndexSql(im, columnGather, newIndexColumn)
	return
}
// commentAction renders a COMMENT-style clause "<prefix> '<s>'". Placeholder
// tokens ("<entry>", "<null>", "''", "NULL") become an empty quoted comment,
// an empty string yields no clause at all, and embedded single quotes or
// semicolons are backslash-escaped before quoting.
func commentAction(prefix, s string) string {
	specials := []string{"'", ";"}
	if s == "<entry>" || s == "<null>" || s == "''" || s == "NULL" {
		return fmt.Sprintf("%v ''", prefix)
	}
	if len(s) == 0 {
		return ""
	}
	// ReplaceAll is a no-op when the character is absent, so no Contains
	// pre-check is needed.
	escaped := s
	for _, c := range specials {
		escaped = strings.ReplaceAll(escaped, c, fmt.Sprintf("\\%v", c))
	}
	return fmt.Sprintf("%v '%v'", prefix, escaped)
}
// createTableOptions reports whether option rr appears in the option list r,
// compared case-insensitively.
func createTableOptions(r []string, rr string) bool {
	for i := range r {
		if strings.EqualFold(r[i], rr) {
			return true
		}
	}
	return false
}
// createTableChartOptions emits a " CHARACTER SET <cs> " clause when the
// "chart" plan option is enabled and the column has a non-empty charset.
func createTableChartOptions(a global.TableMeta, r []string) (ch string) {
	if !createTableOptions(r, "chart") {
		return
	}
	if ch = nullEmpty(a.Charset); len(ch) > 0 {
		ch = fmt.Sprintf(" CHARACTER SET %v ", ch)
	}
	return
}
// createTableCollateOptions emits a " COLLATE <name> " clause when the
// "collate" plan option is enabled and the column has a non-empty collation.
func createTableCollateOptions(a global.TableMeta, r []string) (ch string) {
	if !createTableOptions(r, "collate") {
		return
	}
	if ch = nullEmpty(a.CollationName); len(ch) > 0 {
		ch = fmt.Sprintf(" COLLATE %v ", ch)
	}
	return
}
// createTableInvisibleOptions emits the version-gated INVISIBLE column
// attribute (MySQL 8.0.23+ comment syntax) for invisible columns.
func createTableInvisibleOptions(a global.TableMeta) (ch string) {
	if !a.Invisible {
		return ""
	}
	return " /*!80023 INVISIBLE */"
}
// createTableAutoIncrementOptions emits the auto_increment attribute for
// auto-increment columns, or an empty string otherwise.
func createTableAutoIncrementOptions(a global.TableMeta) (ch string) {
	if !a.AutoIncrement {
		return ""
	}
	return "auto_increment"
}
// createTableDefaultOptions renders the DEFAULT clause for a column, or
// records why the default was dropped: text columns cannot carry a default,
// and NOT NULL columns cannot default to NULL.
func createTableDefaultOptions(a global.TableMeta, TypeConstraint, nullConstraint string) (recoverRecord global.HeterogeneousConvertRecord, ch string) {
	recoverRecord = global.HeterogeneousConvertRecord{
		Object:   "column default",
		Logo:     a.ColumnName,
		OldValue: a.ConvertDefaultValue.OldValue,
		NewValue: "",
	}
	// No declared default: nothing to render or record.
	if len(a.ColumnDefault) == 0 {
		return
	}
	if strings.EqualFold(TypeConstraint, "text") {
		recoverRecord.Reason = "text date Type not use default value"
		return
	}
	if strings.Contains(nullConstraint, "NOT NULL") && strings.EqualFold(DefaultValueConvert(TypeConstraint, a.ConvertColumnType.Type, a.ConvertDefaultValue), "NULL") {
		recoverRecord.Reason = "not null Constraint not use default null"
		return
	}
	ch = fmt.Sprintf("DEFAULT %v", DefaultValueConvert(TypeConstraint, a.ConvertColumnType.Type, a.ConvertDefaultValue))
	return
}
// createTableNullOptions decides the NULL / NOT NULL clause. Columns
// declared NOT NULL keep it; nullable columns are promoted to NOT NULL
// (with a conversion record) when they belong to the primary-key column set.
func createTableNullOptions(a global.TableMeta, notNull []string) (rR []global.HeterogeneousConvertRecord, ch string) {
	if strings.EqualFold(a.IsNull, "NO") {
		return nil, " NOT NULL "
	}
	ch = " NULL "
	for _, candidate := range notNull {
		bare := strings.ReplaceAll(candidate, "`", "")
		if !strings.EqualFold(a.ColumnName, bare) {
			continue
		}
		ch = " NOT NULL "
		rR = append(rR, global.HeterogeneousConvertRecord{
			Object:   "column null",
			Logo:     a.ColumnName,
			OldValue: "NULL",
			NewValue: ch,
			Reason:   "primary key must is not null",
		})
	}
	return
}
// createTableColumnTypeOptions converts the column type via TypeConvert and,
// when the column was selected for the varchar->text demotion (row size over
// the limit), rewrites char/varchar columns to text with a conversion record.
func createTableColumnTypeOptions(a global.TableMeta, factor int, varcharModifyText []string) (rR []global.HeterogeneousConvertRecord, ch string) {
	ch, _ = TypeConvert(a, factor)
	for _, name := range varcharModifyText {
		if !strings.EqualFold(name, a.ColumnName) {
			continue
		}
		// Only char/varchar (exact converted-type match) are demoted.
		if a.ConvertColumnType.Type != "varchar" && a.ConvertColumnType.Type != "char" {
			continue
		}
		ch = "text"
		rR = append(rR, global.HeterogeneousConvertRecord{
			Object:   "table size",
			Logo:     name,
			OldValue: a.ColumnType,
			NewValue: ch,
			Reason:   "table length >65527",
		})
	}
	return
}
// createTableRowsSize sums the converted byte width of every column into the
// worst-case row size, counts nullable columns and total columns, and groups
// column names by their byte width. byteOrder lists the distinct widths in
// descending order so callers can shrink the widest columns first.
func createTableRowsSize(s global.GetColumnTypeInput, factor int) (maxSize, nullSum, colSum int, byteOrder []int, byteMerge map[int][]string) {
	byteMerge = make(map[int][]string)
	for _, a := range s.TableMeta.ColumnMeta["source"] {
		_, cByte := TypeConvert(a, factor)
		maxSize += cByte
		if strings.EqualFold(a.IsNull, "YES") {
			nullSum++
		}
		colSum++
		// Record each distinct width once; append works on the nil map value,
		// so the original's exists/not-exists branching was redundant.
		if _, seen := byteMerge[cByte]; !seen {
			byteOrder = append(byteOrder, cByte)
		}
		byteMerge[cByte] = append(byteMerge[cByte], a.ColumnName)
	}
	// Sort widths descending.
	sort.Slice(byteOrder, func(i, j int) bool {
		return byteOrder[i] > byteOrder[j]
	})
	return
}
// tableRowsNullSize estimates the per-row header size: one byte for every
// started group of 8 columns (the NULL bitmap) plus a fixed 6-byte header.
func tableRowsNullSize(colSum int) (nullSize int) {
	if colSum > 0 {
		// Ceiling division: equivalent to colSum/8 plus one when there is a remainder.
		nullSize = (colSum + 7) / 8
	}
	return nullSize + 6
}
// createTableVarcharToText selects the columns that must be demoted to text
// so the estimated row size fits under the 65527-byte row limit. Widest
// columns are considered first; columns that participate in any index are
// skipped.
func createTableVarcharToText(s global.GetColumnTypeInput, indexResult CreateIndexReturnResult, factor int) (varcharModifyText []string) {
	// Row-size estimation notes (translated from the original commentary):
	/*
		Row-header sizing guidance:
		Column count:
			roughly 1 byte of NULL bitmap per 8 columns.
		Presence of NULL values:
			nullable columns can grow the header, typically by 2 to 5 bytes
			when NULLs are present.
		Row format:
			COMPACT, REDUNDANT, DYNAMIC and COMPRESSED affect the header size:
			COMPACT: small header, usually 1-2 bytes (with no NULL columns).
			DYNAMIC: may need 4-6 bytes.
	*/
	/*
		The NULL bitmap stores one bit (0/1) per applicable column.
	*/
	// Fixed per-row overhead in InnoDB:
	/*
		1. Record header (mandatory)
		The record header always occupies 5 bytes (40 bits); different bits
		carry different information, mainly:
			delete_mask - marks whether the record is deleted
			record_type - the record's type:
			0 ordinary record, 1 B+tree non-leaf node, 2 minimum record, 3 maximum record
			next_record - relative position of the next record

		2. Hidden columns (mandatory)
		The hidden columns relate to transactions and the primary key, 19 bytes
		in total across three columns:
			row_id (not always present) - fallback primary key id
			trx_id - transaction id
			roll_pointer - rollback pointer
			InnoDB's primary-key strategy: prefer a user-defined primary key;
			failing that, promote a Unique key; only when neither exists does
			InnoDB add the hidden row_id column as the primary key.
	*/
	maxSize, _, colSum, byteOrder, byteMerge := createTableRowsSize(s, factor)
	// NOTE(review): the header-size result is discarded here — presumably it
	// was meant to be added to maxSize before the limit check; confirm.
	tableRowsNullSize(colSum)
	if maxSize > 65527 {
		tmpMaxSize := maxSize
	ll:
		for _, v := range byteOrder {
			for i := 0; i < len(byteMerge[v]); i++ {
				if tmpMaxSize < 65527 {
					break ll
				}
				// Never demote a column that participates in an index.
				if _, ok := indexResult.indexColumnGarth[byteMerge[v][i]]; !ok {
					varcharModifyText = append(varcharModifyText, byteMerge[v][i])
					tmpMaxSize = tmpMaxSize - v + 12 /* a text column keeps only a 12-byte external pointer in-row */
				}
			}
		}
	}
	return
}
// createTableBody renders the column list of the CREATE TABLE statement plus
// the table-level charset/collation/comment options, appending the index
// clauses when any were generated. It also returns every heterogeneous
// conversion record produced while shaping the columns.
//
// The first result was renamed from `recover` (shadowed the built-in).
func createTableBody(s global.GetColumnTypeInput, indexResult CreateIndexReturnResult, factor int) (records []global.HeterogeneousConvertRecord, p string) {
	createTableHead := fmt.Sprintf("CREATE TABLE IF NOT EXISTS `%v`.`%v` (\n", s.TableInfo.Schema, s.TableInfo.Table)
	if len(s.TableMeta.ColumnMeta) == 0 {
		return
	}
	// Hoisted out of the column loop: the varchar->text demotion list depends
	// only on the table, not on the current column — the original recomputed
	// it for every column, making the loop accidentally quadratic.
	varcharModifyText := createTableVarcharToText(s, indexResult, factor)
	var creatText []string
	for _, a := range s.TableMeta.ColumnMeta["source"] {
		rR1, TypeConstraint := createTableColumnTypeOptions(a, factor, varcharModifyText)
		records = append(records, rR1...)
		rR2, nullConstraint := createTableNullOptions(a, indexResult.notNull)
		records = append(records, rR2...)
		rR3, defaultValue := createTableDefaultOptions(a, TypeConstraint, nullConstraint)
		records = append(records, rR3)
		creatText = append(creatText, fmt.Sprintf("`%v` %v %v %v %v %v %v %v %v ", a.ColumnName,
			TypeConstraint,
			createTableChartOptions(a, s.Plan["column"]),
			createTableCollateOptions(a, s.Plan["column"]),
			nullConstraint,
			defaultValue,
			createTableAutoIncrementOptions(a),
			createTableInvisibleOptions(a),
			commentAction("COMMENT", a.ColumnComment)))
	}
	columns := strings.Join(creatText, ",\t\n")
	// With index clauses the body gets an extra section; both formats match
	// the original byte-for-byte (only one is computed now, instead of
	// building the plain form first and overwriting it).
	if len(indexResult.indexSql) > 0 {
		p = fmt.Sprintf("%v %v,\n %v \n) %v %v %v", createTableHead, columns, indexResult.indexSql, commentAction("DEFAULT CHARSET= ", s.TableMeta.Comment["source"].Character), commentAction("COLLATE= ", s.TableMeta.Comment["source"].Collate), commentAction("COMMENT ", s.TableMeta.Comment["source"].Comment))
	} else {
		p = fmt.Sprintf("%v %v \n) %v %v %v ", createTableHead, columns, commentAction("DEFAULT CHARSET= ", s.TableMeta.Comment["source"].Character), commentAction("COLLATE= ", s.TableMeta.Comment["source"].Collate), commentAction("COMMENT ", s.TableMeta.Comment["source"].Comment))
	}
	return
}
// getGIPKSwitch reports whether the server has
// sql_generate_invisible_primary_key enabled. Query failures and missing
// results are treated as "off"; the error is intentionally not propagated
// (the original built a wrapped error and then discarded it — an
// ineffectual assignment removed here).
func getGIPKSwitch(s global.GetColumnTypeInput) bool {
	const event = "[MySQLGetGIPKSwitch]"
	getSql := "select VARIABLE_VALUE as `value` from performance_schema.global_variables where Variable_name='sql_generate_invisible_primary_key'"
	_, res, err := ExecSelectSql(ExecSqlInputS{
		Ss:           s,
		Event:        event,
		SelectSqlStr: getSql,
		Expect:       ETL.String,
	})
	if err != nil || res == nil {
		return false
	}
	return strings.EqualFold(strings.TrimSpace(fmt.Sprintf("%v", res)), "ON")
}
// AddInitGIPKData injects MySQL GIPK-style generated-primary-key metadata:
// when the source table has no auto-increment column and no PRIMARY index,
// an invisible auto-increment bigint column `my_row_id` is prepended to the
// column metadata and registered as the PRIMARY index, mirroring what
// sql_generate_invisible_primary_key would do server-side.
func AddInitGIPKData(s global.GetColumnTypeInput) (r global.GetColumnTypeInput) {
	var i = s.TableMeta.IndexMeta["source"]
	// An existing auto-increment column means there is nothing to do.
	for _, c := range s.TableMeta.ColumnMeta["source"] {
		if c.AutoIncrement {
			return s
		}
	}
	if _, okk := i.IndexColumn["PRIMARY"]; !okk || len(i.IndexColumn) == 0 {
		// Copy-on-write: rebuild IndexColumn/IndexType so the original inner
		// maps are left untouched, then add the synthetic PRIMARY entry.
		// NOTE(review): s.TableMeta.IndexMeta itself is a map shared with the
		// caller, so the re-assignment below is visible outside this function
		// — confirm that is intended.
		var qq = make(map[string][]global.IndexColumn)
		for x, y := range i.IndexColumn {
			qq[x] = y
		}
		qq["PRIMARY"] = []global.IndexColumn{
			{
				Schema:     s.Schema,
				Table:      s.Table,
				IndexName:  "PRIMARY",
				ColumnType: "bigint",
				ColumnSeq:  "1",
				ColumnName: "my_row_id",
			},
		}
		i.IndexColumn = qq
		var pp = make(map[string]string)
		for x, y := range i.IndexType {
			pp[x] = y
		}
		pp["PRIMARY"] = "pri"
		i.IndexType = pp
		s.TableMeta.IndexMeta["source"] = i
		// Prepend the invisible auto-increment my_row_id column to the
		// column metadata so DDL generation places it first.
		var c = s.TableMeta.ColumnMeta["source"]
		var d []global.TableMeta
		d = append(d, global.TableMeta{
			DatabaseName:  s.Schema,
			TableName:     s.Table,
			ColumnName:    "my_row_id",
			ColumnType:    "bigint",
			ColumnSeq:     "0",
			IsNull:        "NO",
			ColumnComment: "",
			DataType:      "int",
			DataLength:    "8",
			AutoIncrement: true,
			Invisible:     true,
			ConvertColumnType: global.ConvertColumnMeta{
				DataLength: 8,
				Symbol:     true,
				Type:       "int",
			},
		})
		d = append(d, c...)
		s.TableMeta.ColumnMeta["source"] = d
	}
	r = s
	return
}
// GIPKAction returns the input augmented with generated-invisible-primary-key
// metadata when the server has GIPK enabled, or the input unchanged otherwise.
func GIPKAction(s global.GetColumnTypeInput) (r global.GetColumnTypeInput) {
	if !getGIPKSwitch(s) {
		return s
	}
	return AddInitGIPKData(s)
}

/*
CreateSql

	遇到的问题
	1: 索引长度超限（767），必要时可使用前缀索引或减少数据类型长度   已解决
	2：行长度超限（65535），超过优先转varchar最大的，逐层依次降低  已解决
	3：partition column 不能有comment，create table不受限制 alter comment 受限制   已解决
	4：partition column type 是decimal，需要转换为flot    已解决
	5：partition column type 是varchar,datetime，需要增加partition by range columns  已解决
	6：表存在主键索引或唯一索引时，需要包含分区列     已解决
	7：range 或list 分区 values less than (”) 存在单引号且数据类型为int类型，转换失败  已解决
	8：mysql体系不允许大字段有blobText，json，longtext 有默认值问  已解决
	9:  comment 中包含单引号或者;号问题     已解决
	10：bit(0) 不支持非法Error 3013 (HY000): Invalid size
	11: 排除行长度超限时将索引列转换为text
	12: mysql 开启sql_generate_invisible_primary_key后，创建无主键分区表报doesn't yet support 'generating invisible primary key for the partitioned tables'
*/
func (cs TableColumn) CreateSql(s global.GetColumnTypeInput) (result global.CreatSqlResult, err error) {
	// q carries the GIPK-augmented metadata when the server generates
	// invisible primary keys. It is now used consistently throughout; the
	// original mixed s and q (getCharset(s), CreateIndex(s, ...)), which was
	// behaviorally identical only because TableMeta's maps are shared between
	// the two values — using q everywhere makes the intent explicit.
	q := GIPKAction(s)
	// Charset byte-width factor (3 for utf8mb3, 4 for utf8mb4, 0 otherwise).
	var factor int
	if factor, err = getCharset(q); err != nil {
		return
	}
	record1, partitionColumn, tablePartitionBody := createTableSqlPartitionBody(&q)
	result.Record = append(result.Record, record1...)
	indexResult := CreateIndex(q, partitionColumn, factor)
	result.Record = append(result.Record, indexResult.recover...)
	record2, tableBody := createTableBody(q, indexResult, factor)
	result.Record = append(result.Record, record2...)
	result.Sql = fmt.Sprintf("%v %v;", tableBody, tablePartitionBody)
	return
}
