// Copyright 2019-present Facebook Inc. All rights reserved.
// This source code is licensed under the Apache 2.0 license found
// in the LICENSE file in the root directory of this source tree.

package schema

import (
	"context"
	"fmt"
	"gitee.com/damengde/atlas/sql/dm"
	"math"
	"reflect"
	"strconv"
	"strings"

	"gitee.com/damengde/ent/dialect"
	"gitee.com/damengde/ent/dialect/entsql"
	"gitee.com/damengde/ent/dialect/sql"
	"gitee.com/damengde/ent/schema/field"

	"gitee.com/damengde/atlas/sql/migrate"
	"gitee.com/damengde/atlas/sql/schema"
)

// Dm is a Dameng migration driver.
type Dm struct {
	dialect.Driver        // underlying driver used to run queries against the database.
	schema  string        // optional schema name used to scope information-schema lookups (see matchSchema).
	version string        // server build version, populated by init and used in isImplicitIndex.
}

// init loads the Dm server version from the database for later use in the
// migration process (e.g. the version comparison in isImplicitIndex).
func (d *Dm) init(ctx context.Context) error {
	rows := &sql.Rows{}
	if err := d.Query(ctx, `select svr_version,build_version,ID_CODE from SYS."V$INSTANCE";`, []any{}, rows); err != nil {
		return fmt.Errorf("dm: querying dm version %w", err)
	}
	defer rows.Close()
	if !rows.Next() {
		if err := rows.Err(); err != nil {
			return err
		}
		return fmt.Errorf("dm: version variable was not found")
	}
	// The query returns 3 columns; Scan must receive one destination per
	// column, otherwise database/sql fails with an argument-count mismatch.
	var svrVersion, buildVersion, idCode string
	if err := rows.Scan(&svrVersion, &buildVersion, &idCode); err != nil {
		return fmt.Errorf("dm: scanning dm version: %w", err)
	}
	// The build version is the value used for version comparisons.
	d.version = buildVersion
	return nil
}

// tableExist reports if a table with the given name exists in user_tables.
func (d *Dm) tableExist(ctx context.Context, conn dialect.ExecQuerier, name string) (bool, error) {
	query, args := sql.Select(sql.Count("*")).
		From(sql.Table("user_tables")).
		Where(sql.And(sql.EQ("TABLE_NAME", name))).
		Query()
	return exist(ctx, conn, query, args...)
}

// fkExist reports if a foreign-key constraint with the given name exists.
// Foreign-key introspection is not implemented for Dm; the method always
// reports false with no error.
func (d *Dm) fkExist(ctx context.Context, tx dialect.Tx, name string) (bool, error) {
	return false, nil
}

// table loads the current table description from the database.
func (d *Dm) table(ctx context.Context, tx dialect.Tx, name string) (*Table, error) {
	query, args := sql.Select(
		"COLUMN_NAME", "DATA_TYPE", "NULLABLE", "DATA_DEFAULT", "DATA_PRECISION", "DATA_SCALE", "DATA_LENGTH",
	).
		From(sql.Table("ALL_TAB_COLUMNS")).
		Where(sql.And(
			sql.EQ("TABLE_NAME", name)),
		).Query()
	rows := &sql.Rows{}
	if err := tx.Query(ctx, query, args, rows); err != nil {
		return nil, fmt.Errorf("dm: reading table description %w", err)
	}
	// Close is idempotent; the deferred call covers failure paths and the
	// explicit call below surfaces close errors on the happy path.
	defer rows.Close()
	tbl := NewTable(name)
	for rows.Next() {
		col := &Column{}
		if err := d.scanColumn(col, rows); err != nil {
			return nil, fmt.Errorf("dm: %w", err)
		}
		tbl.AddColumn(col)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	if err := rows.Close(); err != nil {
		return nil, fmt.Errorf("dm: closing rows %w", err)
	}
	// Load and attach the table indexes to their columns.
	idxs, err := d.indexes(ctx, tx, tbl)
	if err != nil {
		return nil, err
	}
	for _, idx := range idxs {
		tbl.addIndex(idx)
	}
	return tbl, nil
}

// indexes loads the table constraint/index descriptions from the database.
func (d *Dm) indexes(ctx context.Context, tx dialect.Tx, t *Table) ([]*Index, error) {
	query, args := sql.Select("a.CONSTRAINT_TYPE", "b.COLUMN_NAME").
		From(sql.Table("DBA_CONSTRAINTS").As("a")).
		Join(sql.Table("ALL_CONS_COLUMNS").As("b")).
		On("a.CONSTRAINT_NAME", "b.CONSTRAINT_NAME").
		Where(sql.And(
			sql.EQ("TABLE_NAME", t.Name),
		)).
		OrderBy("a.CONSTRAINT_TYPE").
		Query()
	rows := &sql.Rows{}
	if err := tx.Query(ctx, query, args, rows); err != nil {
		return nil, fmt.Errorf("dm: reading index description %w", err)
	}
	defer rows.Close()
	idxs, err := d.scanIndexes(rows, t)
	if err != nil {
		return nil, fmt.Errorf("dm: %w", err)
	}
	return idxs, nil
}

// setRange is a no-op for Dm: applying an auto-increment range to a table
// is not implemented by this driver.
func (d *Dm) setRange(ctx context.Context, conn dialect.ExecQuerier, t *Table, value int64) error {
	return nil
}

// verifyRange is a no-op for Dm: verifying a table's auto-increment range
// is not implemented by this driver.
func (d *Dm) verifyRange(ctx context.Context, tx dialect.ExecQuerier, t *Table, expected int64) error {
	return nil
}

// tBuilder returns the Dm DSL query for table creation.
func (d *Dm) tBuilder(t *Table) *sql.TableBuilder {
	b := sql.CreateTable(t.Name).IfNotExists()
	// Register every column, then the primary-key columns.
	for _, c := range t.Columns {
		b.Column(d.addColumn(c))
	}
	for _, pk := range t.PrimaryKey {
		b.PrimaryKey(pk.Name)
	}
	// Table options and checks can be supplied via the entsql annotation.
	if ant := t.Annotation; ant != nil {
		if ant.Options != "" {
			b.Options(ant.Options)
		}
		addChecks(b, ant)
	}
	return b
}

// cType returns the Dm string type for the given column.
func (d *Dm) cType(c *Column) string {
	// An explicit per-dialect schema type always takes precedence.
	if c.SchemaType != nil && c.SchemaType[dialect.Dm] != "" {
		// Dm returns the column type lower cased.
		return strings.ToLower(c.SchemaType[dialect.Dm])
	}
	switch c.Type {
	case field.TypeBool:
		return dm.TypeBit
	case field.TypeInt8:
		return dm.TypeTinyint
	case field.TypeUint8, field.TypeInt16, field.TypeUint16, field.TypeInt32:
		return dm.TypeInt
	case field.TypeUint32, field.TypeInt, field.TypeInt64, field.TypeUint, field.TypeUint64:
		return dm.TypeBigint
	case field.TypeBytes:
		return dm.TypeVarbinary
	case field.TypeJSON:
		return dm.TypeText
	case field.TypeString:
		size := c.Size
		if size == 0 {
			size = d.defaultSize(c)
		}
		// Text-like raw types and oversized strings map to TEXT;
		// everything else becomes a sized VARCHAR.
		if c.typ == "tinytext" || c.typ == "text" || size > math.MaxUint16 {
			return dm.TypeText
		}
		return fmt.Sprintf("VARCHAR(%d)", size)
	case field.TypeFloat32, field.TypeFloat64:
		return dm.TypeNumeric
	case field.TypeTime:
		return c.scanTypeOr("timestamp")
	case field.TypeEnum:
		return "VARCHAR(50)"
	case field.TypeUUID:
		return "CHAR(36)"
	case field.TypeOther:
		return c.typ
	default:
		panic(fmt.Sprintf("unsupported type %q for column %q", c.Type.String(), c.Name))
	}
}

// addColumn returns the DSL query for adding the given column to a table.
// The syntax/order is: datatype [Charset] [Unique|Increment] [Collation] [Nullable].
func (d *Dm) addColumn(c *Column) *sql.ColumnBuilder {
	cb := sql.Column(c.Name).
		Type(d.cType(c)).
		Attr(c.Attr)
	c.unique(cb)
	if c.Increment {
		cb.Attr("AUTO_INCREMENT")
	}
	c.nullable(cb)
	c.defaultValue(cb)
	return cb
}

// addIndex returns the query for adding an index to Dm.
func (d *Dm) addIndex(i *Index, table string) *sql.IndexBuilder {
	b := sql.CreateIndex(i.Name).Table(table)
	if i.Unique {
		b.Unique()
	}
	subparts := indexParts(i)
	for _, c := range i.Columns {
		// A non-zero sub_part means the column is indexed by prefix.
		if part := subparts[c.Name]; part != 0 {
			b.Column(fmt.Sprintf("%s(%d)", b.Builder.Quote(c.Name), part))
		} else {
			b.Column(c.Name)
		}
	}
	return b
}

// dropIndex drops a Dm index.
func (d *Dm) dropIndex(ctx context.Context, tx dialect.Tx, idx *Index, table string) error {
	stmt, args := idx.DropBuilder(table).Query()
	return tx.Exec(ctx, stmt, args, nil)
}

// prepare runs preparation work that needs to be done to apply the change-set.
// For every single-column index scheduled for dropping, it may first execute
// one auxiliary statement: dropping the foreign-key constraint tied to a
// column that is being dropped, or re-creating a plain index when only the
// uniqueness of a foreign-key column was removed.
func (d *Dm) prepare(ctx context.Context, tx dialect.Tx, change *changes, table string) error {
	for _, idx := range change.index.drop {
		switch n := len(idx.columns); {
		case n == 0:
			return fmt.Errorf("index %q has no columns", idx.Name)
		case n > 1:
			continue // not a foreign-key index.
		}
		// qr holds the optional statement to execute before the index drop.
		var qr sql.Querier
	Switch:
		switch col, ok := change.dropColumn(idx.columns[0]); {
		// If both the index and the column need to be dropped, the foreign-key
		// constraint that is associated with them need to be dropped as well.
		case ok:
			names, err := d.fkNames(ctx, tx, table, col.Name)
			if err != nil {
				return err
			}
			if len(names) == 1 {
				qr = sql.AlterTable(table).DropForeignKey(names[0])
			}
		// If the uniqueness was dropped from a foreign-key column,
		// create a "simple index" if no other index exist for it.
		case !ok && idx.Unique && len(idx.Columns) > 0:
			col := idx.Columns[0]
			// Skip when another single-column index already covers the column.
			for _, idx2 := range col.indexes {
				if idx2 != idx && len(idx2.columns) == 1 {
					break Switch
				}
			}
			names, err := d.fkNames(ctx, tx, table, col.Name)
			if err != nil {
				return err
			}
			if len(names) == 1 {
				qr = sql.CreateIndex(names[0]).Table(table).Columns(col.Name)
			}
		}
		if qr != nil {
			query, args := qr.Query()
			if err := tx.Exec(ctx, query, args, nil); err != nil {
				return err
			}
		}
	}
	return nil
}

// scanColumn scans a row of the ALL_TAB_COLUMNS query into the given Column,
// mapping the raw database type to the corresponding ent field type and size.
func (d *Dm) scanColumn(c *Column, rows *sql.Rows) error {
	var (
		nullable         sql.NullString
		defaults         sql.NullString
		numericPrecision sql.NullInt64
		numericScale     sql.NullInt64
		valLength        sql.NullInt64
	)
	if err := rows.Scan(&c.Name, &c.typ, &nullable, &defaults, &numericPrecision, &numericScale, &valLength); err != nil {
		return fmt.Errorf("scanning column description: %w", err)
	}
	if nullable.Valid {
		c.Nullable = nullable.String == "YES"
	}
	if c.typ == "" {
		return fmt.Errorf("missing type information for column %q", c.Name)
	}

	switch c.typ {
	case dm.TypeBit:
		c.Type = field.TypeBool
	case dm.TypeChar, dm.TypeCharacter, dm.TypeVarchar:
		c.Type = field.TypeString
		if valLength.Valid {
			c.Size = valLength.Int64
		}

	case dm.TypeNumeric, dm.TypeNumber, dm.TypeDecimal, dm.TypeDec:
		c.Type = field.TypeFloat64
		// If precision is specified then we should take that into account
		// and record it as an explicit schema type for this dialect.
		if numericPrecision.Valid {
			schemaType := fmt.Sprintf("%s(%d,%d)", c.typ, numericPrecision.Int64, numericScale.Int64)
			c.SchemaType = map[string]string{dialect.Dm: schemaType}
		}
		if valLength.Valid {
			c.Size = valLength.Int64
		}

	case dm.TypeInteger, dm.TypeBigint, dm.TypeInt:
		c.Type = field.TypeInt64
		if valLength.Valid {
			c.Size = valLength.Int64
		}
	case dm.TypeTinyint, dm.TypeByte:
		c.Type = field.TypeInt8
		if valLength.Valid {
			c.Size = valLength.Int64
		}
	case dm.TypeSmallint:
		c.Type = field.TypeInt16
		if valLength.Valid {
			c.Size = valLength.Int64
		}
	case dm.TypeBinary, dm.TypeVarbinary:
		c.Type = field.TypeBytes
	case dm.TypeReal, dm.TypeFloat, dm.TypeDouble, dm.TypeDoublePrecision:
		c.Type = field.TypeFloat64
	case dm.TypeDate, dm.TypeTime, dm.TypeTimestamp, dm.TypeDatetime:
		c.Type = field.TypeTime
		// The mapping from schema defaults to database
		// defaults is not supported for TypeTime fields.
		defaults = sql.NullString{}
	case dm.TypeText, dm.TypeLongvarchar:
		c.Type = field.TypeString
		c.Size = math.MaxUint32

	case dm.TypeImage, dm.TypeLongvarbinary, dm.TypeBfile:
		c.Type = field.TypeBytes
		c.Size = math.MaxUint32

	case dm.TypeBlob, dm.TypeClob:
		c.Type = field.TypeString
		if valLength.Valid {
			c.Size = valLength.Int64
		}
	default:
		// Report the raw database type (c.typ) — c.Type is still unset here,
		// so formatting it would print a meaningless zero value.
		return fmt.Errorf("unknown column type %q for version %q", c.typ, d.version)
	}
	if defaults.Valid {
		return c.ScanDefault(defaults.String)
	}
	return nil
}

// scanIndexes scans sql.Rows into an Indexes list. The rows are produced by
// the indexes query above and carry exactly two columns, in order:
// a.CONSTRAINT_TYPE and b.COLUMN_NAME.
//
// NOTE(review): the first scanned value ("name") holds the constraint TYPE
// ("P" for primary-key, "U" for unique), not a constraint name, yet it is
// also used below as the index name and grouping key — so all rows of the
// same constraint type collapse into one Index entry. Confirm this is the
// intended behavior for tables with multiple unique constraints.
func (d *Dm) scanIndexes(rows *sql.Rows, t *Table) (Indexes, error) {
	var (
		i     Indexes
		names = make(map[string]*Index)
	)
	for rows.Next() {
		var (
			name   string
			column string
		)
		if err := rows.Scan(&name, &column); err != nil {
			return nil, fmt.Errorf("scanning index description: %w", err)
		}
		// Skip primary keys: register the column on the table instead.
		if name == "P" {
			c, ok := t.column(column)
			if !ok {
				return nil, fmt.Errorf("missing primary-key column: %q", column)
			}
			t.PrimaryKey = append(t.PrimaryKey, c)
			continue
		}

		// Group rows by "name", creating the index entry on first sight.
		idx, ok := names[name]
		if !ok {
			idx = &Index{Name: name, Unique: name == "U", Annotation: &entsql.IndexAnnotation{}}
			i = append(i, idx)
			names[name] = idx
		}

		idx.columns = append(idx.columns, column)
	}
	if err := rows.Err(); err != nil {
		return nil, err
	}
	return i, nil
}

// isImplicitIndex reports if the index was created implicitly for the unique column.
func (d *Dm) isImplicitIndex(idx *Index, col *Column) bool {
	// Versions older than 8.0 go through `CHANGE COLUMN`, which auto-creates
	// the new index; the old one is dropped in `changeSet`.
	if compareVersions(d.version, "8.0.0") < 0 {
		return false
	}
	return idx.Name == col.Name && col.Unique
}

// renameColumn returns the statement for renaming a column in
// Dm based on its version.
func (d *Dm) renameColumn(t *Table, old, new *Column) sql.Querier {
	return sql.AlterTable(t.Name).ChangeColumn(old.Name, d.addColumn(new))
}

// renameIndex returns the statement for renaming an index:
// the old index is dropped and the new one is created in its place.
func (d *Dm) renameIndex(t *Table, old, new *Index) sql.Querier {
	return sql.AlterTable(t.Name).
		DropIndex(old.Name).
		AddIndex(new.Builder(t.Name))
}

// matchSchema returns the predicate for matching table schema.
// An optional column name overrides the default "TABLE_SCHEMA".
func (d *Dm) matchSchema(columns ...string) *sql.Predicate {
	col := "TABLE_SCHEMA"
	if len(columns) > 0 {
		col = columns[0]
	}
	if d.schema == "" {
		// No schema configured; match the connection's current database.
		return sql.EQ(col, sql.Raw("(SELECT DATABASE())"))
	}
	return sql.EQ(col, d.schema)
}

// tables returns the query for getting the table names in the schema.
func (d *Dm) tables() sql.Querier {
	from := sql.Table("TABLES").Schema("INFORMATION_SCHEMA")
	return sql.Select("TABLE_NAME").
		From(from).
		Where(d.matchSchema())
}

// alterColumns returns the queries for applying the columns change-set.
func (d *Dm) alterColumns(table string, add, modify, drop []*Column) sql.Queries {
	alter := sql.Dialect(dialect.Dm).AlterTable(table)
	for _, c := range add {
		alter.AddColumn(d.addColumn(c))
	}
	for _, c := range modify {
		alter.ModifyColumn(d.addColumn(c))
	}
	for _, c := range drop {
		alter.DropColumn(sql.Dialect(dialect.Dm).Column(c.Name))
	}
	// Nothing to alter.
	if len(alter.Queries) == 0 {
		return nil
	}
	return sql.Queries{alter}
}

// normalizeJSON normalizes longtext columns that carry a json_valid
// check constraint to type JSON (MariaDB-style storage of JSON columns).
func (d *Dm) normalizeJSON(ctx context.Context, tx dialect.Tx, t *Table) error {
	// Collect candidate longtext columns, keyed by name.
	candidates := make(map[string]*Column)
	for _, c := range t.Columns {
		if c.typ == "longtext" {
			candidates[c.Name] = c
		}
	}
	if len(candidates) == 0 {
		return nil
	}
	query, args := sql.Select("CONSTRAINT_NAME").
		From(sql.Table("CHECK_CONSTRAINTS").Schema("INFORMATION_SCHEMA")).
		Where(sql.And(
			d.matchSchema("CONSTRAINT_SCHEMA"),
			sql.EQ("TABLE_NAME", t.Name),
			sql.Like("CHECK_CLAUSE", "json_valid(%)"),
		)).
		Query()
	rows := &sql.Rows{}
	if err := tx.Query(ctx, query, args, rows); err != nil {
		return fmt.Errorf("dm: query table constraints %w", err)
	}
	// Call Close in cases of failures (Close is idempotent).
	defer rows.Close()
	names := make([]string, 0, len(candidates))
	if err := sql.ScanSlice(rows, &names); err != nil {
		return fmt.Errorf("dm: scan table constraints: %w", err)
	}
	if err := rows.Err(); err != nil {
		return err
	}
	if err := rows.Close(); err != nil {
		return err
	}
	// A constraint whose name matches a candidate column marks it as JSON.
	for _, name := range names {
		if c, ok := candidates[name]; ok {
			c.Type = field.TypeJSON
		}
	}
	return nil
}

// fkNames returns the foreign-key constraint names defined on a column.
func (d *Dm) fkNames(ctx context.Context, tx dialect.Tx, table, column string) ([]string, error) {
	query, args := sql.Select("CONSTRAINT_NAME").
		From(sql.Table("KEY_COLUMN_USAGE").Schema("INFORMATION_SCHEMA")).
		Where(sql.And(
			sql.EQ("TABLE_NAME", table),
			sql.EQ("COLUMN_NAME", column),
			// NULL for unique and primary-key constraints.
			sql.NotNull("POSITION_IN_UNIQUE_CONSTRAINT"),
			d.matchSchema(),
		)).
		Query()
	rows := &sql.Rows{}
	if err := tx.Query(ctx, query, args, rows); err != nil {
		return nil, fmt.Errorf("dm: reading constraint names %w", err)
	}
	defer rows.Close()
	var names []string
	if err := sql.ScanSlice(rows, &names); err != nil {
		return nil, err
	}
	return names, nil
}

// defaultSize returns the default size for a Dm varchar column
// when the field did not declare an explicit size.
func (d *Dm) defaultSize(c *Column) int64 {
	return DefaultStringLen
}

// needsConversion reports if column "old" needs to be converted
// (by table altering) to column "new".
func (d *Dm) needsConversion(old, new *Column) bool {
	oldType, newType := d.cType(old), d.cType(new)
	return oldType != newType
}

// indexParts returns a map holding the sub_part mapping if exists.
// Atlas integration.

// atOpen opens an Atlas migration driver on top of the given execution
// querier, wrapping it in the local db adapter.
func (d *Dm) atOpen(conn dialect.ExecQuerier) (migrate.Driver, error) {
	return dm.Open(&db{ExecQuerier: conn})
}

// atTable copies table-level creation options from the ent table (t1)
// onto its Atlas representation (t2).
func (d *Dm) atTable(t1 *Table, t2 *schema.Table) {
	// The annotation is optional (see the nil check in tBuilder);
	// guard against a nil-pointer dereference for unannotated tables.
	if t1.Annotation == nil {
		return
	}
	if opts := t1.Annotation.Options; opts != "" {
		t2.AddAttrs(&dm.CreateOptions{
			V: opts,
		})
	}
}

// supportsDefault reports if the column supports a DEFAULT clause,
// delegating to the column's own supportDefault check.
func (d *Dm) supportsDefault(c *Column) bool {
	return c.supportDefault()
}

// atTypeC maps the ent column (c1) to its Atlas schema type, assigning
// the result to c2.Type.Type. It mirrors the mapping in cType, but emits
// structured Atlas types rather than DDL strings.
func (d *Dm) atTypeC(c1 *Column, c2 *schema.Column) error {
	var t schema.Type
	switch c1.Type {
	case field.TypeBool:
		t = &schema.BoolType{T: dm.TypeBit}
	case field.TypeInt8:
		t = &schema.IntegerType{T: dm.TypeTinyint}
	case field.TypeUint8:
		t = &schema.IntegerType{T: dm.TypeInt, Unsigned: true}
	case field.TypeInt16:
		t = &schema.IntegerType{T: dm.TypeInt}
	case field.TypeUint16:
		t = &schema.IntegerType{T: dm.TypeInt, Unsigned: true}
	case field.TypeInt32:
		t = &schema.IntegerType{T: dm.TypeInt}
	case field.TypeUint32:
		t = &schema.IntegerType{T: dm.TypeInt, Unsigned: true}
	case field.TypeInt, field.TypeInt64:
		t = &schema.IntegerType{T: dm.TypeBigint}
	case field.TypeUint, field.TypeUint64:
		t = &schema.IntegerType{T: dm.TypeBigint, Unsigned: true}
	case field.TypeBytes:
		// Default to the 64KB tier when no explicit size was set.
		size := int64(math.MaxUint16)
		if c1.Size > 0 {
			size = c1.Size
		}
		// NOTE(review): all binary tiers use integer type names
		// (dm.TypeInt/dm.TypeBigint) inside a BinaryType — this looks like a
		// copy/paste from the integer mapping; confirm against the dm package.
		// Also note t stays nil if size > math.MaxUint32 — verify callers
		// tolerate a nil c2.Type.Type in that case.
		switch {
		case size <= math.MaxUint8:
			t = &schema.BinaryType{T: dm.TypeInt}
		case size <= math.MaxUint16:
			t = &schema.BinaryType{T: dm.TypeInt}
		case size < 1<<24:
			t = &schema.BinaryType{T: dm.TypeBigint}
		case size <= math.MaxUint32:
			t = &schema.BinaryType{T: dm.TypeBigint}
		}
	case field.TypeJSON:
		t = &schema.JSONType{T: dm.TypeText}
	case field.TypeString:
		size := c1.Size
		if size == 0 {
			size = d.defaultSize(c1)
		}
		// Strings up to 64KB become sized VARCHAR; larger ones become TEXT.
		switch {
		case size <= math.MaxUint16:
			t = &schema.StringType{T: dm.TypeVarchar, Size: int(size)}
		default:
			t = &schema.StringType{T: dm.TypeText}
		}
	case field.TypeFloat32, field.TypeFloat64:
		t = &schema.FloatType{T: c1.scanTypeOr(dm.TypeDouble)}
	case field.TypeTime:
		t = &schema.TimeType{T: c1.scanTypeOr(dm.TypeTimestamp)}
	case field.TypeEnum:
		// Enums are stored as VARCHAR sized to the longest value plus slack.
		var maxLen int
		for _, enum := range c1.Enums {
			if len(enum) > maxLen {
				maxLen = len(enum)
			}
		}
		t = &schema.StringType{T: dm.TypeVarchar, Size: maxLen + 10}
	case field.TypeUUID:
		t = &schema.StringType{T: dm.TypeChar, Size: 36}
		// NOTE(review): collation is set to the literal "1" — presumably a
		// driver-specific case-insensitivity flag; confirm its meaning.
		c2.SetCollation("1")
	default:
		t = &schema.StringType{T: dm.TypeText}
	}
	c2.Type.Type = t
	return nil
}

// atUniqueC converts the uniqueness of column c1 into an Atlas unique
// index on t2, unless the table already defines an implicit index for it.
func (d *Dm) atUniqueC(t1 *Table, c1 *Column, t2 *schema.Table, c2 *schema.Column) {
	// For UNIQUE columns, Dm creates an implicit index named as the column,
	// with an extra suffix when the name is taken (<c>, <c_2>, <c_3>, ...).
	// An index that is also defined explicitly will be added in atIndexes.
	for _, idx := range t1.Indexes {
		if !idx.Unique {
			continue
		}
		if d.atImplicitIndexName(idx, c1) {
			return
		}
	}
	t2.AddIndexes(schema.NewUniqueIndex(c1.Name).AddColumns(c2))
}

// atIncrementC marks the column as auto-increment. If the column carries a
// default value instead, the table-level auto-increment attribute is removed.
func (d *Dm) atIncrementC(t *schema.Table, c *schema.Column) {
	if c.Default == nil {
		c.AddAttrs(&dm.AutoIncrement{})
		return
	}
	t.Attrs = removeAttr(t.Attrs, reflect.TypeOf(&dm.AutoIncrement{}))
}

// atIncrementT records the table's initial auto-increment value v
// as an Atlas table attribute.
func (d *Dm) atIncrementT(t *schema.Table, v int64) {
	t.AddAttrs(&dm.AutoIncrement{V: v})
}

// atImplicitIndexName reports if idx looks like an index created implicitly
// for the unique column c1: either named exactly as the column, or as the
// column name followed by a numeric suffix greater than one
// (e.g. <c>, <c_2>, <c_3>, ...).
func (d *Dm) atImplicitIndexName(idx *Index, c1 *Column) bool {
	if idx.Name == c1.Name {
		return true
	}
	if !strings.HasPrefix(idx.Name, c1.Name+"_") {
		return false
	}
	// Use TrimPrefix, not TrimLeft: TrimLeft treats its second argument as a
	// SET of characters, so for a column whose name contains digits (e.g.
	// column "c2", index "c2_2") it would strip the digits of the suffix as
	// well, yielding an empty string and wrongly rejecting the index name.
	i, err := strconv.ParseInt(strings.TrimPrefix(idx.Name, c1.Name+"_"), 10, 64)
	return err == nil && i > 1
}

// atIndex converts the ent index idx1 into its Atlas representation idx2,
// resolving each indexed column against the Atlas table t2.
func (d *Dm) atIndex(idx1 *Index, t2 *schema.Table, idx2 *schema.Index) error {
	subparts := indexParts(idx1)
	for _, c1 := range idx1.Columns {
		c2, ok := t2.Column(c1.Name)
		if !ok {
			return fmt.Errorf("unexpected index %q column: %q", idx1.Name, c1.Name)
		}
		part := &schema.IndexPart{C: c2}
		// Attach the prefix-length (sub_part) attribute when one is defined.
		if n, ok := subparts[c1.Name]; ok {
			part.AddAttrs(&dm.SubPart{Len: int(n)})
		}
		idx2.AddParts(part)
	}
	// Propagate a custom index type annotation if one is defined.
	if t, ok := indexType(idx1, dialect.Dm); ok {
		idx2.AddAttrs(&dm.IndexType{T: t})
	}
	return nil
}

// atTypeRangeSQL returns the INSERT statement for storing the given type
// names in the global types table.
func (Dm) atTypeRangeSQL(ts ...string) string {
	// Build the VALUES tuples into a fresh slice instead of overwriting ts:
	// mutating the variadic parameter is observable by callers that pass an
	// existing slice with the `s...` spread.
	values := make([]string, len(ts))
	for i, t := range ts {
		values[i] = fmt.Sprintf("('%s')", t)
	}
	return fmt.Sprintf(`INSERT INTO "%s" ("type") VALUES %s`, TypeTable, strings.Join(values, " , "))
}
