package generator

import (
	"fmt"
	"os"
	"strings"

	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/logger"

	"github.com/riete/errors"
	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/clickhouse"
	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/config"
	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/migrator"
)

// SqlForRun holds the migrate SQL statements generated for one
// (database, table, partition) unit, together with the total row count
// of that unit and any error raised while generating the statements.
type SqlForRun struct {
	// Sqls are the batched migrate SQL statements for this unit.
	Sqls      []string
	// Total is the number of source rows covered by Sqls.
	Total     uint64
	// Partition is the partition value, or "-" for non-partitioned tables.
	Partition string
	// Database is the source database name.
	Database  string
	// Table is the source table name.
	Table     string
	// Err records a generation failure, if any.
	Err       errors.Error
}

// SqlsForWrite returns each SQL statement annotated with its
// "===database.table.partition===total" suffix, in the on-disk line format
// consumed later by the runner. The order of s.Sqls is preserved.
func (s SqlForRun) SqlsForWrite() []string {
	// Pre-size: exactly one output line per input statement.
	sqls := make([]string, 0, len(s.Sqls))
	for _, sql := range s.Sqls {
		sqls = append(sqls, fmt.Sprintf("%s===%s.%s.%s===%d", sql, s.Database, s.Table, s.Partition, s.Total))
	}
	return sqls
}

// GenerateSqlParams bundles the inputs for Generator.GenerateSql:
// the source/target database pair, the table to migrate, and whether
// to iterate partitions in reverse order.
type GenerateSqlParams struct {
	Database config.Database
	Table    config.Table
	// Reverse controls the ordering of the partition list returned by
	// GetPartitionList (newest-first vs oldest-first — confirm against
	// the migrator implementation).
	Reverse  bool
}

// Generator produces migrate SQL statements via a migrator backed by a
// source ClickHouse connection and appends them to an output file.
type Generator struct {
	m *migrator.Migrator // queries source metadata and builds batch counts
	w *os.File           // destination file for the generated SQL lines
}

// GenerateSql generates migrate SQL for the table described by p and writes
// the annotated statements to the generator's output file. For distributed
// tables the underlying local table is resolved first. Partitioned tables
// produce one write per partition; non-partitioned tables produce a single
// write with partition "-". Any metadata or write failure is fatal.
func (g *Generator) GenerateSql(p *GenerateSqlParams) {
	var err errors.Error
	tableName := p.Table.Source
	if p.Table.Distributed {
		// A distributed table's partitions/keys live on its local table.
		tableName, err = g.m.GetDistributedLocalTableName(p.Database.Source, p.Table.Source)
		if err != nil {
			logger.Fatal(err.Trace(fmt.Sprintf("获取分布式表 %s.%s local table 失败", p.Database.Source, p.Table.Source)).Stack())
		}
	}
	partitionKey, sortingKey, err := g.m.GetPartitionAndSortingKey(p.Database.Source, tableName)
	if err != nil {
		logger.Fatal(err.Trace(fmt.Sprintf("获取 %s.%s partitionKey 和 sortingKey 失败", p.Database.Source, p.Table.Source)).Stack())
	}
	partitions, isPartition, err := g.m.GetPartitionList(p.Database.Source, tableName, p.Reverse)
	if err != nil {
		logger.Fatal(err.Trace(fmt.Sprintf("获取 %s.%s partition list失败", p.Database.Source, p.Table.Source)).Stack())
	}
	sqlBuilder := migrator.MigrateSqlBuilder{
		SourceDatabase:   p.Database.Source,
		SourceSortingKey: sortingKey,
		TargetDatabase:   p.Database.Target,
		Table:            p.Table,
	}

	if !isPartition {
		// Whole-table migration: no partition key/value.
		g.writePartitionSql(p, sqlBuilder, "", "")
	} else {
		for _, partitionValue := range partitions {
			g.writePartitionSql(p, sqlBuilder, partitionKey, partitionValue)
		}
	}
}

// writePartitionSql builds the migrate SQL for a single partition (or for the
// whole table when partitionKey is empty), annotates it, and appends it to the
// output file. Batch-count or write failures are fatal.
func (g *Generator) writePartitionSql(p *GenerateSqlParams, sqlBuilder migrator.MigrateSqlBuilder, partitionKey, partitionValue string) {
	total, batchCount, err := g.m.DetermineBatchCount(p.Database.Source, p.Table.Source, partitionKey, partitionValue, p.Table.Condition)
	if err != nil {
		logger.Fatal(err.Trace(fmt.Sprintf("获取 %s.%s batch count失败", p.Database.Source, p.Table.Source)).Stack())
	}
	// "-" marks the non-partitioned (whole-table) case in the output file.
	partition := partitionValue
	if partitionKey == "" {
		partition = "-"
	}
	s := &SqlForRun{
		Sqls:      sqlBuilder.Build(int(batchCount), partitionKey, partitionValue),
		Total:     total,
		Partition: partition,
		Database:  p.Database.Source,
		Table:     p.Table.Source,
	}
	if _, wErr := g.w.WriteString(strings.Join(s.SqlsForWrite(), "\n") + "\n"); wErr != nil {
		logger.Fatal(wErr.Error())
	}
}

// New returns a Generator that reads metadata from the given source
// ClickHouse connection and writes generated SQL lines to writer.
func New(source *clickhouse.Clickhouse, writer *os.File) *Generator {
	g := &Generator{
		m: migrator.New(source),
		w: writer,
	}
	return g
}
