package executor

import (
	"fmt"
	"os"
	"regexp"
	"strconv"
	"strings"
	"time"

	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/logger"

	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/config"

	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/clickhouse"
	"gitlab.bangdao-tech.com/ops/ck-migrator/pkg/generator"

	"github.com/riete/gpool"
)

// Executor drives the migration workflow: it generates migration SQL to a
// file (WriteSqlToFile) and replays such a file against a target cluster
// (RunSql).
type Executor struct {
	// rl limits how many statements run concurrently; it is built from
	// config.Config.Concurrency in New and handed to the worker pool in
	// RunSql.
	rl *gpool.Limiter
}

// WriteSqlToFile generates migration SQL for every configured
// database/table pair and writes the statements to sqlFile. When reverse
// is true the SQL is generated for the opposite migration direction
// (passed through to generator.GenerateSqlParams.Reverse).
//
// The process exits via logger.Fatal if sqlFile cannot be created.
func (e *Executor) WriteSqlToFile(source *clickhouse.Clickhouse, sqlFile string, reverse bool) {
	f, err := os.Create(sqlFile)
	if err != nil {
		// Previously the error was discarded, letting a nil/invalid file
		// propagate into the generator; abort loudly instead, consistent
		// with RunSql's handling of os.ReadFile.
		logger.Fatal(err.Error())
	}
	defer f.Close()
	g := generator.New(source, f)
	for _, database := range config.Config.DatabaseConfig {
		for _, table := range database.Tables {
			g.GenerateSql(&generator.GenerateSqlParams{
				Database: database,
				Table:    table,
				Reverse:  reverse,
			})
		}
	}
}

// RunSql executes every statement found in sqlFile against target through a
// bounded concurrent pool. Each non-empty line of sqlFile is expected to
// have the form "<sql>===<db.table.partition>===<total>"; malformed lines
// are logged and skipped. Statements that fail are appended to failed.sql;
// when renameFailed is true, a pre-existing failed.sql is first preserved
// as failed.sql.<timestamp> before being truncated.
//
// The process exits via logger.Fatal if sqlFile cannot be read or
// failed.sql cannot be created/written.
func (e *Executor) RunSql(target *clickhouse.Clickhouse, sqlFile string, renameFailed bool) {
	failedCh := make(chan string)
	resultCh := make(chan string)
	p := gpool.NewGenericConcurrentPool(e.rl, target.Exec)
	sqls, err := os.ReadFile(sqlFile)
	if err != nil {
		logger.Fatal(err.Error())
	}
	// Compile once instead of once per line (the original compiled inside
	// the loop). Matches shard predicates of the form
	// "% <totalShards> = <shardIndex>" embedded in the generated SQL.
	shardRegex := regexp.MustCompile(`%\s*(\d+)\s*=\s*(\d+)`)
	var params []*clickhouse.ExecParams
	for _, sqlStr := range strings.Split(string(sqls), "\n") {
		if sqlStr == "" {
			continue
		}
		sqlSplit := strings.Split(sqlStr, "===")
		if len(sqlSplit) < 3 {
			// The original indexed [1] and [2] unconditionally and would
			// panic on a malformed line; skip it and surface the problem.
			logger.Errorf("skipping malformed sql line: %q", sqlStr)
			continue
		}
		sql := sqlSplit[0]
		dbTablePartition := sqlSplit[1]
		total, _ := strconv.Atoi(sqlSplit[2]) // non-numeric total degrades to 0

		// Derive human-readable "current/total" shard info, if the
		// statement carries a shard predicate.
		shardInfo := ""
		isLastShard := false
		if matches := shardRegex.FindStringSubmatch(sql); len(matches) == 3 {
			currentShard, _ := strconv.Atoi(matches[2])
			totalShards := matches[1]
			shardInfo = strconv.Itoa(currentShard+1) + "/" + totalShards
			// Shard indexes start at 0, so the last shard's index is the
			// total shard count minus one.
			totalShardsInt, _ := strconv.Atoi(totalShards)
			isLastShard = currentShard == totalShardsInt-1
		}

		params = append(params, &clickhouse.ExecParams{
			Sql:              sql,
			Failed:           failedCh,
			Total:            total,
			DBTablePartition: dbTablePartition,
			Result:           resultCh,
			OriSqlString:     sqlStr,
			ShardInfo:        shardInfo,
			IsLastShard:      isLastShard, // marks the final shard of a statement group
		})
	}
	go func() {
		defer close(resultCh)
		p.Run(int64(config.Config.Concurrency), params, nil) // concurrency bound (replaces the old QPS setting)
	}()
	go func() {
		// failedCh is closed only after resultCh is drained, i.e. after
		// p.Run has returned, so no Exec call can send on a closed channel.
		defer close(failedCh)
		for i := range resultCh {
			logger.Info(i)
		}
	}()

	// If an earlier failed.sql exists, optionally preserve it as a
	// timestamped backup before it is truncated by os.Create below.
	failedSqlFile := "failed.sql"
	if renameFailed {
		if _, err := os.Stat(failedSqlFile); err == nil {
			timestamp := time.Now().Format("20060102150405")
			backupFileName := failedSqlFile + "." + timestamp
			if err := os.Rename(failedSqlFile, backupFileName); err != nil {
				logger.Errorf("无法重命名 %s 为 %s: %s", failedSqlFile, backupFileName, err.Error())
			} else {
				logger.Info(fmt.Sprintf("已将 %s 重命名为 %s", failedSqlFile, backupFileName))
			}
		}
	}

	f, err := os.Create(failedSqlFile)
	if err != nil {
		logger.Fatal(err.Error())
	}
	defer f.Close()
	// Drain failures until the pool finishes; ranging ends when failedCh
	// is closed by the goroutine above.
	for i := range failedCh {
		if _, err = f.WriteString(i + "\n"); err != nil {
			logger.Fatal(err.Error())
		}
	}
}

// New returns an Executor whose rate limiter is sized from the configured
// concurrency; the same value is used for both rate and burst (this
// replaced the earlier QPS-based setting).
func New() *Executor {
	limit := config.Config.Concurrency
	return &Executor{rl: gpool.NewLimiter(limit, limit)}
}