package main

import (
	"bytes"
	"database/sql"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"math/rand"
	"net/http"
	"os"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	_ "github.com/go-sql-driver/mysql"
	"github.com/spf13/cobra"
)

// LoadResponse mirrors the JSON body returned by the Doris Stream Load HTTP
// API after a load request finishes; field names match the response keys
// exactly. Status is "Success" on a successful load.
type LoadResponse struct {
	TxnId                  int64  `json:"TxnId"`
	Label                  string `json:"Label"`
	Comment                string `json:"Comment"`
	TwoPhaseCommit         string `json:"TwoPhaseCommit"`
	Status                 string `json:"Status"`
	Message                string `json:"Message"`
	NumberTotalRows        int    `json:"NumberTotalRows"`
	NumberLoadedRows       int    `json:"NumberLoadedRows"`
	NumberFilteredRows     int    `json:"NumberFilteredRows"`
	NumberUnselectedRows   int    `json:"NumberUnselectedRows"`
	LoadBytes              int    `json:"LoadBytes"`
	LoadTimeMs             int    `json:"LoadTimeMs"`
	BeginTxnTimeMs         int    `json:"BeginTxnTimeMs"`
	StreamLoadPutTimeMs    int    `json:"StreamLoadPutTimeMs"`
	ReadDataTimeMs         int    `json:"ReadDataTimeMs"`
	WriteDataTimeMs        int    `json:"WriteDataTimeMs"`
	ReceiveDataTimeMs      int    `json:"ReceiveDataTimeMs"`
	CommitAndPublishTimeMs int    `json:"CommitAndPublishTimeMs"`
}

// createTable prints (but does not execute) the Doris CREATE TABLE statement
// for table uk_test_<batchName> with extraColumns additional varchar columns.
// mode "unique" produces a UNIQUE KEY table; anything else a DUPLICATE KEY
// table. It always returns nil.
func createTable(batchName string, extraColumns int, mode string) error {
	tableName := fmt.Sprintf("uk_test_%s", batchName)

	// Fixed schema columns, followed by the configurable extra columns.
	columns := make([]string, 0, 6+extraColumns)
	columns = append(columns,
		"`uk_id` decimal(22,0) NULL",
		"`cust_id` decimal(22,0) NULL",
		"`channel_id` decimal(16,0) NULL",
		"`create_date` datetime NULL",
		"`status_cd` varchar(20) NULL",
		"`status_date` datetime NULL",
	)
	for i := 1; i <= extraColumns; i++ {
		columns = append(columns, fmt.Sprintf("`col_%d` varchar(255) NULL", i))
	}

	keyClause := "DUPLICATE KEY(`uk_id`)"
	if mode == "unique" {
		keyClause = "UNIQUE KEY(uk_id)"
	}

	createTableSQL := fmt.Sprintf(`
    CREATE TABLE IF NOT EXISTS %s (
        %s
    ) ENGINE=OLAP
    %s
    DISTRIBUTED BY HASH(uk_id) BUCKETS 50;
    `, tableName, strings.Join(columns, ", "), keyClause)

	fmt.Println(createTableSQL)
	return nil
}

// randomString returns a random alphanumeric string of the given length,
// drawn uniformly from [a-zA-Z0-9].
func randomString(length int) string {
	const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	buf := make([]byte, length)
	for i := 0; i < length; i++ {
		buf[i] = charset[rand.Intn(len(charset))]
	}
	return string(buf)
}

// getRandomStatusCd picks one of five fixed status codes uniformly at random.
func getRandomStatusCd() string {
	options := [...]string{"ok", "abort", "error", "good", "cancel"}
	return options[rand.Intn(len(options))]
}

// randomDateInRange returns a random datetime string ("2006-01-02 15:04:05")
// uniformly distributed in [startStr, endStr), where both bounds are
// "2006-01-02" dates. When the bounds are equal it returns the start instant;
// a start after the end is an error. (The original called rand.Int63n(0) and
// panicked for equal bounds.)
func randomDateInRange(startStr, endStr string) (string, error) {
	startDate, err := time.Parse("2006-01-02", startStr)
	if err != nil {
		return "", err
	}
	endDate, err := time.Parse("2006-01-02", endStr)
	if err != nil {
		return "", err
	}
	startUnix := startDate.Unix()
	endUnix := endDate.Unix()
	if endUnix < startUnix {
		return "", fmt.Errorf("start date %s is after end date %s", startStr, endStr)
	}
	randomUnix := startUnix
	if endUnix > startUnix {
		// rand.Int63n requires a strictly positive argument.
		randomUnix += rand.Int63n(endUnix - startUnix)
	}
	return time.Unix(randomUnix, 0).Format("2006-01-02 15:04:05"), nil
}

// generateMockDirName returns the directory name that holds the mock data
// files for the given table.
func generateMockDirName(tableName string) string {
	return "mock_data_" + tableName
}

// generateMockData creates fileCount JSON-lines files of recordsPerFile
// records each for table uk_test_<batchName>, writing them concurrently into
// the table's mock directory. ukIdDuplicationRate is the probability that a
// record reuses an earlier uk_id. The unique and total record counts are
// persisted next to the data for later quality checks.
//
// Fixes over the original: the progress-reporting goroutine is shut down via
// a done channel (previously it blocked forever on the stopped ticker's
// channel and leaked), and the worker count is clamped to >= 1 (previously a
// single-CPU host produced a zero-capacity semaphore and deadlocked).
func generateMockData(batchName string, extraColumns int, ukIdDuplicationRate float64, fileCount int, recordsPerFile int) error {
	tableName := fmt.Sprintf("uk_test_%s", batchName)
	recordCount := fileCount * recordsPerFile
	ukIds := make([]int64, recordCount)
	uniqueCount := 0

	// Pre-generate every uk_id so that duplicates can reference earlier entries.
	for i := range ukIds {
		if i > 0 && rand.Float64() < ukIdDuplicationRate {
			ukIds[i] = ukIds[rand.Intn(i)]
		} else {
			ukIds[i] = rand.Int63()
			uniqueCount++
		}
	}

	mockDir := generateMockDirName(tableName)
	if err := prepareMockDir(mockDir); err != nil {
		return err
	}

	// Use half the CPUs but never fewer than one worker; a zero-capacity
	// semaphore would block the first send below forever.
	concurrency := runtime.NumCPU() / 2
	if concurrency < 1 {
		concurrency = 1
	}
	var wg sync.WaitGroup
	sem := make(chan struct{}, concurrency)
	var completedFiles int64

	// Periodic progress reporter; closed via done once all files are written.
	done := make(chan struct{})
	ticker := time.NewTicker(2 * time.Second)
	go func() {
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				completed := atomic.LoadInt64(&completedFiles)
				progress := float64(completed) / float64(fileCount) * 100
				log.Printf("Data %s generation progress: %.2f%% (%d/%d files completed)", batchName, progress, completed, fileCount)
			}
		}
	}()

	for i := 0; i < fileCount; i++ {
		wg.Add(1)
		sem <- struct{}{}
		go func(index int) {
			defer wg.Done()
			defer func() { <-sem }()

			fileName := fmt.Sprintf("%s/%s_%d.json", mockDir, tableName, index)
			if err := writeMockFile(fileName, ukIds, index, recordsPerFile, extraColumns); err != nil {
				log.Printf("Failed to write file %s: %v", fileName, err)
			}
			atomic.AddInt64(&completedFiles, 1)
		}(i)
	}

	wg.Wait()
	close(done) // stop the progress goroutine
	log.Printf("Data %s generation progress: 100%% (%d/%d files completed)", batchName, fileCount, fileCount)
	fmt.Printf("Mock data %s generated successfully. total record: %d, unique record: %d\n", batchName, recordCount, uniqueCount)

	if err := writeUniqueCount(mockDir, uniqueCount); err != nil {
		return err
	}

	if err := writeTotalCount(mockDir, recordCount); err != nil {
		return err
	}

	return nil
}

// prepareMockDir ensures mockDir exists and is free of stale files: a
// missing directory is created, an existing one has its regular files
// removed (subdirectories are left alone).
func prepareMockDir(mockDir string) error {
	entries, err := os.ReadDir(mockDir)
	if os.IsNotExist(err) {
		return os.MkdirAll(mockDir, 0755)
	}
	if err != nil {
		return err
	}

	for _, entry := range entries {
		if entry.IsDir() {
			continue
		}
		if err := os.Remove(fmt.Sprintf("%s/%s", mockDir, entry.Name())); err != nil {
			return err
		}
	}
	log.Printf("removed old mock data")
	return nil
}

// writeMockFile writes one JSON-lines mock data file containing records
// [index*recordsPerFile, (index+1)*recordsPerFile) of the pre-generated
// uk_id slice, plus extraColumns random varchar columns per record.
// Improvements: json.Encoder replaces Marshal+manual newline (the encoder
// appends '\n' after each object, producing the read_json_by_line format),
// and col_str is renamed to the idiomatic colStr.
func writeMockFile(fileName string, ukIds []int64, index, recordsPerFile, extraColumns int) error {
	file, err := os.Create(fileName)
	if err != nil {
		return fmt.Errorf("failed to create file %s: %w", fileName, err)
	}
	defer file.Close()

	enc := json.NewEncoder(file)

	start := index * recordsPerFile
	end := start + recordsPerFile
	for j := start; j < end; j++ {
		createDate, err := randomDateInRange("2025-01-01", "2025-03-01")
		if err != nil {
			return fmt.Errorf("failed to generate random date: %w", err)
		}

		record := map[string]interface{}{
			"uk_id":       ukIds[j],
			"cust_id":     rand.Int63(),
			"channel_id":  rand.Intn(20000),
			"create_date": createDate,
			"status_cd":   getRandomStatusCd(),
			"status_date": createDate,
		}
		// All extra columns of one record share a single random value.
		colStr := randomString(20)
		for k := 1; k <= extraColumns; k++ {
			record[fmt.Sprintf("col_%d", k)] = colStr
		}

		if err := enc.Encode(record); err != nil {
			return fmt.Errorf("failed to write to file %s: %w", fileName, err)
		}
	}
	return nil
}

// writeCountToFile persists count as decimal text to mockDir/fileName,
// truncating any existing file. os.WriteFile replaces the original
// Create/Write/deferred-Close sequence, which silently ignored Close errors
// (a write could be lost without being reported).
func writeCountToFile(mockDir, fileName string, count int) error {
	filePath := fmt.Sprintf("%s/%s", mockDir, fileName)
	if err := os.WriteFile(filePath, []byte(fmt.Sprintf("%d", count)), 0644); err != nil {
		return fmt.Errorf("failed to write count to file: %w", err)
	}
	return nil
}

// readCountFromFile reads a decimal count previously written by
// writeCountToFile from mockDir/fileName. Error messages now report the
// actual file path instead of hard-coding "unique count" — the original
// produced misleading errors when reading the "total" count file.
func readCountFromFile(mockDir, fileName string) (int, error) {
	filePath := fmt.Sprintf("%s/%s", mockDir, fileName)
	data, err := os.ReadFile(filePath)
	if err != nil {
		return 0, fmt.Errorf("failed to read count file %s: %w", filePath, err)
	}

	var count int
	if _, err := fmt.Sscanf(string(data), "%d", &count); err != nil {
		return 0, fmt.Errorf("failed to parse count from %s: %w", filePath, err)
	}
	return count, nil
}

// readUniqueCount returns the persisted number of distinct uk_id values for
// the mock data set stored in mockDir.
func readUniqueCount(mockDir string) (int, error) {
	return readCountFromFile(mockDir, "unique")
}

// readTotalCount returns the persisted total record count for the mock data
// set stored in mockDir.
func readTotalCount(mockDir string) (int, error) {
	return readCountFromFile(mockDir, "total")
}

// writeUniqueCount persists the number of distinct uk_id values generated
// for the mock data set in mockDir.
func writeUniqueCount(mockDir string, uniqueCount int) error {
	return writeCountToFile(mockDir, "unique", uniqueCount)
}

// writeTotalCount persists the total record count generated for the mock
// data set in mockDir.
func writeTotalCount(mockDir string, totalCount int) error {
	return writeCountToFile(mockDir, "total", totalCount)
}

// handleImportError records a failed stream-load attempt: the label is added
// to failedLabels (so the final summary can report it) and the failure is
// logged. message describes the step that failed (e.g. "send request").
func handleImportError(failedLabels *sync.Map, label string, filePath string, err error, message string, url string) {
	log.Printf("Failed to %s for file %s with label %s. Url: %s, Error details: %v", message, filePath, label, url, err)
	failedLabels.Store(label, true)
}

// handleImportResponse consumes a stream-load HTTP response: it reads and
// parses the JSON body, records a failure via handleImportError when the
// body is unreadable, unparseable, or Status != "Success", and otherwise
// appends the reported LoadTimeMs to durations (guarded by durationsMutex).
// The response body is always closed.
func handleImportResponse(failedLabels *sync.Map, label string, filePath string, resp *http.Response, durations *[]int64, durationsMutex *sync.Mutex, url string) {
	defer resp.Body.Close()

	body, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		handleImportError(failedLabels, label, filePath, readErr, "read response", url)
		return
	}

	var result LoadResponse
	if err := json.Unmarshal(body, &result); err != nil {
		// Surface the raw body as the error so the caller sees what Doris sent.
		handleImportError(failedLabels, label, filePath, fmt.Errorf("%s", body), "parse response JSON", url)
		return
	}

	if result.Status != "Success" {
		handleImportError(failedLabels, label, filePath, fmt.Errorf("%s", body), "import data", url)
		return
	}

	durationsMutex.Lock()
	*durations = append(*durations, int64(result.LoadTimeMs))
	durationsMutex.Unlock()
	log.Printf("Import succeeded for label %s. Load Time: %d ms", label, result.LoadTimeMs)
}

// importDataConcurrently stream-loads every .json file in the mock data
// directory of table uk_test_<batchName> into Doris via the HTTP
// _stream_load API, keeping at most `concurrency` uploads in flight. It logs
// per-load and aggregate timing statistics and the labels of any failed
// loads. Failed files are not retried, and failures do not cause a non-nil
// return — the error return covers only the initial directory read.
func importDataConcurrently(concurrency int, ip, port, username, password, dbName, batchName string) error {
	tableName := fmt.Sprintf("uk_test_%s", batchName)
	var wg sync.WaitGroup
	sem := make(chan struct{}, concurrency) // bounded-concurrency semaphore
	loadURL := fmt.Sprintf("http://%s:%s/api/%s/%s/_stream_load", ip, port, dbName, tableName)
	log.Printf("loadURL:%s", loadURL)
	mockDir := generateMockDirName(tableName)
	files, err := os.ReadDir(mockDir)
	if err != nil {
		return fmt.Errorf("failed to read %s directory: %w", mockDir, err)
	}
	var counter int64         // monotonically increasing suffix for unique labels
	var failedLabels sync.Map // set of labels whose load failed
	var durations []int64     // LoadTimeMs of each successful load
	var durationsMutex sync.Mutex

	// processFile uploads one file; it runs as a goroutine gated by sem.
	processFile := func(filePath string) {
		defer wg.Done()
		defer func() { <-sem }()

		file, err := os.Open(filePath)
		if err != nil {
			log.Printf("Failed to open file %s: %v", filePath, err)
			return
		}
		defer file.Close()

		fileContent, err := io.ReadAll(file)
		if err != nil {
			log.Printf("Failed to read file %s: %v", filePath, err)
			return
		}

		req, err := http.NewRequest("PUT", loadURL, bytes.NewReader(fileContent))
		if err != nil {
			log.Printf("Failed to create request for %s: %v", filePath, err)
			return
		}

		req.SetBasicAuth(username, password)
		// Label format: <random>_<table>_<sequence>. Doris uses the label to
		// deduplicate load jobs, so it must be unique per attempt.
		currentCounter := atomic.AddInt64(&counter, 1)
		uniqueID := randomString(8)
		label := fmt.Sprintf("%s_%s_%d", uniqueID, tableName, currentCounter)
		req.Header.Set("label", label)
		req.Header.Set("Expect", "100-continue")
		req.Header.Set("format", "json")
		req.Header.Set("strip_outer_array", "false")
		req.Header.Set("read_json_by_line", "true")
		var redirectHost []string
		// NOTE(review): a fresh http.Client per file prevents connection
		// reuse; it appears necessary here because CheckRedirect captures
		// the per-request redirectHost slice — confirm before consolidating
		// into a shared client.
		client := &http.Client{
			Timeout: 10 * time.Minute,
			CheckRedirect: func(req *http.Request, via []*http.Request) error {
				if len(via) >= 10 {
					log.Printf("Too many redirects, stopping at %s", req.URL.String())
					return http.ErrUseLastResponse
				}
				// Record each redirect hop (FE -> BE) for error reporting.
				redirectHost = append(redirectHost, req.URL.Host)
				return nil
			},
		}
		resp, err := client.Do(req)
		if err != nil {
			handleImportError(&failedLabels, label, filePath, err, "send request", strings.Join(redirectHost, ","))
			return
		}
		handleImportResponse(&failedLabels, label, filePath, resp, &durations, &durationsMutex, strings.Join(redirectHost, ","))
	}

	start := time.Now()
	for _, file := range files {
		if file.IsDir() {
			continue
		}
		if strings.HasSuffix(file.Name(), ".json") {
			filePath := fmt.Sprintf("%s/%s", mockDir, file.Name())
			wg.Add(1)
			sem <- struct{}{}
			go processFile(filePath)
		}
	}

	wg.Wait()
	elapsed := time.Since(start)
	elapsedMs := elapsed.Milliseconds()
	log.Printf("Data import took %d ms", elapsedMs)

	// Aggregate min/max/average of the LoadTimeMs values reported by Doris.
	if len(durations) > 0 {
		minDuration := durations[0]
		maxDuration := durations[0]
		totalDuration := int64(0)

		for _, duration := range durations {
			if duration < minDuration {
				minDuration = duration
			}
			if duration > maxDuration {
				maxDuration = duration
			}
			totalDuration += duration
		}

		averageDuration := totalDuration / int64(len(durations))
		log.Printf("Import for %s time statistics: Min: %d ms, Max: %d ms, Average: %d ms", tableName, minDuration, maxDuration, averageDuration)
	}

	var failedCount int64
	var failedLabelList []string
	failedLabels.Range(func(key, value interface{}) bool {
		failedCount++
		failedLabelList = append(failedLabelList, key.(string))
		return true
	})

	if failedCount > 0 {
		log.Printf("Total import failures: %d. Failed labels: %v", failedCount, failedLabelList)
	} else {
		log.Printf("Data imported all successfully for %s !", tableName)
	}
	return nil
}

// openDatabaseConnection creates a *sql.DB handle for the MySQL protocol
// endpoint at ip:port. Note that sql.Open only validates its arguments — no
// connection is dialed until the pool is first used.
//
// Fixes over the original: the log line no longer prints the full DSN, which
// leaked the password into logs, and the "dns" typo is gone.
func openDatabaseConnection(username, password, ip, port string) (*sql.DB, error) {
	dsn := fmt.Sprintf("%s:%s@tcp(%s:%s)/", username, password, ip, port)
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		return nil, err
	}
	// Log the target without credentials.
	log.Printf("Database connection pool created for tcp(%s:%s)", ip, port)
	return db, nil
}

// getActualCount returns the row count of dbName.tableName. QueryRow
// replaces the original Query/Next/Err boilerplate, and the Doris-specific
// `count()` is written as the standard, equivalent `count(*)` so the query
// also works against plain MySQL. The identifiers are interpolated directly
// (not parameterized); callers supply them from trusted configuration.
func getActualCount(db *sql.DB, dbName, tableName string) (int, error) {
	var actualCount int
	query := fmt.Sprintf("SELECT count(*) FROM %s.%s", dbName, tableName)
	if err := db.QueryRow(query).Scan(&actualCount); err != nil {
		return 0, fmt.Errorf("failed to query count from table %s: %w", tableName, err)
	}
	return actualCount, nil
}

// compareCounts logs whether the generated (mock) record count matches the
// count actually present in the table; it only reports, it does not fail.
func compareCounts(tableName string, mockCount, actualCount int) {
	if mockCount != actualCount {
		log.Printf("Data quality check failed for table %s. Mock count: %d, Actual count: %d", tableName, mockCount, actualCount)
		return
	}
	log.Printf("Data quality check passed for table %s. Mock count: %d, Actual count: %d", tableName, mockCount, actualCount)
}

// getMockCount returns the expected row count for a table: in "unique" mode
// the deduplicated uk_id count (UNIQUE KEY tables collapse duplicates),
// otherwise the total generated record count.
func getMockCount(mockDir, mode string) (int, error) {
	if mode == "unique" {
		return readUniqueCount(mockDir)
	}
	return readTotalCount(mockDir)
}

func checkDataQulity(ip, port, username, password, dbName, batchName, mode string) error {
	tableName := fmt.Sprintf("uk_test_%s", batchName)
	mockDir := generateMockDirName(tableName)
	mockCount, err := getMockCount(mockDir, mode)
	if err != nil {
		return fmt.Errorf("failed to get mock count: %w", err)
	}
	db, err := openDatabaseConnection(username, password, ip, port)
	if err != nil {
		return fmt.Errorf("failed to open database connection: %w", err)
	}
	defer db.Close()
	actualCount, err := getActualCount(db, dbName, tableName)
	if err != nil {
		return fmt.Errorf("failed to query count from table %s: %w", tableName, err)
	}
	compareCounts(tableName, mockCount, actualCount)
	return nil
}

// BatchcheckDataQulity runs the mock-vs-actual row count comparison for
// batches 1..batch over a single shared database connection, stopping at the
// first error. (The exported, misspelled name is kept for callers.)
func BatchcheckDataQulity(batch int, ip, port, username, password, dbName, mode string) error {
	db, err := openDatabaseConnection(username, password, ip, port)
	if err != nil {
		return fmt.Errorf("failed to open database connection: %w", err)
	}
	defer db.Close()

	for i := 1; i <= batch; i++ {
		tableName := fmt.Sprintf("uk_test_%s", generateMockBatchName(i))

		mockCount, err := getMockCount(generateMockDirName(tableName), mode)
		if err != nil {
			return fmt.Errorf("failed to get mock count: %w", err)
		}

		actualCount, err := getActualCount(db, dbName, tableName)
		if err != nil {
			return fmt.Errorf("failed to query count from table %s: %w", tableName, err)
		}

		compareCounts(tableName, mockCount, actualCount)
	}
	return nil
}

// generateMockBatchName returns the canonical batch name for batch index idx.
func generateMockBatchName(idx int) string {
	return "batch_" + fmt.Sprint(idx)
}

// batchCreateTable prints the CREATE TABLE statement for batches 1..batch.
// The createTable error (previously ignored) is now logged; generation
// continues with the remaining batches.
func batchCreateTable(batch int, extraColumns int, mode string) {
	for i := 1; i <= batch; i++ {
		if err := createTable(generateMockBatchName(i), extraColumns, mode); err != nil {
			log.Printf("Failed to create table for batch %d: %v", i, err)
		}
	}
}

// batchGenerateMockData generates mock data for batches 1..batch
// sequentially. The generateMockData error (previously ignored) is now
// logged; generation continues with the remaining batches.
func batchGenerateMockData(batch int, extraColumns int, ukIdDuplicationRate float64, fileCount int, recordsPerFile int) {
	for i := 1; i <= batch; i++ {
		batchName := generateMockBatchName(i)
		if err := generateMockData(batchName, extraColumns, ukIdDuplicationRate, fileCount, recordsPerFile); err != nil {
			log.Printf("Failed to generate mock data for batch %s: %v", batchName, err)
		}
	}
}

// batchImportData runs importDataConcurrently for batches 1..batch in
// parallel goroutines and logs the min/max/average wall-clock time per
// batch plus the total elapsed time.
func batchImportData(batch int, concurrency int, ip, port, username, password, dbName string) {
	start := time.Now()
	var wg sync.WaitGroup
	// NOTE(review): the semaphore capacity equals the number of batches, so
	// it never blocks — every batch runs concurrently (each internally
	// limited to `concurrency` uploads). Confirm this is intended before
	// relying on it to bound total load.
	sem := make(chan struct{}, batch)
	var durations []int64 // per-batch elapsed milliseconds (including failures)
	var durationsMutex sync.Mutex

	for i := 1; i <= batch; i++ {
		wg.Add(1)
		sem <- struct{}{}

		go func(index int) {
			defer func() {
				<-sem
				wg.Done()
			}()

			batchName := generateMockBatchName(index)
			batchStart := time.Now()
			err := importDataConcurrently(concurrency, ip, port, username, password, dbName, batchName)
			elapsed := time.Since(batchStart).Milliseconds()

			durationsMutex.Lock()
			durations = append(durations, elapsed)
			durationsMutex.Unlock()

			if err != nil {
				log.Printf("Failed to import data for batch %s: %v", batchName, err)
			} else {
				log.Printf("Import data for batch %s in %d ms", batchName, elapsed)
			}
		}(i)
	}

	wg.Wait()

	// Aggregate per-batch wall-clock statistics.
	if len(durations) > 0 {
		minDuration := durations[0]
		maxDuration := durations[0]
		totalDuration := int64(0)

		for _, duration := range durations {
			if duration < minDuration {
				minDuration = duration
			}
			if duration > maxDuration {
				maxDuration = duration
			}
			totalDuration += duration
		}

		averageDuration := totalDuration / int64(len(durations))
		totalElapsed := time.Since(start).Milliseconds()
		log.Printf("Batch generation time statistics: Min: %d ms, Max: %d ms, Average: %d ms, Total: %d ms", minDuration, maxDuration, averageDuration, totalElapsed)
	}
}

// generateInsertSQL builds one multi-row INSERT statement containing
// insertCount randomly generated rows for table uk_test_<batchName>. Values
// are embedded directly into the SQL text (no placeholders); the generated
// values use only digits, letters, dashes, colons and spaces.
func generateInsertSQL(dbName, batchName string, extraColumns int, insertCount int) (string, error) {
	tableName := fmt.Sprintf("uk_test_%s", batchName)

	// Fixed schema columns followed by the configurable extra columns.
	columns := make([]string, 0, 6+extraColumns)
	columns = append(columns, "uk_id", "cust_id", "channel_id", "create_date", "status_cd", "status_date")
	for i := 1; i <= extraColumns; i++ {
		columns = append(columns, fmt.Sprintf("col_%d", i))
	}

	rows := make([]string, 0, insertCount)
	for i := 0; i < insertCount; i++ {
		ukId := rand.Int63()
		custId := rand.Int63()
		channelId := rand.Intn(20000)
		createDate, err := randomDateInRange("2025-01-01", "2025-03-01")
		if err != nil {
			return "", fmt.Errorf("failed to generate random date: %w", err)
		}
		statusCd := getRandomStatusCd()

		var row strings.Builder
		fmt.Fprintf(&row, "(%d, %d, %d, '%s', '%s', '%s'",
			ukId, custId, channelId, createDate, statusCd, createDate)
		for j := 1; j <= extraColumns; j++ {
			fmt.Fprintf(&row, ", '%s'", randomString(20))
		}
		row.WriteString(")")
		rows = append(rows, row.String())
	}

	return fmt.Sprintf("INSERT INTO %s.%s (%s) VALUES %s;",
		dbName,
		tableName,
		strings.Join(columns, ", "),
		strings.Join(rows, ", ")), nil
}

// InsertPerformance inserts totalCount rows into uk_test_<batchName> in
// statements of up to batchSize rows each and logs timing. concurrency <= 1
// runs sequentially and returns on the first error; otherwise up to
// `concurrency` statements execute in parallel and the first error is
// returned after all workers finish.
//
// Fixes over the original: the spawning loop previously read the shared
// totalInserted in its condition while worker goroutines mutated it — a data
// race that also made the number of spawned batches nondeterministic. Batch
// sizes are now computed deterministically in the spawning loop, and
// progress is only counted after a successful insert. The local `sql`
// variable (which shadowed the database/sql package name) is renamed.
func InsertPerformance(db *sql.DB, dbName, batchName string, extraColumns int, totalCount int, batchSize int, concurrency int) error {
	start := time.Now()

	if concurrency <= 1 {
		totalInserted := 0
		for totalInserted < totalCount {
			count := batchSize
			if totalInserted+count > totalCount {
				count = totalCount - totalInserted
			}

			stmt, err := generateInsertSQL(dbName, batchName, extraColumns, count)
			if err != nil {
				return err
			}
			if _, err := db.Exec(stmt); err != nil {
				return fmt.Errorf("failed to execute insert: %w", err)
			}

			totalInserted += count
			log.Printf("Inserted %d records (total: %d/%d)", count, totalInserted, totalCount)
		}
	} else {
		var wg sync.WaitGroup
		sem := make(chan struct{}, concurrency)
		var firstErr error
		var errOnce sync.Once
		var mutex sync.Mutex
		totalInserted := 0

		// Partition the work up front so no shared state is read unlocked.
		for remaining := totalCount; remaining > 0; {
			count := batchSize
			if count > remaining {
				count = remaining
			}
			remaining -= count

			wg.Add(1)
			sem <- struct{}{}
			go func(count int) {
				defer wg.Done()
				defer func() { <-sem }()

				stmt, err := generateInsertSQL(dbName, batchName, extraColumns, count)
				if err != nil {
					errOnce.Do(func() { firstErr = err })
					return
				}
				if _, err := db.Exec(stmt); err != nil {
					errOnce.Do(func() { firstErr = fmt.Errorf("failed to execute insert: %w", err) })
					return
				}

				mutex.Lock()
				totalInserted += count
				progress := totalInserted
				mutex.Unlock()
				log.Printf("Inserted %d records (total: %d/%d)", count, progress, totalCount)
			}(count)
		}

		wg.Wait()
		if firstErr != nil {
			return firstErr
		}
	}

	elapsed := time.Since(start)
	log.Printf("Finished inserting %d records in %v (batch size: %d, concurrency: %d)",
		totalCount, elapsed, batchSize, concurrency)
	return nil
}

// Command-line flag values shared by all subcommands; each subcommand binds
// the subset it needs in init(). The same variable may be registered on
// several commands — only one subcommand runs per process, so they never
// conflict.
var (
	dbName              string
	batchName           string
	extraColumns        int
	recordsPerFile      int
	ukIdDuplicationRate float64
	fileCount           int
	concurrency         int
	dorisIP             string
	dorisPort           string  // FE HTTP port (stream load)
	dorisQueryPort      string  // MySQL-protocol query port
	dorisUsername       string
	dorisPassword       string
	batch               int
	totalCount          int
	mode                string
)

// createTableCmd prints the CREATE TABLE statement for a single batch.
var createTableCmd = &cobra.Command{
	Use:   "create",
	Short: "Generate create table SQL",
	Run: func(cmd *cobra.Command, args []string) {
		// createTable only prints the DDL; it does not execute it.
		createTable(batchName, extraColumns, mode)
	},
}

// generateMockDataCmd generates the mock data files for a single batch.
var generateMockDataCmd = &cobra.Command{
	Use:   "mock",
	Short: "Generate mock data",
	Run: func(cmd *cobra.Command, args []string) {
		// Generate mock data files into the per-table directory.
		err := generateMockData(batchName, extraColumns, ukIdDuplicationRate, fileCount, recordsPerFile)
		if err != nil {
			fmt.Println("Failed to generate mock data:", err)
			return
		}
	},
}

// importDataCmd stream-loads one batch's mock data files into Doris.
var importDataCmd = &cobra.Command{
	Use:   "import",
	Short: "Import data to Doris",
	Run: func(cmd *cobra.Command, args []string) {
		// Concurrently stream-load the batch's files into Doris.
		err := importDataConcurrently(concurrency, dorisIP, dorisPort, dorisUsername, dorisPassword, dbName, batchName)
		if err != nil {
			fmt.Println("Failed to import data:", err)
			return
		}
	},
}

// rootCmd is the top-level CLI entry; all subcommands attach to it in init().
var rootCmd = &cobra.Command{
	Use:   "doris-mock-data",
	Short: "Generate mock data and import to Apache Doris",
	Long:  "This tool helps you generate mock data and import it to Apache Doris using Stream Load.",
}

// batchCreateTableCmd prints CREATE TABLE statements for batches 1..batch.
var batchCreateTableCmd = &cobra.Command{
	Use:   "batch-create",
	Short: "Batch create tables",
	Run: func(cmd *cobra.Command, args []string) {
		batchCreateTable(batch, extraColumns, mode)
	},
}

// batchGenerateMockDataCmd generates mock data for batches 1..batch.
var batchGenerateMockDataCmd = &cobra.Command{
	Use:   "batch-mock",
	Short: "Batch generate mock data",
	Run: func(cmd *cobra.Command, args []string) {
		batchGenerateMockData(batch, extraColumns, ukIdDuplicationRate, fileCount, recordsPerFile)
	},
}

// batchImportDataCmd imports all batches' mock data into Doris in parallel.
var batchImportDataCmd = &cobra.Command{
	Use:   "batch-import",
	Short: "Batch import data to Doris",
	Run: func(cmd *cobra.Command, args []string) {
		batchImportData(batch, concurrency, dorisIP, dorisPort, dorisUsername, dorisPassword, dbName)
	},
}

// checkDataQualityCmd compares one batch's expected row count against the
// actual count in Doris (over the MySQL-protocol query port).
var checkDataQualityCmd = &cobra.Command{
	Use:   "check",
	Short: "Check data quality",
	Run: func(cmd *cobra.Command, args []string) {
		err := checkDataQulity(dorisIP, dorisQueryPort, dorisUsername, dorisPassword, dbName, batchName, mode)
		if err != nil {
			fmt.Println("Failed to check data quality:", err)
		}
	},
}

// insertTestCmd measures INSERT INTO ... VALUES performance over the query
// (MySQL-protocol) port. Note: the --batch flag is reused here as the
// per-statement batch size, not as a batch count.
var insertTestCmd = &cobra.Command{
	Use:   "insert",
	Short: "Test INSERT INTO VALUES performance",
	Run: func(cmd *cobra.Command, args []string) {
		db, err := openDatabaseConnection(dorisUsername, dorisPassword, dorisIP, dorisQueryPort)
		if err != nil {
			fmt.Println("Failed to connect to database:", err)
			return
		}
		defer db.Close()

		err = InsertPerformance(db, dbName, batchName, extraColumns, totalCount, batch, concurrency)
		if err != nil {
			fmt.Println("Insert test failed:", err)
		}
	},
}

// batchcheckDataQulityCmd runs the data quality check for batches 1..batch.
var batchcheckDataQulityCmd = &cobra.Command{
	Use:   "batch-check",
	Short: "Batch check data quality",
	Run: func(cmd *cobra.Command, args []string) {
		err := BatchcheckDataQulity(batch, dorisIP, dorisQueryPort, dorisUsername, dorisPassword, dbName, mode)
		if err != nil {
			fmt.Println("Failed to check data quality:", err)
		}
	},
}

// init registers every subcommand on rootCmd and binds its flags. All flags
// write into the shared package-level variables declared above.
func init() {
	// Flags for createTableCmd.
	createTableCmd.Flags().StringVar(&batchName, "batch-name", time.Now().Format("20060102"), "Batch name for table and data generation")
	createTableCmd.Flags().IntVar(&extraColumns, "extra-columns", 1, "Number of extra columns in the table")
	createTableCmd.Flags().StringVar(&mode, "mode", "unique", "table type,such as unique or duplicate")
	rootCmd.AddCommand(createTableCmd)

	// Flags for generateMockDataCmd.
	generateMockDataCmd.Flags().StringVar(&batchName, "batch-name", time.Now().Format("20060102"), "Batch name for table and data generation")
	generateMockDataCmd.Flags().IntVar(&extraColumns, "extra-columns", 1, "Number of extra columns in the table")
	generateMockDataCmd.Flags().IntVar(&recordsPerFile, "record-per-file", 1000, "Number of records to generate in each file")
	generateMockDataCmd.Flags().IntVar(&fileCount, "file-count", 5, "Number of files to split the generated data into")
	generateMockDataCmd.Flags().Float64Var(&ukIdDuplicationRate, "uk-id-duplication-rate", 0.2, "Duplication rate for uk_id column")
	rootCmd.AddCommand(generateMockDataCmd)

	// Flags for importDataCmd.
	importDataCmd.Flags().IntVar(&concurrency, "concurrency", 3, "Concurrency level for data import")
	importDataCmd.Flags().StringVar(&dorisIP, "ip", "127.0.0.1", "Doris IP address")
	importDataCmd.Flags().StringVar(&dorisPort, "port", "8030", "Doris port")
	importDataCmd.Flags().StringVar(&dorisUsername, "user", "root", "Doris username")
	importDataCmd.Flags().StringVar(&dorisPassword, "password", "", "Doris password")
	importDataCmd.Flags().StringVar(&dbName, "db", "test", "db name")
	importDataCmd.Flags().StringVar(&batchName, "batch-name", time.Now().Format("20060102"), "Batch name for table and data generation")
	rootCmd.AddCommand(importDataCmd)

	// Flags for batchCreateTableCmd.
	batchCreateTableCmd.Flags().IntVar(&batch, "batch", 1, "Number of batches to create")
	batchCreateTableCmd.Flags().IntVar(&extraColumns, "extra-columns", 1, "Number of extra columns in the table")
	batchCreateTableCmd.Flags().StringVar(&mode, "mode", "unique", "table type,such as unique or duplicate")
	rootCmd.AddCommand(batchCreateTableCmd)

	// Flags for batchGenerateMockDataCmd.
	batchGenerateMockDataCmd.Flags().IntVar(&batch, "batch", 1, "Number of batches to generate")
	batchGenerateMockDataCmd.Flags().IntVar(&extraColumns, "extra-columns", 1, "Number of extra columns in the table")
	batchGenerateMockDataCmd.Flags().IntVar(&recordsPerFile, "record-per-file", 1000, "Number of records to generate in each file")
	batchGenerateMockDataCmd.Flags().IntVar(&fileCount, "file-count", 5, "Number of files to split the generated data into")
	batchGenerateMockDataCmd.Flags().Float64Var(&ukIdDuplicationRate, "uk-id-duplication-rate", 0.2, "Duplication rate for uk_id column")
	rootCmd.AddCommand(batchGenerateMockDataCmd)

	// Flags for batchImportDataCmd.
	batchImportDataCmd.Flags().IntVar(&batch, "batch", 1, "Number of batches to import")
	batchImportDataCmd.Flags().IntVar(&concurrency, "concurrency", 3, "Concurrency level for data import")
	batchImportDataCmd.Flags().StringVar(&dorisIP, "ip", "127.0.0.1", "Doris IP address")
	batchImportDataCmd.Flags().StringVar(&dorisPort, "port", "8030", "Doris port")
	batchImportDataCmd.Flags().StringVar(&dorisUsername, "user", "root", "Doris username")
	batchImportDataCmd.Flags().StringVar(&dorisPassword, "password", "", "Doris password")
	batchImportDataCmd.Flags().StringVar(&dbName, "db", "test", "db name")
	rootCmd.AddCommand(batchImportDataCmd)

	// Flags for checkDataQualityCmd (uses the query port, default 9030).
	checkDataQualityCmd.Flags().StringVar(&dorisIP, "ip", "127.0.0.1", "Doris IP address")
	checkDataQualityCmd.Flags().StringVar(&dorisQueryPort, "port", "9030", "Doris port")
	checkDataQualityCmd.Flags().StringVar(&dorisUsername, "user", "root", "Doris username")
	checkDataQualityCmd.Flags().StringVar(&dorisPassword, "password", "", "Doris password")
	checkDataQualityCmd.Flags().StringVar(&dbName, "db", "test", "db name")
	checkDataQualityCmd.Flags().StringVar(&batchName, "batch-name", time.Now().Format("20060102"), "Batch name for table and data generation")
	checkDataQualityCmd.Flags().StringVar(&mode, "mode", "unique", "table type,such as unique or duplicate")
	rootCmd.AddCommand(checkDataQualityCmd)

	// Flags for batchcheckDataQulityCmd.
	batchcheckDataQulityCmd.Flags().IntVar(&batch, "batch", 1, "Number of batches to check")
	batchcheckDataQulityCmd.Flags().StringVar(&dorisIP, "ip", "127.0.0.1", "Doris IP address")
	batchcheckDataQulityCmd.Flags().StringVar(&dorisQueryPort, "port", "9030", "Doris port")
	batchcheckDataQulityCmd.Flags().StringVar(&dorisUsername, "user", "root", "Doris username")
	batchcheckDataQulityCmd.Flags().StringVar(&dorisPassword, "password", "", "Doris password")
	batchcheckDataQulityCmd.Flags().StringVar(&dbName, "db", "test", "db name")
	batchcheckDataQulityCmd.Flags().StringVar(&mode, "mode", "unique", "table type,such as unique or duplicate")
	rootCmd.AddCommand(batchcheckDataQulityCmd)

	// Flags for insertTestCmd (note: --batch doubles as the batch size here).
	insertTestCmd.Flags().StringVar(&batchName, "batch-name", time.Now().Format("20060102"), "Batch name for table")
	insertTestCmd.Flags().StringVar(&dorisIP, "ip", "127.0.0.1", "Doris IP address")
	insertTestCmd.Flags().StringVar(&dorisQueryPort, "port", "9030", "Doris query port")
	insertTestCmd.Flags().StringVar(&dorisUsername, "user", "root", "Doris username")
	insertTestCmd.Flags().StringVar(&dorisPassword, "password", "", "Doris password")
	insertTestCmd.Flags().StringVar(&dbName, "db", "test", "Database name")
	insertTestCmd.Flags().IntVar(&batch, "batch", 1, "Number of batches to import")
	insertTestCmd.Flags().IntVar(&extraColumns, "extra-columns", 1, "Number of extra columns in the table")
	insertTestCmd.Flags().IntVar(&totalCount, "total", 1000, "total count")
	insertTestCmd.Flags().IntVar(&concurrency, "concurrency", 1, "Concurrency level for data import")
	rootCmd.AddCommand(insertTestCmd)
}

// main dispatches to the selected subcommand; any execution error is printed
// and the process exits non-zero.
func main() {
	err := rootCmd.Execute()
	if err == nil {
		return
	}
	fmt.Println(err)
	os.Exit(1)
}
