package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
	"time"

	"go-file-perception-model/internal/config"
	"go-file-perception-model/internal/logger"
	"go-file-perception-model/internal/model"
	"go-file-perception-model/internal/service"
	"go-file-perception-model/pkg/embedding"
	"go-file-perception-model/pkg/vector"
)

func main() {
	// Bring up logging first so the components below can use it.
	logger.InitLogger()

	// Connection settings for the BGE embedding service.
	embedCfg := &embedding.BGEServiceConfig{
		Host:    "localhost",
		Port:    8000,
		APIKey:  "bge-service-secret-key-2023",
		Timeout: 30,
	}
	embedModel, err := embedding.NewBGEModel(embedCfg)
	if err != nil {
		fmt.Printf("Failed to initialize BGE model: %v\n", err)
		return
	}

	// The vector database is optional for this test run; a failure here
	// only downgrades functionality instead of aborting.
	if _, err = vector.NewQdrantDB("localhost", 6333); err != nil {
		fmt.Printf("Warning: Failed to initialize vector database: %v\n", err)
		fmt.Println("Continuing without vector database functionality...")
	}

	// Chunking configuration for the file processor under test.
	idxCfg := &config.FileIndexConfig{
		SupportedExtensions: []string{".md", ".txt"},
		ChunkSize:           1000,
		ChunkOverlap:        200,
		MaxFileSize:         10485760,
		ExcludeDirs:         []string{".git", ".idea"},
		IndexDirectories:    []string{"./data"},
		EnableSummary:       false,
		SummaryLength:       200,
		SemanticChunking:    true,
		MinChunkSize:        100,
		MaxChunkSize:        2000,
		AutoIndexNewFiles:   true,
	}
	processor := service.NewFileProcessor(embedModel, idxCfg)

	// Markdown fixture used for the chunking test.
	testFile := "d:/2025年项目/技术类项目/go文件感知国产模型/test/内容分块测试文本.md"

	// Run the chunking step and time it.
	fmt.Println("开始分块处理...")
	began := time.Now()

	chunks, err := processor.ProcessFile(testFile)
	if err != nil {
		log.Fatalf("处理文件失败: %v", err)
	}

	took := time.Since(began)
	fmt.Printf("分块完成，耗时: %v\n", took)
	fmt.Printf("共生成 %d 个分块\n", len(chunks))

	// Re-read the raw file so the analysis can compare against the source.
	raw, err := os.ReadFile(testFile)
	if err != nil {
		fmt.Printf("Failed to read original file: %v\n", err)
		return
	}

	// Print the per-chunk analysis to the console.
	fmt.Println("\n=== 分块结果分析 ===")
	analyzeChunks(chunks, string(raw))

	// Persist the same analysis to a report file.
	reportPath := "d:/2025年项目/技术类项目/go文件感知国产模型/test/分块结果分析.txt"
	if err = saveAnalysisResult(chunks, string(raw), reportPath); err != nil {
		log.Printf("保存分析结果失败: %v", err)
	} else {
		fmt.Printf("\n分析结果已保存到: %s\n", reportPath)
	}
}

// readFileContent reads filePath line by line and returns its content with
// every line terminated by "\n" (the scanner normalizes "\r\n" endings and a
// missing final newline is added).
func readFileContent(filePath string) (string, error) {
	file, err := os.Open(filePath)
	if err != nil {
		return "", err
	}
	defer file.Close()

	var builder strings.Builder
	scanner := bufio.NewScanner(file)
	// Raise the scanner's default 64 KiB max token size so files containing
	// very long lines don't fail the scan with bufio.ErrTooLong.
	scanner.Buffer(make([]byte, 0, 64*1024), 4*1024*1024)
	for scanner.Scan() {
		builder.WriteString(scanner.Text())
		builder.WriteString("\n")
	}

	if err := scanner.Err(); err != nil {
		return "", err
	}

	return builder.String(), nil
}

// analyzeChunks prints per-chunk details and aggregate statistics to stdout,
// flagging chunk boundaries that appear to fall in the middle of a sentence.
// originalContent is currently unused but kept for interface stability.
func analyzeChunks(chunks []*model.FileChunk, originalContent string) {
	// Guard: the average below divides by len(chunks); the original code
	// panicked with a division by zero when no chunks were produced.
	if len(chunks) == 0 {
		fmt.Printf("\n=== 分块统计 ===\n")
		fmt.Printf("未生成任何分块\n")
		return
	}

	totalChars := 0
	minChunkSize := int(^uint(0) >> 1) // max-int sentinel; always overwritten since chunks is non-empty
	maxChunkSize := 0
	sentenceBreaks := 0
	sentenceBreakDetails := make([]string, 0)

	for i, chunk := range chunks {
		chunkSize := len(chunk.Content)
		totalChars += chunkSize

		if chunkSize < minChunkSize {
			minChunkSize = chunkSize
		}
		if chunkSize > maxChunkSize {
			maxChunkSize = chunkSize
		}

		// Compare this chunk's start against the previous chunk's end to
		// detect a split inside a sentence.
		if i > 0 {
			prevChunk := chunks[i-1]
			prevChunkEnd := strings.TrimSpace(prevChunk.Content)
			currChunkStart := strings.TrimSpace(chunk.Content)

			// Previous chunk should end on a sentence terminator...
			prevEndsWithSentence := endsWithSentenceTerminator(prevChunkEnd)
			// ...and the current chunk should look like a sentence start.
			currStartsWithSentence := startsWithSentenceStart(currChunkStart)

			if !prevEndsWithSentence || !currStartsWithSentence {
				sentenceBreaks++
				// NOTE(review): previews slice by byte offset, so a multi-byte
				// rune may be cut at the boundary; %q keeps output printable.
				detail := fmt.Sprintf("分块 %d-%d 可能在句子中间断开:\n", i, i+1)
				detail += fmt.Sprintf("  前一分块结尾: %q\n", prevChunkEnd[len(prevChunkEnd)-min(50, len(prevChunkEnd)):])
				detail += fmt.Sprintf("  后一分块开头: %q\n", currChunkStart[:min(50, len(currChunkStart))])
				sentenceBreakDetails = append(sentenceBreakDetails, detail)
			}
		}

		// Per-chunk summary.
		fmt.Printf("\n--- 分块 %d ---\n", i+1)
		fmt.Printf("大小: %d 字符\n", chunkSize)
		fmt.Printf("位置: %d-%d\n", chunk.StartPos, chunk.EndPos)
		if chunk.StartLine > 0 && chunk.EndLine > 0 {
			fmt.Printf("行号: %d-%d\n", chunk.StartLine, chunk.EndLine)
		}
		fmt.Printf("内容预览: %q\n", chunk.Content[:min(100, len(chunk.Content))])
	}

	avgChunkSize := totalChars / len(chunks) // safe: len(chunks) > 0 checked above

	fmt.Printf("\n=== 分块统计 ===\n")
	fmt.Printf("总字符数: %d\n", totalChars)
	fmt.Printf("平均分块大小: %d 字符\n", avgChunkSize)
	fmt.Printf("最小分块大小: %d 字符\n", minChunkSize)
	fmt.Printf("最大分块大小: %d 字符\n", maxChunkSize)
	fmt.Printf("可能的句子断裂数: %d\n", sentenceBreaks)

	if sentenceBreaks > 0 {
		fmt.Printf("\n=== 句子断裂详情 ===\n")
		for _, detail := range sentenceBreakDetails {
			fmt.Println(detail)
		}
	}
}

// endsWithSentenceTerminator reports whether text ends with a sentence
// terminator — Chinese 。！？； or ASCII .!?; — treating empty text as
// already terminated.
func endsWithSentenceTerminator(text string) bool {
	if len(text) == 0 {
		return true
	}

	// Bug fix: the original compared only the last BYTE of text against rune
	// literals, so the multi-byte UTF-8 Chinese terminators could never match.
	// Compare by string suffix instead, which is multi-byte safe.
	for _, terminator := range []string{"。", "！", "？", "；", ".", "!", "?", ";"} {
		if strings.HasSuffix(text, terminator) {
			return true
		}
	}
	return false
}



// startsWithSentenceStart reports whether text (after trimming surrounding
// whitespace) begins like a new sentence: an ASCII uppercase letter, a digit,
// a straight quote, or one of the CJK opening brackets 「『（【.
func startsWithSentenceStart(text string) bool {
	trimmed := strings.TrimSpace(text)
	if trimmed == "" {
		return false
	}

	// First check the single-byte ASCII sentence openers.
	switch c := trimmed[0]; {
	case c >= 'A' && c <= 'Z':
		return true
	case c >= '0' && c <= '9':
		return true
	case c == '"', c == '\'':
		return true
	}

	// Then the multi-byte CJK opening punctuation.
	for _, opener := range []string{"「", "『", "（", "【"} {
		if strings.HasPrefix(trimmed, opener) {
			return true
		}
	}
	return false
}

// checkSentenceBreaks scans adjacent chunk pairs and returns a human-readable
// detail string for every boundary that looks like it splits a sentence.
func checkSentenceBreaks(chunks []*model.FileChunk) []string {
	var breaks []string
	// Chinese AND English terminators. The original list held only 。！？ plus
	// the ASCII ';' (while its comment claimed the Chinese semicolon), making
	// it inconsistent with endsWithSentenceTerminator's documented set.
	sentenceEndings := []string{"。", "！", "？", "；", ".", "!", "?", ";"}

	for i := 1; i < len(chunks); i++ {
		prevChunk := chunks[i-1]
		currChunk := chunks[i]
		prevChunkEnd := strings.TrimSpace(prevChunk.Content)
		currChunkStart := strings.TrimSpace(currChunk.Content)

		// Does the previous chunk end on a sentence terminator?
		prevEndsWithSentence := false
		for _, ending := range sentenceEndings {
			if strings.HasSuffix(prevChunkEnd, ending) {
				prevEndsWithSentence = true
				break
			}
		}

		// Does the current chunk look like the start of a new sentence?
		currStartsWithSentence := startsWithSentenceStart(currChunkStart)

		if !prevEndsWithSentence || !currStartsWithSentence {
			// NOTE(review): previews slice by byte offset, so a multi-byte
			// rune may be cut at the boundary; %q keeps the output printable.
			detail := fmt.Sprintf("分块 %d-%d 可能在句子中间断开:\n", i, i+1)
			detail += fmt.Sprintf("  前一分块结尾: %q\n", prevChunkEnd[len(prevChunkEnd)-min(50, len(prevChunkEnd)):])
			detail += fmt.Sprintf("  后一分块开头: %q\n", currChunkStart[:min(50, len(currChunkStart))])
			breaks = append(breaks, detail)
		}
	}

	return breaks
}

// min returns the smaller of the two integers a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}

// saveResults writes every chunk plus the sentence-break analysis to a fixed
// result file, logging a specific message and aborting on the first failed
// write.
func saveResults(chunks []*model.FileChunk, breaks []string) {
	resultFile := "d:/2025年项目/技术类项目/go文件感知国产模型/test/chunk_analysis_result.txt"
	out, err := os.Create(resultFile)
	if err != nil {
		log.Printf("创建结果文件失败: %v", err)
		return
	}
	defer out.Close()

	// emit writes one formatted record; on failure it logs failMsg with the
	// error and reports false so the caller can stop.
	emit := func(failMsg, format string, args ...interface{}) bool {
		if _, werr := fmt.Fprintf(out, format, args...); werr != nil {
			log.Printf("%s: %v", failMsg, werr)
			return false
		}
		return true
	}

	// One record per chunk: header, positions, line span, size, raw content.
	for i, chunk := range chunks {
		if !emit("写入分块信息失败", "=== 分块 %d ===\n", i+1) {
			return
		}
		if !emit("写入位置信息失败", "位置: %d-%d\n", chunk.StartPos, chunk.EndPos) {
			return
		}
		if !emit("写入行号信息失败", "行号: %d-%d\n", chunk.StartLine, chunk.EndLine) {
			return
		}
		if !emit("写入大小信息失败", "大小: %d 字符\n", len(chunk.Content)) {
			return
		}
		if !emit("写入内容失败", "内容:\n%s\n\n", chunk.Content) {
			return
		}
	}

	// Sentence-break section: either the collected details or an all-clear note.
	if !emit("写入断裂分析标题失败", "\n=== 句子断裂分析 ===\n") {
		return
	}
	if len(breaks) > 0 {
		for _, detail := range breaks {
			if !emit("写入断裂详情失败", "%s\n", detail) {
				return
			}
		}
	} else if !emit("写入无断裂信息失败", "未发现句子断裂问题\n") {
		return
	}

	fmt.Printf("分析结果已保存到: %s\n", resultFile)
}
// saveAnalysisResult writes a full chunking report (header, statistics,
// per-chunk details and a sentence-break summary) to outputPath. It returns
// the file-creation error or the first write error that occurred.
func saveAnalysisResult(chunks []*model.FileChunk, originalContent string, outputPath string) error {
	file, err := os.Create(outputPath)
	if err != nil {
		return err
	}
	defer file.Close()

	// Buffer the many small writes; a write error is sticky inside the
	// bufio.Writer and is surfaced by the final Flush. (The original version
	// ignored the error of every single WriteString call.)
	w := bufio.NewWriter(file)

	// Report header.
	w.WriteString("=== 内容分块测试结果分析 ===\n\n")
	w.WriteString(fmt.Sprintf("测试文件: %s\n", "内容分块测试文本.md"))
	w.WriteString(fmt.Sprintf("原始内容长度: %d 字符\n", len(originalContent)))
	w.WriteString(fmt.Sprintf("分块数量: %d\n", len(chunks)))
	w.WriteString("\n")

	// Aggregate statistics over all chunks.
	totalChars := 0
	minChunkSize := int(^uint(0) >> 1) // max-int sentinel
	maxChunkSize := 0
	sentenceBreaks := 0

	for _, chunk := range chunks {
		chunkSize := len(chunk.Content)
		totalChars += chunkSize

		if chunkSize < minChunkSize {
			minChunkSize = chunkSize
		}
		if chunkSize > maxChunkSize {
			maxChunkSize = chunkSize
		}
	}

	// Guard against division by zero (the original panicked when no chunks
	// were produced) and reset the min sentinel so the report shows 0 rather
	// than max-int for an empty run.
	avgChunkSize := 0
	if len(chunks) > 0 {
		avgChunkSize = totalChars / len(chunks)
	} else {
		minChunkSize = 0
	}

	w.WriteString("=== 分块统计 ===\n")
	w.WriteString(fmt.Sprintf("总字符数: %d\n", totalChars))
	w.WriteString(fmt.Sprintf("平均分块大小: %d 字符\n", avgChunkSize))
	w.WriteString(fmt.Sprintf("最小分块大小: %d 字符\n", minChunkSize))
	w.WriteString(fmt.Sprintf("最大分块大小: %d 字符\n", maxChunkSize))
	w.WriteString("\n")

	// Per-chunk details, each followed by a break check against its predecessor.
	w.WriteString("=== 分块详情 ===\n")
	for i, chunk := range chunks {
		w.WriteString(fmt.Sprintf("\n--- 分块 %d ---\n", i+1))
		w.WriteString(fmt.Sprintf("大小: %d 字符\n", len(chunk.Content)))
		w.WriteString(fmt.Sprintf("位置: %d-%d\n", chunk.StartPos, chunk.EndPos))
		if chunk.StartLine > 0 && chunk.EndLine > 0 {
			w.WriteString(fmt.Sprintf("行号: %d-%d\n", chunk.StartLine, chunk.EndLine))
		}
		w.WriteString("内容:\n")
		w.WriteString("----------------------------------------\n")
		w.WriteString(chunk.Content)
		w.WriteString("\n----------------------------------------\n")

		// Flag boundaries that appear to split a sentence.
		if i > 0 {
			prevChunk := chunks[i-1]
			prevChunkEnd := strings.TrimSpace(prevChunk.Content)
			currChunkStart := strings.TrimSpace(chunk.Content)

			prevEndsWithSentence := endsWithSentenceTerminator(prevChunkEnd)
			currStartsWithSentence := startsWithSentenceStart(currChunkStart)

			if !prevEndsWithSentence || !currStartsWithSentence {
				sentenceBreaks++
				// NOTE(review): previews slice by byte offset, so a multi-byte
				// rune may be cut at the boundary; %q keeps output printable.
				w.WriteString("\n*** 警告: 此分块可能在句子中间断开 ***\n")
				w.WriteString(fmt.Sprintf("前一分块结尾: %q\n", prevChunkEnd[len(prevChunkEnd)-min(50, len(prevChunkEnd)):]))
				w.WriteString(fmt.Sprintf("后一分块开头: %q\n", currChunkStart[:min(50, len(currChunkStart))]))
			}
		}
	}

	// Summary section.
	w.WriteString("\n=== 总结 ===\n")
	w.WriteString(fmt.Sprintf("可能的句子断裂数: %d\n", sentenceBreaks))
	if sentenceBreaks > 0 {
		w.WriteString("检测到多个分块在句子中间断开，建议优化分块算法。\n")
	} else {
		w.WriteString("未检测到明显的句子断裂问题。\n")
	}

	// Surface any buffered write error before the deferred Close.
	return w.Flush()
}