package service

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/google/uuid"
	"go.uber.org/zap"
	"go-file-perception-model/internal/logger"
	"go-file-perception-model/internal/model"
	"go-file-perception-model/pkg/vector"
)

// RegexSearchService performs regular-expression search over indexed files.
// It resolves candidate directories (from config or the vector DB), matches
// files in parallel, and can optionally deduplicate results per file.
type RegexSearchService struct {
	vectorDB      vector.VectorDB    // used to discover already-indexed file paths
	fileCollector *FileCollector     // walks directories to gather candidate files
	highlighter   *SearchHighlighter // computes line numbers and highlighted snippets
}

// NewRegexSearchService wires up a RegexSearchService with its collaborators:
// the vector store, the file collector, and the highlighter.
func NewRegexSearchService(vectorDB vector.VectorDB, fileCollector *FileCollector, highlighter *SearchHighlighter) *RegexSearchService {
	svc := &RegexSearchService{}
	svc.vectorDB = vectorDB
	svc.fileCollector = fileCollector
	svc.highlighter = highlighter
	return svc
}

// Search runs the regular-expression query described by req and returns the
// matching results.
//
// The raw pattern from the request is compiled as-is (the frontend is
// responsible for escaping). Files come from req.Directories when given,
// otherwise from the configured index directories. Matching runs in
// parallel; results are optionally deduplicated per file and truncated to
// req.Limit.
func (s *RegexSearchService) Search(req model.RegexSearchRequest) ([]*model.SearchResult, error) {
	logger.Info("RegexSearch called", zap.String("pattern", req.Pattern))

	// Use the raw pattern without further escaping; the frontend already
	// prepared a valid regular expression.
	pattern := req.Pattern
	logger.Info("Using regex pattern", zap.String("pattern", pattern))

	regex, err := regexp.Compile(pattern)
	if err != nil {
		logger.Error("Failed to compile regex pattern", zap.String("pattern", pattern), zap.Error(err))
		return nil, fmt.Errorf("failed to compile regex pattern: %w", err)
	}

	usedPattern := pattern
	logger.Info("Successfully compiled regex pattern", zap.String("pattern", usedPattern))

	// Resolve which directories to scan: explicit request directories win,
	// otherwise fall back to the configured/derived index directories.
	directories := req.Directories
	if len(directories) == 0 {
		directories = s.getIndexDirectories()
		if len(directories) == 0 {
			logger.Error("No index directories configured")
			return nil, fmt.Errorf("no index directories configured")
		}
	}

	// Gather every candidate file; a failing directory is skipped rather
	// than aborting the whole search.
	var allFiles []string
	for _, dir := range directories {
		collected, cerr := s.fileCollector.CollectFiles(dir, true)
		if cerr != nil {
			logger.Error("Failed to collect files from directory", zap.String("directory", dir), zap.Error(cerr))
			continue
		}
		allFiles = append(allFiles, collected...)
	}

	logger.Info("Collected files for regex search",
		zap.String("pattern", req.Pattern),
		zap.Int("file_count", len(allFiles)))

	// Match all files in parallel.
	results := s.parallelRegexMatchFiles(allFiles, regex, req)

	logger.Info("Regex search completed",
		zap.String("used_pattern", usedPattern),
		zap.String("original_pattern", req.Pattern),
		zap.Int("total_files", len(allFiles)),
		zap.Int("results_returned", len(results)))

	// Either deduplicate per file (which also applies the limit) or just
	// truncate to the requested number of results.
	if req.EnableDedup {
		return s.deduplicateResults(results, req.Limit), nil
	}
	if len(results) > req.Limit {
		results = results[:req.Limit]
	}
	return results, nil
}

// deduplicateResults collapses the raw match list to at most one result per
// file: the highest-scored match of each file becomes its representative and
// its content is prefixed with the number of matches found in that file. The
// collapsed list is sorted by score (ties broken by match count) and
// truncated to limit entries.
func (s *RegexSearchService) deduplicateResults(results []*model.SearchResult, limit int) []*model.SearchResult {
	if len(results) == 0 {
		return results
	}

	// Group matches by file path.
	fileGroups := make(map[string][]*model.SearchResult)
	for _, result := range results {
		fileGroups[result.FilePath] = append(fileGroups[result.FilePath], result)
	}

	// Pick the best-scored result of each group as the file's representative.
	dedupResults := make([]*model.SearchResult, 0, len(fileGroups))
	for _, group := range fileGroups {
		// Seed with the first entry so that groups whose scores are all
		// zero (or negative) still produce a representative; the previous
		// implementation started from a nil best with bestScore 0 and
		// silently dropped such groups.
		bestResult := group[0]
		for _, result := range group[1:] {
			if result.Score > bestResult.Score {
				bestResult = result
			}
		}

		// Annotate the representative with the group's match count. This
		// exact prefix format is parsed back by extractMatchCount.
		bestResult.Content = fmt.Sprintf("[匹配次数: %d] %s", len(group), bestResult.Content)
		dedupResults = append(dedupResults, bestResult)
	}

	// Order by score, breaking ties with the embedded match count.
	sort.Slice(dedupResults, func(i, j int) bool {
		if dedupResults[i].Score != dedupResults[j].Score {
			return dedupResults[i].Score > dedupResults[j].Score
		}
		return s.extractMatchCount(dedupResults[i].Content) > s.extractMatchCount(dedupResults[j].Content)
	})

	// Enforce the caller's result limit.
	if len(dedupResults) > limit {
		dedupResults = dedupResults[:limit]
	}

	return dedupResults
}

// matchCountRe parses the "[匹配次数: N]" prefix that deduplicateResults
// prepends to a representative result's content.
var matchCountRe = regexp.MustCompile(`^\[匹配次数: (\d+)\]`)

// extractMatchCount returns the match count embedded in content by
// deduplicateResults, or 1 when no count prefix is present or it cannot be
// parsed. The pattern is compiled once at package init instead of on every
// call, since this runs inside a sort comparator.
func (s *RegexSearchService) extractMatchCount(content string) int {
	if m := matchCountRe.FindStringSubmatch(content); len(m) > 1 {
		if count, err := strconv.Atoi(m[1]); err == nil {
			return count
		}
	}
	return 1
}

// parallelRegexMatchFiles matches regex against every file in files using a
// pool of one worker goroutine per CPU, and returns all results in
// arbitrary order.
func (s *RegexSearchService) parallelRegexMatchFiles(files []string, regex *regexp.Regexp, req model.RegexSearchRequest) []*model.SearchResult {
	workerCount := runtime.NumCPU()

	logger.Info("Starting parallel regex match",
		zap.String("pattern", regex.String()),
		zap.Int("file_count", len(files)),
		zap.Int("worker_count", workerCount))

	// The jobs channel is buffered to hold every file, so it can be filled
	// and closed up front before any worker starts draining it.
	jobs := make(chan string, len(files))
	for _, path := range files {
		jobs <- path
	}
	close(jobs)

	resultChan := make(chan *model.SearchResult, len(files))

	// Launch the worker pool; each worker drains jobs until the channel is
	// exhausted and forwards every match it finds.
	var wg sync.WaitGroup
	wg.Add(workerCount)
	for w := 0; w < workerCount; w++ {
		go func(workerID int) {
			defer wg.Done()
			for path := range jobs {
				for _, match := range s.processFileForRegex(path, regex, req) {
					resultChan <- match
				}
			}
		}(w)
	}

	// Close the result channel once every worker is done so the collection
	// loop below terminates.
	go func() {
		wg.Wait()
		close(resultChan)
	}()

	// Collect results concurrently with the workers; this also prevents
	// workers from blocking if they produce more results than the buffer
	// can hold.
	collected := make([]*model.SearchResult, 0)
	for r := range resultChan {
		collected = append(collected, r)
	}

	logger.Info("Parallel regex match completed",
		zap.String("pattern", regex.String()),
		zap.Int("file_count", len(files)),
		zap.Int("results_found", len(collected)))

	return collected
}

// processFileForRegex applies regex to a single file and returns one
// SearchResult per match. Files excluded by req.FileTypes or that cannot be
// read yield no results. When the pattern matches nothing, extra diagnostics
// are logged (including a probe with `\d` rewritten as `[0-9]`) to help
// explain why.
func (s *RegexSearchService) processFileForRegex(filePath string, regex *regexp.Regexp, req model.RegexSearchRequest) []*model.SearchResult {
	var results []*model.SearchResult

	logger.Info("Processing file for regex", zap.String("file", filePath), zap.String("pattern", regex.String()))

	// Apply the optional file-extension filter. The extension is computed
	// once here and reused for the SearchResult below (the previous code
	// recomputed it per match).
	fileExt := strings.ToLower(filepath.Ext(filePath))
	if len(req.FileTypes) > 0 {
		supported := false
		for _, fileType := range req.FileTypes {
			if fileExt == fileType {
				supported = true
				break
			}
		}
		if !supported {
			logger.Info("File type not supported", zap.String("file", filePath), zap.String("extension", fileExt))
			return results
		}
	}

	// Read the whole file; matching operates on the full text.
	content, err := os.ReadFile(filePath)
	if err != nil {
		logger.Error("Failed to read file", zap.String("file", filePath), zap.Error(err))
		return results
	}

	text := string(content)

	logger.Info("File content loaded",
		zap.String("file", filePath),
		zap.Int("content_length", len(text)),
		zap.String("sample_content", text[:min(100, len(text))]))

	// Locate every match as [start, end) byte offsets.
	matches := regex.FindAllStringIndex(text, -1)

	logger.Info("Regex matching results",
		zap.String("file", filePath),
		zap.String("pattern", regex.String()),
		zap.Int("matches_found", len(matches)))

	// No matches: log diagnostics, including a probe with a simplified
	// pattern, to help track down pattern problems.
	if len(matches) == 0 {
		logger.Info("No matches found, checking for possible reasons",
			zap.String("file", filePath),
			zap.String("pattern", regex.String()),
			zap.String("sample_content", text[:min(200, len(text))]))

		simplePattern := strings.ReplaceAll(regex.String(), "\\d", "[0-9]")
		logger.Info("Trying simplified pattern", zap.String("original", regex.String()), zap.String("simplified", simplePattern))

		if simpleRegex, err := regexp.Compile(simplePattern); err == nil {
			simpleMatches := simpleRegex.FindAllStringIndex(text, -1)
			logger.Info("Simplified pattern matches",
				zap.String("pattern", simplePattern),
				zap.Int("matches", len(simpleMatches)))

			if len(simpleMatches) > 0 {
				logger.Info("Simplified pattern found matches, original pattern may have issues",
					zap.String("original_pattern", regex.String()),
					zap.String("simplified_pattern", simplePattern),
					zap.Int("simplified_matches", len(simpleMatches)))
			}
		}

		return results
	}

	// The file size is identical for every match, so stat the file once
	// here instead of once per match inside the loop (the previous
	// implementation called os.Stat for every match).
	var fileSize int64
	if fileInfo, statErr := os.Stat(filePath); statErr == nil {
		fileSize = fileInfo.Size()
	}

	// Build one SearchResult per match.
	for _, match := range matches {
		// Guard against a zero-width match at offset 0, which would
		// otherwise pass -1 to CalculateLineNumber.
		endIdx := match[1] - 1
		if endIdx < 0 {
			endIdx = 0
		}
		startLine := s.highlighter.CalculateLineNumber(text, match[0])
		endLine := s.highlighter.CalculateLineNumber(text, endIdx)

		result := &model.SearchResult{
			FileID:    uuid.New().String(),
			FileName:  filepath.Base(filePath),
			FilePath:  filePath,
			ChunkID:   uuid.New().String(),
			Content:   text[match[0]:match[1]],
			Score:     1.0,
			Highlight: s.highlighter.GenerateRegexHighlightWithContext(text, regex, match[0], match[1]),
			ChunkNo:   1, // the file is treated as a single chunk
			StartPos:  match[0],
			EndPos:    match[1],
			StartLine: startLine,
			EndLine:   endLine,
			FileType:  fileExt,
			IndexedAt: time.Now(),
			FileSize:  fileSize,
		}

		results = append(results, result)
	}

	return results
}

// getIndexDirectories resolves the list of directories to search. Resolution
// order: (1) directories from the file collector's config, (2) parent
// directories of files already stored in the vector DB, (3) a ./data
// subdirectory of the working directory if it exists, (4) the working
// directory itself. Returns an empty slice only if the working directory
// cannot be determined.
func (s *RegexSearchService) getIndexDirectories() []string {
	logger.Info("getIndexDirectories called")

	// Prefer the statically configured index directories.
	if s.fileCollector != nil && s.fileCollector.config != nil && len(s.fileCollector.config.IndexDirectories) > 0 {
		logger.Info("Retrieved index directories from config", zap.Int("count", len(s.fileCollector.config.IndexDirectories)))
		for _, dir := range s.fileCollector.config.IndexDirectories {
			logger.Info("Config index directory", zap.String("path", dir))
		}
		return s.fileCollector.config.IndexDirectories
	}

	// Fall back to the vector DB. First attempt: points flagged indexed=true.
	filter1 := &vector.Filter{
		Must: []vector.Condition{
			{
				Key:   "indexed",
				Match: &vector.MatchValue{Value: true},
			},
		},
	}

	// NOTE(review): a zero vector is used purely to enumerate stored points;
	// this relies on SearchPoints returning results regardless of similarity
	// — confirm against the vector.VectorDB implementation.
	results, err := s.vectorDB.SearchPoints(context.Background(), make([]float32, 768), 10000, 0, filter1)
	if err != nil {
		logger.Error("Failed to get indexed files with indexed=true filter", zap.Error(err))
	} else {
		logger.Info("Found indexed files with indexed=true filter", zap.Int("count", len(results)))
	}

	// Second attempt: no filter at all, to pick up points that lack the
	// indexed flag.
	if len(results) == 0 {
		logger.Info("No files found with indexed=true filter, trying without filter")
		results, err = s.vectorDB.SearchPoints(context.Background(), make([]float32, 768), 10000, 0, nil)
		if err != nil {
			logger.Error("Failed to get all files", zap.Error(err))
		} else {
			logger.Info("Found files without filter", zap.Int("count", len(results)))
			for _, result := range results {
				// Fixed: log the file path under the "path" key; the
				// previous code logged result.FileID here.
				logger.Info("File", zap.String("path", result.FilePath))
			}
		}
	}

	// Collect the distinct parent directories of every indexed file.
	directoryMap := make(map[string]bool)
	for _, result := range results {
		dir := filepath.Dir(result.FilePath)
		directoryMap[dir] = true
		logger.Info("Added directory from file", zap.String("directory", dir), zap.String("file", result.FilePath))
	}

	dirPaths := make([]string, 0, len(directoryMap))
	for dir := range directoryMap {
		dirPaths = append(dirPaths, dir)
	}

	logger.Info("Retrieved index directories from vector DB", zap.Int("count", len(dirPaths)))
	for _, dir := range dirPaths {
		logger.Info("Index directory", zap.String("path", dir))
	}

	// Last resort: default to ./data under the working directory, or the
	// working directory itself.
	if len(dirPaths) == 0 {
		currentDir, err := os.Getwd()
		if err != nil {
			logger.Error("Failed to get current directory", zap.Error(err))
			return []string{}
		}

		dataDir := filepath.Join(currentDir, "data")
		if _, err := os.Stat(dataDir); err == nil {
			logger.Info("No indexed directories found, using default data directory", zap.String("directory", dataDir))
			return []string{dataDir}
		}

		logger.Info("No indexed directories found, using current directory as default", zap.String("directory", currentDir))
		return []string{currentDir}
	}

	return dirPaths
}