package service

import (
	"context"
	"fmt"
	"strings"

	"go.uber.org/zap"
	"go-file-perception-model/internal/config"
	"go-file-perception-model/internal/logger"
)

// ChunkOptimizer optimizes chunk sizes and performs intelligent merging of
// text chunks. When an embedding model is available it merges by semantic
// similarity; otherwise it falls back to length-based heuristics.
type ChunkOptimizer struct {
	embeddingModel   EmbeddingModel          // optional; nil disables semantic optimization
	semanticAnalyzer *SemanticAnalyzer       // similarity computation over embeddings
	sentenceSplitter *SentenceSplitter       // sentence-level splitting helper
	config           *config.FileIndexConfig // supplies MinChunkSize / MaxChunkSize limits
	chunkingConfig   *ChunkingConfig         // chunking thresholds (force-merge floor, similarity)
	utils            *ChunkUtils             // chunk merge / type-analysis helpers
}

// NewChunkOptimizer creates a ChunkOptimizer with default chunking settings.
// embeddingModel may be nil, in which case OptimizeChunkSizes degrades to the
// length-based strategy.
func NewChunkOptimizer(embeddingModel EmbeddingModel, cfg *config.FileIndexConfig) *ChunkOptimizer {
	return &ChunkOptimizer{
		embeddingModel:   embeddingModel,
		semanticAnalyzer: NewSemanticAnalyzer(embeddingModel),
		sentenceSplitter: NewSentenceSplitter(),
		config:           cfg,
		chunkingConfig:   DefaultChunkingConfig(),
		utils:            NewChunkUtils(cfg),
	}
}

// OptimizeChunkSizes optimizes the sizes of the given chunks, choosing the
// semantic strategy when an embedding model is configured and a plain
// length-based strategy otherwise.
func (co *ChunkOptimizer) OptimizeChunkSizes(chunks []string) []string {
	logger.Debug("Starting chunk size optimization", zap.Int("initial_chunks", len(chunks)))

	// Semantic optimization requires an embedding model.
	if co.embeddingModel != nil {
		return co.optimizeChunkSizesBySemantic(chunks)
	}
	return co.optimizeChunkSizesByLength(chunks)
}

// optimizeChunkSizesBySemantic optimizes chunk sizes using semantic
// similarity. Each chunk is handled exactly once: oversized chunks are split,
// well-sized chunks pass through unchanged, chunks below the hard minimum are
// force-merged with a neighbor, and the remaining undersized chunks go through
// smart/semantic merging.
func (co *ChunkOptimizer) optimizeChunkSizesBySemantic(chunks []string) []string {
	if len(chunks) <= 1 {
		return chunks
	}

	semanticSimilarityThreshold := co.chunkingConfig.SemanticSimilarityThreshold
	var optimized []string
	// processed marks chunks already emitted (directly or as a merge partner)
	// so the loop does not visit them again.
	processed := make([]bool, len(chunks))

	for i, chunk := range chunks {
		if processed[i] {
			continue
		}

		chunkLen := len(chunk)

		// Too large: split before anything else.
		if chunkLen > co.config.MaxChunkSize {
			optimized = append(optimized, co.splitLargeChunk(chunk)...)
			processed[i] = true
			continue
		}

		// Already within [MinChunkSize, MaxChunkSize]: keep as-is.
		if chunkLen >= co.config.MinChunkSize {
			optimized = append(optimized, chunk)
			processed[i] = true
			continue
		}

		// Below the hard floor: force a merge regardless of semantics.
		if chunkLen < co.chunkingConfig.ForceMinChunkSize {
			candidate, idx := co.findForcedMergeCandidate(chunks, processed, i)
			if candidate == "" || idx == -1 {
				// No neighbor can absorb this chunk; keep it despite its size.
				logger.Warn("Cannot force merge chunk below minimum threshold",
					zap.Int("chunk_index", i),
					zap.Int("length", chunkLen))
				optimized = append(optimized, chunk)
				processed[i] = true
				continue
			}

			merged := co.utils.MergeChunks(chunk, candidate, i < idx)
			optimized = append(optimized, merged)
			processed[i] = true
			processed[idx] = true

			logger.Info("Force merged chunk below minimum threshold",
				zap.Int("chunk_index", i),
				zap.Int("merge_index", idx),
				zap.Int("original_length", chunkLen),
				zap.Int("merged_length", len(merged)),
				zap.String("reason", string(ReasonForceMerge)))
			continue
		}

		// Small but above the hard floor: try smart, then semantic merging.
		co.processSmallChunk(chunks, processed, i, semanticSimilarityThreshold, &optimized)
	}

	logger.Debug("Chunk optimization completed",
		zap.Int("original_chunks", len(chunks)),
		zap.Int("optimized_chunks", len(optimized)))

	return optimized
}

// processSmallChunk handles a chunk that is smaller than MinChunkSize but at
// least ForceMinChunkSize. It tries a type-aware "smart" merge first, then a
// semantic merge; if neither yields a partner the chunk is kept unchanged.
// The merged (or original) chunk is appended to *optimized and the involved
// indices are marked in processed.
func (co *ChunkOptimizer) processSmallChunk(chunks []string, processed []bool, i int, threshold float32, optimized *[]string) {
	chunk := chunks[i]

	// Candidate strategies in priority order; the first hit wins.
	finders := []func() (string, int){
		func() (string, int) { return co.findSmartMergeCandidate(chunks, processed, i) },
		func() (string, int) { return co.findBestMergeCandidate(chunks, processed, i, threshold) },
	}
	for _, find := range finders {
		candidate, idx := find()
		if candidate == "" || idx == -1 {
			continue
		}
		// i < idx keeps the original document order inside the merged chunk.
		merged := co.utils.MergeChunks(chunk, candidate, i < idx)
		*optimized = append(*optimized, merged)
		processed[i] = true
		processed[idx] = true
		return
	}

	// No merge possible; keep the chunk as-is.
	*optimized = append(*optimized, chunk)
	processed[i] = true
}

// findForcedMergeCandidate looks for a neighbor to force-merge the chunk at
// currentIndex into. It prefers the semantically closest neighbor when an
// embedding model is available and falls back to positional adjacency.
// Returns ("", -1) when the chunk is blank or no candidate exists.
func (co *ChunkOptimizer) findForcedMergeCandidate(chunks []string, processed []bool, currentIndex int) (string, int) {
	trimmed := strings.TrimSpace(chunks[currentIndex])
	if trimmed == "" {
		return "", -1
	}

	// Semantic selection first, when possible; embedding failures silently
	// degrade to the positional fallback.
	if co.embeddingModel != nil {
		if embedding, err := co.getChunkEmbedding(trimmed); err == nil {
			if candidate, idx := co.findBestSemanticCandidate(chunks, processed, currentIndex, embedding); candidate != "" {
				return candidate, idx
			}
		}
	}

	// Fallback: pick by position (next neighbor preferred).
	return co.findPositionBasedCandidate(chunks, processed, currentIndex)
}

// findBestSemanticCandidate returns the unprocessed immediate neighbor of
// currentIndex with the highest cosine similarity to currentEmbedding, among
// neighbors whose merged length would stay within twice MaxChunkSize.
// Returns ("", -1) when no neighbor qualifies.
func (co *ChunkOptimizer) findBestSemanticCandidate(chunks []string, processed []bool, currentIndex int, currentEmbedding []float32) (string, int) {
	var (
		bestCandidate  string
		bestIndex      = -1
		bestSimilarity = float32(-1.0)
	)

	// Only the two immediate neighbors are eligible merge partners.
	for _, neighbor := range [2]int{currentIndex - 1, currentIndex + 1} {
		if neighbor < 0 || neighbor >= len(chunks) || processed[neighbor] {
			continue
		}
		candidate := chunks[neighbor]
		// Reject neighbors whose merge would exceed the relaxed size cap.
		if len(chunks[currentIndex])+len(candidate)+1 > co.config.MaxChunkSize*2 {
			continue
		}
		embedding, err := co.getChunkEmbedding(candidate)
		if err != nil {
			continue // embedding failure just disqualifies this neighbor
		}
		if sim := co.semanticAnalyzer.CalculateCosineSimilarity(currentEmbedding, embedding); sim > bestSimilarity {
			bestCandidate, bestIndex, bestSimilarity = candidate, neighbor, sim
		}
	}

	return bestCandidate, bestIndex
}

// findPositionBasedCandidate selects a merge partner purely by position:
// the following chunk is preferred, the preceding one is the fallback. A
// neighbor qualifies only if the merged length stays within twice
// MaxChunkSize. Returns ("", -1) when neither neighbor qualifies.
func (co *ChunkOptimizer) findPositionBasedCandidate(chunks []string, processed []bool, currentIndex int) (string, int) {
	curLen := len(chunks[currentIndex])
	limit := co.config.MaxChunkSize * 2

	// Next neighbor first, then previous — order matters for determinism.
	for _, neighbor := range [2]int{currentIndex + 1, currentIndex - 1} {
		if neighbor < 0 || neighbor >= len(chunks) || processed[neighbor] {
			continue
		}
		if curLen+len(chunks[neighbor])+1 <= limit {
			return chunks[neighbor], neighbor
		}
	}

	return "", -1
}

// Declarations for remaining methods (full implementations to be added later).

// optimizeChunkSizesByLength is the length-only fallback used when no
// embedding model is configured.
func (co *ChunkOptimizer) optimizeChunkSizesByLength(chunks []string) []string {
	// Simplified implementation: currently a pass-through placeholder.
	return chunks
}

// findSmartMergeCandidate picks a merge direction based on the chunk's
// content type: punctuation fragments attach to the preceding chunk, while
// number sequences and everything else attach to the following chunk.
func (co *ChunkOptimizer) findSmartMergeCandidate(chunks []string, processed []bool, currentIndex int) (string, int) {
	trimmed := strings.TrimSpace(chunks[currentIndex])

	// Only punctuation merges backwards; all other chunk types (including
	// number sequences) merge forwards.
	if co.utils.AnalyzeChunkType(trimmed) == ChunkTypePunctuation {
		return co.findPrevMergeCandidate(chunks, processed, currentIndex)
	}
	return co.findNextMergeCandidate(chunks, processed, currentIndex)
}

// findNextMergeCandidate returns the following chunk as a merge partner if it
// is unprocessed and the combined length fits within MaxChunkSize; otherwise
// ("", -1).
func (co *ChunkOptimizer) findNextMergeCandidate(chunks []string, processed []bool, currentIndex int) (string, int) {
	next := currentIndex + 1
	if next >= len(chunks) || processed[next] {
		return "", -1
	}
	if len(chunks[currentIndex])+len(chunks[next])+1 > co.config.MaxChunkSize {
		return "", -1
	}
	return chunks[next], next
}

// findPrevMergeCandidate returns the preceding chunk as a merge partner if it
// is unprocessed and the combined length fits within MaxChunkSize; otherwise
// ("", -1).
func (co *ChunkOptimizer) findPrevMergeCandidate(chunks []string, processed []bool, currentIndex int) (string, int) {
	prev := currentIndex - 1
	if prev < 0 || processed[prev] {
		return "", -1
	}
	if len(chunks[currentIndex])+len(chunks[prev])+1 > co.config.MaxChunkSize {
		return "", -1
	}
	return chunks[prev], prev
}

// findBestMergeCandidate finds a merge partner for the chunk at currentIndex.
// threshold is currently unused: this is a simplified implementation that
// delegates to the positional strategy (detailed semantic version to follow).
func (co *ChunkOptimizer) findBestMergeCandidate(chunks []string, processed []bool, currentIndex int, threshold float32) (string, int) {
	// Simplified implementation; the detailed version will be added later.
	return co.findPositionBasedCandidate(chunks, processed, currentIndex)
}

// splitLargeChunk splits a chunk exceeding MaxChunkSize into smaller pieces.
// Currently a pass-through placeholder: callers may therefore still receive
// chunks larger than MaxChunkSize.
func (co *ChunkOptimizer) splitLargeChunk(chunk string) []string {
	// Simplified implementation: return the chunk unchanged.
	return []string{chunk}
}

// getChunkEmbedding returns the embedding vector for chunk, trimming
// surrounding whitespace first. A blank chunk yields an error instead of a
// model call.
// NOTE(review): context.Background() means embedding calls cannot be
// cancelled by a caller deadline — consider threading a ctx parameter
// through if that matters.
func (co *ChunkOptimizer) getChunkEmbedding(chunk string) ([]float32, error) {
	trimmed := strings.TrimSpace(chunk)
	if trimmed == "" {
		return nil, fmt.Errorf("empty chunk")
	}
	return co.embeddingModel.Embedding(context.Background(), trimmed)
}