package service

import (
	"context"
	"fmt"
	"strings"

	"go.uber.org/zap"
	"go-file-perception-model/internal/config"
	"go-file-perception-model/internal/logger"
)

// ChunkProcessor implements the core text-chunking logic.
// It splits text into sentences and groups them into chunks either by
// semantic similarity (via embeddings) or by a fixed size budget,
// always preserving sentence boundaries.
type ChunkProcessor struct {
	embeddingModel   EmbeddingModel          // produces sentence/chunk embedding vectors
	semanticAnalyzer *SemanticAnalyzer       // cosine-similarity helper over embeddings
	sentenceSplitter *SentenceSplitter       // splits raw text into sentences
	config           *config.FileIndexConfig // size limits (ChunkSize, MaxChunkSize)
	chunkingConfig   *ChunkingConfig         // chunking defaults from DefaultChunkingConfig
}

// NewChunkProcessor constructs a ChunkProcessor wired with the given
// embedding model and file-index configuration. The semantic analyzer,
// sentence splitter, and default chunking settings are created internally.
func NewChunkProcessor(embeddingModel EmbeddingModel, cfg *config.FileIndexConfig) *ChunkProcessor {
	return &ChunkProcessor{
		embeddingModel:   embeddingModel,
		semanticAnalyzer: NewSemanticAnalyzer(embeddingModel),
		sentenceSplitter: NewSentenceSplitter(),
		config:           cfg,
		chunkingConfig:   DefaultChunkingConfig(),
	}
}

// ProcessSemanticChunking splits text into chunks along sentence
// boundaries, grouping adjacent sentences by semantic similarity.
// If sentence embeddings cannot be obtained it degrades gracefully to
// fixed-size chunking instead of returning an error.
func (cp *ChunkProcessor) ProcessSemanticChunking(text string) ([]string, error) {
	logger.Debug("Starting semantic chunking with sentence boundary preservation")

	// With nothing to split, the whole text becomes a single chunk.
	sentences := cp.sentenceSplitter.SplitIntoSentences(text)
	if len(sentences) == 0 {
		return []string{text}, nil
	}

	// Embed every sentence; on failure, fall back to the size-based strategy.
	embeddings, embErr := cp.getSentenceEmbeddings(sentences)
	if embErr != nil {
		logger.Error("Failed to get sentence embeddings, falling back to fixed-size chunking", zap.Error(embErr))
		return cp.ProcessFixedSizeChunking(text)
	}

	// Group sentences by semantic similarity under the size constraints.
	chunks := cp.semanticGroupSentences(sentences, embeddings)

	logger.Debug("Semantic chunking processing completed",
		zap.Int("original_sentences", len(sentences)),
		zap.Int("initial_chunks", len(chunks)))

	return chunks, nil
}

// ProcessFixedSizeChunking splits text into chunks of at most
// cp.config.ChunkSize characters without ever cutting a sentence in
// half. A single sentence longer than ChunkSize still becomes its own
// (oversized) chunk. Sentences within a chunk are joined by a space.
func (cp *ChunkProcessor) ProcessFixedSizeChunking(text string) ([]string, error) {
	logger.Debug("Starting fixed-size chunking with sentence boundary preservation")

	sentences := cp.sentenceSplitter.SplitIntoSentences(text)
	if len(sentences) == 0 {
		return []string{text}, nil
	}

	var chunks []string
	var current strings.Builder

	for _, sentence := range sentences {
		// Flush the current chunk when appending this sentence (plus the
		// joining space) would exceed the configured size budget.
		// (The previous version re-checked current.Len() > 0 inside this
		// branch, which was redundant — the guard already ensures it.)
		if current.Len() > 0 && current.Len()+len(sentence)+1 > cp.config.ChunkSize {
			chunks = append(chunks, strings.TrimSpace(current.String()))
			current.Reset()
		}

		// Join sentences within a chunk with a single space.
		if current.Len() > 0 {
			current.WriteString(" ")
		}
		current.WriteString(sentence)
	}

	// Flush the trailing chunk, if any.
	if current.Len() > 0 {
		chunks = append(chunks, strings.TrimSpace(current.String()))
	}

	logger.Debug("Fixed-size chunking processing completed",
		zap.Int("sentences", len(sentences)),
		zap.Int("chunks", len(chunks)))

	return chunks, nil
}

// getSentenceEmbeddings returns one embedding per input sentence, in
// the same order. Whitespace-only sentences are skipped and leave a nil
// entry at their position; callers must tolerate nil vectors.
func (cp *ChunkProcessor) getSentenceEmbeddings(sentences []string) ([][]float32, error) {
	result := make([][]float32, len(sentences))

	for idx, raw := range sentences {
		trimmed := strings.TrimSpace(raw)
		if trimmed == "" {
			// Blank sentence: leave result[idx] as nil.
			continue
		}

		vec, err := cp.embeddingModel.Embedding(context.Background(), trimmed)
		if err != nil {
			return nil, fmt.Errorf("failed to get embedding for sentence %d: %w", idx, err)
		}
		result[idx] = vec
	}

	return result, nil
}

// semanticGroupSentences groups consecutive sentences into chunks. A new
// chunk starts when the next sentence's embedding is not similar enough
// to the current group's running embedding, or when appending it would
// push the joined text past cp.config.MaxChunkSize characters.
//
// Sentences with a nil embedding (blank sentences skipped by
// getSentenceEmbeddings) are dropped from the output.
func (cp *ChunkProcessor) semanticGroupSentences(sentences []string, embeddings [][]float32) []string {
	if len(sentences) == 0 {
		return []string{}
	}

	// Cosine-similarity threshold; 0.85 enforces strong semantic
	// coherence within a chunk.
	const similarityThreshold = float32(0.85)

	var chunks []string
	var currentGroup []string
	var currentGroupEmbedding []float32

	for i, sentence := range sentences {
		if embeddings[i] == nil {
			continue // no embedding (blank sentence) — drop it
		}

		// Seed a new group with the first usable sentence.
		if len(currentGroup) == 0 {
			currentGroup = []string{sentence}
			currentGroupEmbedding = embeddings[i]
			continue
		}

		similarity := cp.semanticAnalyzer.CalculateCosineSimilarity(currentGroupEmbedding, embeddings[i])

		// Would appending this sentence exceed the size cap?
		candidate := strings.Join(currentGroup, " ") + " " + sentence
		exceedsMaxSize := len(candidate) > cp.config.MaxChunkSize

		// currentGroup is guaranteed non-empty here (the empty case
		// continues above), so no extra emptiness check is needed.
		if similarity < similarityThreshold || exceedsMaxSize {
			// Close out the current group and start a fresh one.
			chunks = append(chunks, strings.Join(currentGroup, " "))
			currentGroup = []string{sentence}
			currentGroupEmbedding = embeddings[i]
		} else {
			currentGroup = append(currentGroup, sentence)
			// Fold the new sentence into the running embedding.
			// NOTE(review): pairwise averaging weights recent sentences
			// more heavily than a true mean — confirm this is intended.
			currentGroupEmbedding = cp.averageEmbeddings(currentGroupEmbedding, embeddings[i])
		}
	}

	// Emit the final group.
	if len(currentGroup) > 0 {
		chunks = append(chunks, strings.Join(currentGroup, " "))
	}

	return chunks
}

// averageEmbeddings returns the element-wise mean of two equally-sized
// vectors. On a dimension mismatch it returns vec1 unchanged as a
// defensive fallback.
func (cp *ChunkProcessor) averageEmbeddings(vec1, vec2 []float32) []float32 {
	if len(vec1) != len(vec2) {
		// Dimensions disagree: keep the existing vector as-is.
		return vec1
	}

	avg := make([]float32, len(vec1))
	for i, v := range vec1 {
		avg[i] = (v + vec2[i]) / 2
	}
	return avg
}

// GetChunkEmbedding returns the embedding vector for a chunk of text.
// Surrounding whitespace is trimmed first; an empty chunk is an error.
func (cp *ChunkProcessor) GetChunkEmbedding(chunk string) ([]float32, error) {
	trimmed := strings.TrimSpace(chunk)
	if trimmed == "" {
		return nil, fmt.Errorf("empty chunk")
	}
	return cp.embeddingModel.Embedding(context.Background(), trimmed)
}