package processor

import (
	"fmt"
	"log"
	"math"
	"sync"
	"time"

	"mcp-server/internal/types"
	"mcp-server/internal/vector/models"
)

// Vectorizer turns texts and document chunks into embedding vectors using
// a pluggable embedding model. config and initialized are guarded by mutex;
// model is assigned once at construction and never replaced afterwards.
type Vectorizer struct {
	model       models.EmbeddingModel // embedding backend; set in NewVectorizer, read-only afterwards
	config      *VectorizerConfig     // runtime settings; swapped via Initialize/SetConfig under mutex
	mutex       sync.RWMutex          // guards config and initialized
	initialized bool                  // set by Initialize; checked before embedding work
}

// VectorizerConfig controls batching, concurrency and quality checking.
type VectorizerConfig struct {
	BatchSize      int     `json:"batch_size"`      // intended texts per embedding batch; not referenced in this file beyond logging — TODO confirm usage elsewhere
	MaxConcurrency int     `json:"max_concurrency"` // max parallel embedding calls in VectorizeChunks
	TimeoutSeconds int     `json:"timeout_seconds"` // per-operation timeout; not enforced in this file — TODO confirm
	RetryAttempts  int     `json:"retry_attempts"`  // retry count; not enforced in this file — TODO confirm
	QualityCheck   bool    `json:"quality_check"`   // when true, attach QualityMetrics to each result
	MinSimilarity  float32 `json:"min_similarity"`  // pass threshold for ValidateConsistency
	EnableCache    bool    `json:"enable_cache"`    // cache flag; not used in this file — TODO confirm
}

// VectorizeResult holds the outcome of embedding a single text or chunk.
// Embedding failures are reported in-band (Success=false, Error set) so
// batch processing can continue past individual failures.
type VectorizeResult struct {
	ChunkID     string                 `json:"chunk_id"`        // deterministic ID derived from text content (and file path, when present)
	Vector      []float32              `json:"vector"`          // the embedding; nil on failure
	Text        string                 `json:"text"`            // the input text that was embedded
	Metadata    map[string]interface{} `json:"metadata"`        // caller-supplied plus chunk-derived metadata
	Quality     *QualityMetrics        `json:"quality,omitempty"` // populated only when QualityCheck is enabled
	ProcessTime int64                  `json:"process_time_ms"` // wall-clock embedding time in milliseconds
	Success     bool                   `json:"success"`         // false when the model call failed
	Error       string                 `json:"error,omitempty"` // model error message when Success is false
}

// QualityMetrics captures simple quality signals for a produced vector.
type QualityMetrics struct {
	VectorNorm  float32 `json:"vector_norm"` // magnitude measure of the embedding (see calculateQuality)
	TextLength  int     `json:"text_length"` // length of the source text in runes
	Consistency float32 `json:"consistency"` // 1.0 for a single-pass embedding
	Confidence  float32 `json:"confidence"`  // heuristic placeholder confidence
}

// BatchVectorizeResult aggregates the outcome of embedding many chunks.
type BatchVectorizeResult struct {
	Results      []*VectorizeResult     `json:"results"`         // one entry per input chunk, in input order
	TotalCount   int                    `json:"total_count"`     // number of chunks processed
	SuccessCount int                    `json:"success_count"`   // results with Success=true
	FailureCount int                    `json:"failure_count"`   // results with Success=false
	ProcessTime  int64                  `json:"process_time_ms"` // total wall-clock time for the batch
	Statistics   map[string]interface{} `json:"statistics"`      // summary metrics from calculateStatistics
}

// DefaultVectorizerConfig returns a configuration populated with
// conservative defaults suitable for most deployments.
func DefaultVectorizerConfig() *VectorizerConfig {
	cfg := VectorizerConfig{
		BatchSize:      32,
		MaxConcurrency: 4,
		TimeoutSeconds: 30,
		RetryAttempts:  3,
		QualityCheck:   true,
		MinSimilarity:  0.7,
		EnableCache:    true,
	}
	return &cfg
}

// NewVectorizer constructs a Vectorizer around the given embedding model,
// starting from the default configuration. Initialize must be called
// before any embedding methods are used.
func NewVectorizer(model models.EmbeddingModel) *Vectorizer {
	vz := &Vectorizer{model: model}
	vz.config = DefaultVectorizerConfig()
	return vz
}

// Initialize prepares the vectorizer for use. A non-nil config replaces
// the current configuration; nil keeps the existing (default) one.
// Returns an error when no embedding model was supplied at construction.
func (v *Vectorizer) Initialize(config *VectorizerConfig) error {
	v.mutex.Lock()
	defer v.mutex.Unlock()

	// Validate before mutating any state, so a failed Initialize leaves
	// the vectorizer's configuration untouched (the previous version
	// applied the new config first and then reported the error).
	if v.model == nil {
		return fmt.Errorf("嵌入模型不能为空")
	}

	if config != nil {
		v.config = config
	}

	log.Println("正在初始化向量化处理器...")

	v.initialized = true
	log.Printf("向量化处理器初始化完成，批处理大小: %d", v.config.BatchSize)

	return nil
}

// VectorizeText embeds a single text and returns a VectorizeResult.
// Model failures are reported inside the result (Success=false) rather
// than as a Go error; a non-nil error is returned only for an empty
// input or an uninitialized vectorizer.
func (v *Vectorizer) VectorizeText(text string, metadata map[string]interface{}) (*VectorizeResult, error) {
	// Snapshot shared state under the read lock. Initialize/SetConfig may
	// mutate these fields concurrently, and every other accessor in this
	// file already reads them under the lock; the previous version read
	// them unlocked, which is a data race.
	v.mutex.RLock()
	initialized := v.initialized
	cfg := v.config
	v.mutex.RUnlock()

	if !initialized {
		return nil, fmt.Errorf("向量化处理器未初始化")
	}

	if text == "" {
		return nil, fmt.Errorf("输入文本不能为空")
	}

	startTime := time.Now()

	// Ask the model for the embedding.
	vector, err := v.model.GetEmbedding(text)
	if err != nil {
		// Report the failure in-band so batch callers can keep going.
		return &VectorizeResult{
			Text:        text,
			Metadata:    metadata,
			ProcessTime: time.Since(startTime).Milliseconds(),
			Success:     false,
			Error:       err.Error(),
		}, nil
	}

	// Optional per-result quality metrics.
	var quality *QualityMetrics
	if cfg.QualityCheck {
		quality = v.calculateQuality(text, vector)
	}

	return &VectorizeResult{
		ChunkID:     v.generateChunkID(text, metadata),
		Vector:      vector,
		Text:        text,
		Metadata:    metadata,
		Quality:     quality,
		ProcessTime: time.Since(startTime).Milliseconds(),
		Success:     true,
	}, nil
}

// VectorizeChunk embeds one document chunk, carrying its positional and
// semantic fields over into the result metadata.
func (v *Vectorizer) VectorizeChunk(chunk *types.TextChunk) (*VectorizeResult, error) {
	if chunk == nil {
		return nil, fmt.Errorf("文档分片不能为空")
	}

	// Built-in metadata derived from the chunk itself.
	meta := map[string]interface{}{
		"chunk_id":     chunk.ID,
		"start_line":   chunk.StartLine,
		"end_line":     chunk.EndLine,
		"chunk_index":  chunk.ChunkIndex,
		"semantic_tag": chunk.SemanticTag,
		"created_at":   chunk.CreatedAt,
	}

	// Merge user-supplied metadata without clobbering the built-in keys.
	for key, value := range chunk.Metadata {
		if _, taken := meta[key]; !taken {
			meta[key] = value
		}
	}

	return v.VectorizeText(chunk.Content, meta)
}

// VectorizeChunks embeds a batch of chunks concurrently, bounded by
// MaxConcurrency, and returns per-chunk results in input order together
// with aggregate statistics.
func (v *Vectorizer) VectorizeChunks(chunks []*types.TextChunk) (*BatchVectorizeResult, error) {
	if len(chunks) == 0 {
		return nil, fmt.Errorf("文档分片列表不能为空")
	}

	begin := time.Now()
	results := make([]*VectorizeResult, len(chunks))

	// Bound concurrent embedding calls with a semaphore channel.
	slots := make(chan struct{}, v.config.MaxConcurrency)
	var wg sync.WaitGroup

	for idx, ch := range chunks {
		wg.Add(1)
		go func(pos int, c *types.TextChunk) {
			defer wg.Done()

			slots <- struct{}{}
			defer func() { <-slots }()

			res, err := v.VectorizeChunk(c)
			if err != nil {
				// Fold hard errors into a failed result entry.
				res = &VectorizeResult{
					ChunkID: c.ID,
					Text:    c.Content,
					Success: false,
					Error:   err.Error(),
				}
			}
			// Each goroutine owns its distinct slot, so no lock is needed.
			results[pos] = res
		}(idx, ch)
	}

	wg.Wait()

	// Tally outcomes once every worker has finished.
	var succeeded, failed int
	for _, res := range results {
		if res.Success {
			succeeded++
		} else {
			failed++
		}
	}

	return &BatchVectorizeResult{
		Results:      results,
		TotalCount:   len(chunks),
		SuccessCount: succeeded,
		FailureCount: failed,
		ProcessTime:  time.Since(begin).Milliseconds(),
		Statistics:   v.calculateStatistics(results),
	}, nil
}

// VectorizeDocument embeds every chunk of a successfully split document.
func (v *Vectorizer) VectorizeDocument(splitResult *types.SplitResult) (*BatchVectorizeResult, error) {
	switch {
	case splitResult == nil:
		return nil, fmt.Errorf("分片结果不能为空")
	case !splitResult.Success:
		return nil, fmt.Errorf("文档分片失败: %s", splitResult.Error)
	}

	return v.VectorizeChunks(splitResult.Chunks)
}

// ValidateConsistency embeds the same text several times concurrently and
// measures how similar the resulting vectors are. attempts is clamped to
// [2, 10]. Passed is true when the mean pairwise cosine similarity meets
// the configured MinSimilarity threshold.
func (v *Vectorizer) ValidateConsistency(text string, attempts int) (*ConsistencyResult, error) {
	// Mirror VectorizeText: refuse to run before Initialize succeeded.
	// The previous version skipped this guard and would dereference a nil
	// model when called on an uninitialized vectorizer.
	if !v.IsInitialized() {
		return nil, fmt.Errorf("向量化处理器未初始化")
	}

	// Clamp attempts to a sane range.
	if attempts < 2 {
		attempts = 2
	}
	if attempts > 10 {
		attempts = 10
	}

	vectors := make([][]float32, attempts)
	errs := make([]error, attempts)
	var wg sync.WaitGroup

	for i := 0; i < attempts; i++ {
		wg.Add(1)
		go func(index int) {
			defer wg.Done()
			// Each goroutine writes only its own slot; no lock required.
			vectors[index], errs[index] = v.model.GetEmbedding(text)
		}(i)
	}

	wg.Wait()

	// Fail fast on the first embedding error.
	for _, err := range errs {
		if err != nil {
			return nil, fmt.Errorf("向量化失败: %v", err)
		}
	}

	consistency := v.calculateVectorConsistency(vectors)

	return &ConsistencyResult{
		Text:        text,
		Attempts:    attempts,
		Vectors:     vectors,
		Consistency: consistency,
		Passed:      consistency >= v.config.MinSimilarity,
	}, nil
}

// ConsistencyResult reports how reproducible the embedding of a single
// text is across repeated model calls.
type ConsistencyResult struct {
	Text        string      `json:"text"`        // the text that was embedded
	Attempts    int         `json:"attempts"`    // number of embedding runs (clamped to [2, 10])
	Vectors     [][]float32 `json:"vectors"`     // one vector per attempt
	Consistency float32     `json:"consistency"` // mean pairwise cosine similarity across attempts
	Passed      bool        `json:"passed"`      // consistency >= configured MinSimilarity
}

// generateChunkID derives a deterministic chunk ID from the text content,
// mixing in the source file path (when present in metadata) so identical
// text from different files yields distinct IDs. Note: the additive
// combiner is commutative and collision-prone, but it is kept as-is since
// changing it would invalidate previously generated IDs.
func (v *Vectorizer) generateChunkID(text string, metadata map[string]interface{}) string {
	id := v.simpleHash(text)

	if fp, ok := metadata["file_path"].(string); ok {
		id += v.simpleHash(fp)
	}

	return fmt.Sprintf("chunk_%x", id)
}

// simpleHash computes a djb2-style hash over the runes of text.
// hash*33 is identical to the classic ((hash<<5)+hash) under uint32
// wrap-around arithmetic.
func (v *Vectorizer) simpleHash(text string) uint32 {
	hash := uint32(5381)
	for _, r := range text {
		hash = hash*33 + uint32(r)
	}
	return hash
}

// calculateQuality derives quality metrics for a freshly produced vector.
// VectorNorm is the Euclidean (L2) norm of the embedding; the previous
// version stored the raw sum of squares (the squared norm), contradicting
// the field name, and also shadowed the method receiver in its loop.
func (v *Vectorizer) calculateQuality(text string, vector []float32) *QualityMetrics {
	// Accumulate the sum of squares, then take the square root.
	var sumSquares float64
	for _, component := range vector {
		sumSquares += float64(component) * float64(component)
	}

	return &QualityMetrics{
		VectorNorm:  float32(sqrt(sumSquares)),
		TextLength:  len([]rune(text)), // rune count, not byte length
		Consistency: 1.0,               // single pass: consistency is 1 by definition
		Confidence:  0.9,               // heuristic placeholder confidence
	}
}

// calculateVectorConsistency returns the mean pairwise cosine similarity
// over all distinct vector pairs; fewer than two vectors count as fully
// consistent (1.0).
func (v *Vectorizer) calculateVectorConsistency(vectors [][]float32) float32 {
	if len(vectors) < 2 {
		return 1.0
	}

	var sum float32
	pairs := 0
	for i, vi := range vectors {
		for _, vj := range vectors[i+1:] {
			sum += v.cosineSimilarity(vi, vj)
			pairs++
		}
	}

	if pairs == 0 {
		return 1.0
	}

	return sum / float32(pairs)
}

// cosineSimilarity returns the cosine of the angle between a and b, or 0
// when the lengths differ or either vector is all zeros.
func (v *Vectorizer) cosineSimilarity(a, b []float32) float32 {
	if len(a) != len(b) {
		return 0
	}

	var dot, sqA, sqB float32
	for i := range a {
		dot += a[i] * b[i]
		sqA += a[i] * a[i]
		sqB += b[i] * b[i]
	}

	if sqA == 0 || sqB == 0 {
		return 0
	}

	// Take the root of each operand separately (matches the original
	// evaluation order bit-for-bit).
	denom := float32(sqrt(float64(sqA))) * float32(sqrt(float64(sqB)))
	return dot / denom
}

// sqrt returns the square root of x.
//
// The previous hand-rolled Newton iteration ran a fixed 10 steps, which
// loses precision for large inputs (convergence from z=x needs more
// iterations as x grows) and produces garbage for negative inputs.
// math.Sqrt is correctly rounded, handles 0/NaN/negative per IEEE 754,
// and compiles to a single hardware instruction on most platforms.
func sqrt(x float64) float64 {
	return math.Sqrt(x)
}

// calculateStatistics aggregates summary metrics over a batch of results:
// counts, success rate, and averages of process time, text length and
// vector norm (the last over successful results with quality data only).
func (v *Vectorizer) calculateStatistics(results []*VectorizeResult) map[string]interface{} {
	stats := make(map[string]interface{})
	total := len(results)
	if total == 0 {
		return stats
	}

	var (
		sumTime   int64
		sumLength int
		sumNorm   float32
		succeeded int
	)

	for _, r := range results {
		sumTime += r.ProcessTime
		sumLength += len([]rune(r.Text))
		if !r.Success {
			continue
		}
		succeeded++
		if r.Quality != nil {
			sumNorm += r.Quality.VectorNorm
		}
	}

	stats["total_results"] = total
	stats["success_count"] = succeeded
	stats["failure_count"] = total - succeeded
	stats["success_rate"] = float64(succeeded) / float64(total)
	stats["average_process_time_ms"] = float64(sumTime) / float64(total)
	stats["average_text_length"] = float64(sumLength) / float64(total)

	if succeeded > 0 {
		stats["average_vector_norm"] = float64(sumNorm) / float64(succeeded)
	}

	return stats
}

// SetConfig replaces the active configuration; a nil config is ignored.
func (v *Vectorizer) SetConfig(config *VectorizerConfig) {
	if config == nil {
		return
	}

	v.mutex.Lock()
	v.config = config
	v.mutex.Unlock()
}

// GetConfig returns the active configuration under the read lock.
func (v *Vectorizer) GetConfig() *VectorizerConfig {
	v.mutex.RLock()
	defer v.mutex.RUnlock()
	return v.config
}

// GetModel returns the embedding model. The model is assigned once in
// NewVectorizer and never mutated, so no locking is required here.
func (v *Vectorizer) GetModel() models.EmbeddingModel {
	return v.model
}

// IsInitialized reports whether Initialize has completed successfully.
func (v *Vectorizer) IsInitialized() bool {
	v.mutex.RLock()
	defer v.mutex.RUnlock()
	return v.initialized
}
