package vector

import (
	"fmt"
	"log"
	"sync"
	"time"

	"mcp-server/internal/docprocessor"
	"mcp-server/internal/qdrant"
	"mcp-server/internal/vector/processor"
)

// VectorManager coordinates document splitting, chunk vectorization, and
// vector storage in Qdrant. Mutable state (config, initialized,
// updateInProgress) is guarded by mutex; exported methods are intended to
// be safe for concurrent use.
type VectorManager struct {
	vectorService    VectorService                  // embedding backend (text -> vector)
	docProcessor     *docprocessor.DocumentProcessor // splits files/directories into chunks
	qdrantClient     qdrant.QdrantClient            // vector database client
	config           *ManagerConfig                 // collection/batch settings; guarded by mutex
	mutex            sync.RWMutex                   // protects the fields below and config
	initialized      bool                           // set by Initialize, cleared by Close
	updateInProgress bool                           // true while a directory-wide update runs
}

// ManagerConfig holds the tunable settings for VectorManager.
type ManagerConfig struct {
	CollectionName   string `json:"collection_name"`         // target Qdrant collection
	VectorDimension  int    `json:"vector_dimension"`        // embedding size expected by the collection
	DistanceMetric   string `json:"distance_metric"`         // similarity metric name passed to Qdrant (e.g. "Cosine")
	BatchSize        int    `json:"batch_size"`              // NOTE(review): not read anywhere in this file — confirm use elsewhere
	MaxRetries       int    `json:"max_retries"`             // NOTE(review): not read anywhere in this file — confirm use elsewhere
	UpdateInterval   int    `json:"update_interval_minutes"` // auto-update period in minutes
	EnableAutoUpdate bool   `json:"enable_auto_update"`      // whether periodic updates are enabled
}

// DefaultManagerConfig returns a ManagerConfig populated with the
// defaults used by this package: a Chinese-document collection with
// 768-dimensional cosine vectors and auto-update enabled.
func DefaultManagerConfig() *ManagerConfig {
	cfg := new(ManagerConfig)
	cfg.CollectionName = "chinese_documents"
	cfg.VectorDimension = 768 // dimensionality of the BGE embedding model
	cfg.DistanceMetric = "Cosine"
	cfg.BatchSize = 100
	cfg.MaxRetries = 3
	cfg.UpdateInterval = 5
	cfg.EnableAutoUpdate = true
	return cfg
}

// UpdateResult summarizes one processing run (single file or directory):
// file-level counters, chunk-level counters, elapsed time, collected
// per-item error messages, and derived statistics.
type UpdateResult struct {
	TotalFiles       int                    `json:"total_files"`       // files discovered
	ProcessedFiles   int                    `json:"processed_files"`   // files that reached the split stage
	SuccessFiles     int                    `json:"success_files"`     // files fully vectorized and stored
	FailedFiles      int                    `json:"failed_files"`      // TotalFiles - SuccessFiles
	TotalChunks      int                    `json:"total_chunks"`      // chunks produced by splitting
	VectorizedChunks int                    `json:"vectorized_chunks"` // chunks successfully embedded
	ProcessTime      int64                  `json:"process_time_ms"`   // wall-clock duration in milliseconds
	Errors           []string               `json:"errors,omitempty"`  // human-readable per-item failures
	Statistics       map[string]interface{} `json:"statistics"`        // derived ratios/averages
}

// NewVectorManager builds a VectorManager around the given Qdrant client,
// with a fresh vector service, a fresh document processor, and the
// package defaults for configuration. Call Initialize before use.
func NewVectorManager(qdrantClient qdrant.QdrantClient) *VectorManager {
	vm := &VectorManager{
		qdrantClient: qdrantClient,
		config:       DefaultManagerConfig(),
	}
	vm.vectorService = NewVectorService()
	vm.docProcessor = docprocessor.NewDocumentProcessor()
	return vm
}

// Initialize prepares the manager for use: it applies the optional
// config override, starts the vector service, and makes sure the target
// Qdrant collection exists. It is safe to call concurrently; the whole
// sequence runs under the write lock.
//
// Passing nil keeps the configuration set at construction time.
func (vm *VectorManager) Initialize(config *ManagerConfig) error {
	vm.mutex.Lock()
	defer vm.mutex.Unlock()

	if config != nil {
		vm.config = config
	}

	log.Println("正在初始化向量管理器...")

	// Bring up the embedding backend first; without it nothing below works.
	// Wrap with %w so callers can inspect the cause via errors.Is/As.
	if err := vm.vectorService.Initialize(DefaultServiceConfig()); err != nil {
		return fmt.Errorf("初始化向量服务失败: %w", err)
	}

	// Create the collection if it is missing (idempotent).
	if err := vm.ensureCollection(); err != nil {
		return fmt.Errorf("创建向量集合失败: %w", err)
	}

	vm.initialized = true
	log.Println("向量管理器初始化完成")

	return nil
}

// Close shuts the manager down, releasing the vector service. It is a
// no-op when the manager was never initialized, and always returns nil;
// a failure to close the vector service is only logged.
func (vm *VectorManager) Close() error {
	vm.mutex.Lock()
	defer vm.mutex.Unlock()

	// Nothing to tear down if Initialize never completed.
	if !vm.initialized {
		return nil
	}

	log.Println("正在关闭向量管理器...")

	if svc := vm.vectorService; svc != nil {
		if err := svc.Close(); err != nil {
			// Best-effort shutdown: record the problem but keep going.
			log.Printf("关闭向量服务时出错: %v", err)
		}
	}

	vm.initialized = false
	log.Println("向量管理器已关闭")

	return nil
}

// ProcessAndStoreDocument splits a single file into chunks, vectorizes
// each chunk, and stores the resulting vectors in Qdrant.
//
// Per-file failures (split, vectorize, store) are reported inside the
// returned UpdateResult (FailedFiles/Errors) with a nil error; the error
// return is reserved for manager-level problems such as not being
// initialized.
func (vm *VectorManager) ProcessAndStoreDocument(filePath string) (*UpdateResult, error) {
	// Use the lock-protected accessor: "initialized" is written under
	// vm.mutex in Initialize/Close, so a bare read here would be a data race.
	if !vm.IsInitialized() {
		return nil, fmt.Errorf("向量管理器未初始化")
	}

	startTime := time.Now()
	result := &UpdateResult{
		TotalFiles: 1,
		Errors:     make([]string, 0),
	}

	// Step 1: split the document into chunks.
	splitResult, err := vm.docProcessor.ProcessFile(filePath)
	if err != nil {
		result.FailedFiles = 1
		result.Errors = append(result.Errors, fmt.Sprintf("处理文件失败 %s: %v", filePath, err))
		result.ProcessTime = time.Since(startTime).Milliseconds()
		return result, nil
	}

	result.ProcessedFiles = 1
	result.TotalChunks = len(splitResult.Chunks)

	// Step 2: embed every chunk.
	vectorResult, err := vm.vectorService.VectorizeDocument(splitResult)
	if err != nil {
		result.FailedFiles = 1
		result.Errors = append(result.Errors, fmt.Sprintf("向量化失败 %s: %v", filePath, err))
		result.ProcessTime = time.Since(startTime).Milliseconds()
		return result, nil
	}

	// Step 3: persist the vectors to Qdrant.
	if err := vm.storeVectors(vectorResult); err != nil {
		result.FailedFiles = 1
		result.Errors = append(result.Errors, fmt.Sprintf("存储向量失败 %s: %v", filePath, err))
		result.ProcessTime = time.Since(startTime).Milliseconds()
		return result, nil
	}

	result.SuccessFiles = 1
	result.VectorizedChunks = vectorResult.SuccessCount
	result.ProcessTime = time.Since(startTime).Milliseconds()
	result.Statistics = vectorResult.Statistics

	log.Printf("文档处理完成: %s, 分片数: %d, 向量化成功: %d",
		filePath, result.TotalChunks, result.VectorizedChunks)

	return result, nil
}

// ProcessAndStoreDirectory splits, vectorizes, and stores every document
// in dirPath (recursively when recursive is true). Only one directory
// update may run at a time; a second concurrent call is rejected with an
// error. Per-file failures are accumulated in the result's Errors slice
// and do not abort the run.
func (vm *VectorManager) ProcessAndStoreDirectory(dirPath string, recursive bool) (*UpdateResult, error) {
	// Lock-protected read: "initialized" is written under vm.mutex,
	// so reading the field directly here would be a data race.
	if !vm.IsInitialized() {
		return nil, fmt.Errorf("向量管理器未初始化")
	}

	// Claim the single update slot under the lock.
	vm.mutex.Lock()
	if vm.updateInProgress {
		vm.mutex.Unlock()
		return nil, fmt.Errorf("更新正在进行中，请稍后再试")
	}
	vm.updateInProgress = true
	vm.mutex.Unlock()

	// Release the slot no matter how this function exits.
	defer func() {
		vm.mutex.Lock()
		vm.updateInProgress = false
		vm.mutex.Unlock()
	}()

	startTime := time.Now()
	log.Printf("开始处理目录: %s (递归: %v)", dirPath, recursive)

	// Split every document in the directory into chunks.
	splitResults, err := vm.docProcessor.ProcessDirectory(dirPath, recursive)
	if err != nil {
		return nil, fmt.Errorf("处理目录失败: %w", err)
	}

	result := &UpdateResult{
		TotalFiles:     len(splitResults),
		ProcessedFiles: len(splitResults),
		Errors:         make([]string, 0),
	}

	successCount := 0
	totalChunks := 0
	vectorizedChunks := 0

	// Vectorize and store each document; failures skip to the next file.
	for _, splitResult := range splitResults {
		if !splitResult.Success {
			result.Errors = append(result.Errors, fmt.Sprintf("文档分片失败: %s", splitResult.Error))
			continue
		}

		totalChunks += len(splitResult.Chunks)

		vectorResult, err := vm.vectorService.VectorizeDocument(splitResult)
		if err != nil {
			result.Errors = append(result.Errors, fmt.Sprintf("向量化失败: %v", err))
			continue
		}

		if err := vm.storeVectors(vectorResult); err != nil {
			result.Errors = append(result.Errors, fmt.Sprintf("存储向量失败: %v", err))
			continue
		}

		successCount++
		vectorizedChunks += vectorResult.SuccessCount
	}

	result.SuccessFiles = successCount
	result.FailedFiles = result.TotalFiles - successCount
	result.TotalChunks = totalChunks
	result.VectorizedChunks = vectorizedChunks
	result.ProcessTime = time.Since(startTime).Milliseconds()

	// safeRatio guards against 0/0 on an empty directory: float division
	// by zero yields NaN/Inf in Go, and encoding/json cannot marshal
	// those values, which would break serialization of the result.
	safeRatio := func(num, den float64) float64 {
		if den == 0 {
			return 0
		}
		return num / den
	}

	result.Statistics = map[string]interface{}{
		"success_rate":               safeRatio(float64(successCount), float64(result.TotalFiles)),
		"average_chunks_per_file":    safeRatio(float64(totalChunks), float64(result.ProcessedFiles)),
		"vectorization_success_rate": safeRatio(float64(vectorizedChunks), float64(totalChunks)),
		"average_process_time_ms":    safeRatio(float64(result.ProcessTime), float64(result.ProcessedFiles)),
	}

	log.Printf("目录处理完成: 总文件 %d, 成功 %d, 失败 %d, 总分片 %d, 向量化成功 %d",
		result.TotalFiles, result.SuccessFiles, result.FailedFiles,
		result.TotalChunks, result.VectorizedChunks)

	return result, nil
}

// SearchSimilar embeds the query text and runs a nearest-neighbor search
// against Qdrant, returning at most limit results.
func (vm *VectorManager) SearchSimilar(query string, limit int) ([]qdrant.SearchResult, error) {
	// Lock-protected read; a direct field read would race with
	// Initialize/Close, which write "initialized" under vm.mutex.
	if !vm.IsInitialized() {
		return nil, fmt.Errorf("向量管理器未初始化")
	}

	// Embed the query with the same model used for documents so the
	// vectors are comparable.
	vectorResult, err := vm.vectorService.VectorizeText(query, nil)
	if err != nil {
		return nil, fmt.Errorf("查询向量化失败: %w", err)
	}

	// VectorizeText may report failure in-band rather than via error.
	if !vectorResult.Success {
		return nil, fmt.Errorf("查询向量化失败: %s", vectorResult.Error)
	}

	// Wrap the embedding in a transient vector; the ID is never stored.
	queryVector := qdrant.Vector{
		ID:     "query",
		Vector: vectorResult.Vector,
	}

	results, err := vm.qdrantClient.SearchVectors(queryVector, limit)
	if err != nil {
		return nil, fmt.Errorf("向量搜索失败: %w", err)
	}

	return results, nil
}

// GetStatus reports a snapshot of the manager: initialization and update
// flags, the active configuration, the vector service status, and — when
// available — information about the Qdrant collection. Collection lookup
// failures are silently omitted from the snapshot.
func (vm *VectorManager) GetStatus() map[string]interface{} {
	vm.mutex.RLock()
	defer vm.mutex.RUnlock()

	snapshot := map[string]interface{}{
		"initialized":        vm.initialized,
		"update_in_progress": vm.updateInProgress,
		"config":             vm.config,
	}

	if vm.vectorService != nil {
		snapshot["vector_service"] = vm.vectorService.GetServiceStatus()
	}

	if vm.qdrantClient != nil {
		// Best effort: include collection details only when the lookup succeeds.
		info, err := vm.qdrantClient.GetCollectionInfo(vm.config.CollectionName)
		if err == nil {
			snapshot["collection_info"] = info
		}
	}

	return snapshot
}

// ensureCollection makes sure the configured Qdrant collection exists,
// creating it with the configured dimension and distance metric when the
// lookup fails. Idempotent: an existing collection is left untouched.
// Callers must hold vm.mutex (it reads vm.config).
func (vm *VectorManager) ensureCollection() error {
	name := vm.config.CollectionName

	// A successful lookup means the collection is already in place.
	if _, err := vm.qdrantClient.GetCollectionInfo(name); err == nil {
		log.Printf("向量集合已存在: %s", name)
		return nil
	}

	cfg := qdrant.CollectionConfig{
		VectorSize: vm.config.VectorDimension,
		Distance:   vm.config.DistanceMetric,
	}
	if err := vm.qdrantClient.CreateCollection(name, cfg); err != nil {
		return fmt.Errorf("创建集合失败: %v", err)
	}

	log.Printf("向量集合创建成功: %s", name)
	return nil
}

// storeVectors converts the successful entries of a batch vectorization
// result into Qdrant vectors (text, metadata, and optional quality
// metrics in the payload) and inserts them in one batch. It errors when
// the batch is nil/empty or contains no successful results.
func (vm *VectorManager) storeVectors(batchResult *processor.BatchVectorizeResult) error {
	if batchResult == nil || len(batchResult.Results) == 0 {
		return fmt.Errorf("没有向量需要存储")
	}

	points := make([]qdrant.Vector, 0, len(batchResult.Results))

	for _, item := range batchResult.Results {
		// Failed chunks carry no usable vector; skip them.
		if !item.Success {
			continue
		}

		// Base payload, then per-chunk metadata (metadata keys may
		// overwrite the base keys, matching prior behavior), then the
		// quality block last so it always wins over metadata.
		payload := map[string]interface{}{
			"text":         item.Text,
			"chunk_id":     item.ChunkID,
			"process_time": item.ProcessTime,
		}
		for key, value := range item.Metadata {
			payload[key] = value
		}
		if q := item.Quality; q != nil {
			payload["quality"] = map[string]interface{}{
				"vector_norm": q.VectorNorm,
				"text_length": q.TextLength,
				"consistency": q.Consistency,
				"confidence":  q.Confidence,
			}
		}

		points = append(points, qdrant.Vector{
			ID:      item.ChunkID,
			Vector:  item.Vector,
			Payload: payload,
		})
	}

	if len(points) == 0 {
		return fmt.Errorf("没有成功的向量化结果")
	}

	if err := vm.qdrantClient.InsertVectors(points); err != nil {
		return fmt.Errorf("插入向量失败: %v", err)
	}

	log.Printf("成功存储 %d 个向量到集合: %s", len(points), vm.config.CollectionName)
	return nil
}

// DeleteDocumentVectors is a placeholder for removing every vector that
// belongs to the given file. The current implementation only logs the
// request: mapping a file path back to the stored chunk IDs is not yet
// implemented, so no vectors are actually deleted.
func (vm *VectorManager) DeleteDocumentVectors(filePath string) error {
	// Lock-protected read: "initialized" is written under vm.mutex in
	// Initialize/Close, so a bare field read here would be a data race.
	if !vm.IsInitialized() {
		return fmt.Errorf("向量管理器未初始化")
	}

	// TODO: look up the chunk IDs associated with filePath and delete
	// them from the collection.
	log.Printf("删除文档向量: %s (功能待完善)", filePath)

	return nil
}

// ValidateVectorConsistency re-embeds each sample text 3 times via the
// vector service and collects the per-sample consistency results, keyed
// "sample_<index>". Samples whose validation fails are logged and
// omitted from the map rather than aborting the run.
func (vm *VectorManager) ValidateVectorConsistency(sampleTexts []string) (map[string]*processor.ConsistencyResult, error) {
	// Lock-protected read; a direct read of vm.initialized would race
	// with Initialize/Close, which write it under vm.mutex.
	if !vm.IsInitialized() {
		return nil, fmt.Errorf("向量管理器未初始化")
	}

	// Pre-size: at most one entry per sample.
	results := make(map[string]*processor.ConsistencyResult, len(sampleTexts))

	for i, text := range sampleTexts {
		result, err := vm.vectorService.ValidateConsistency(text, 3)
		if err != nil {
			// Best effort: skip this sample and keep validating the rest.
			log.Printf("验证文本 %d 一致性失败: %v", i, err)
			continue
		}

		key := fmt.Sprintf("sample_%d", i)
		results[key] = result
	}

	return results, nil
}

// SetConfig replaces the manager configuration under the write lock.
// A nil argument is ignored and leaves the current configuration intact.
func (vm *VectorManager) SetConfig(config *ManagerConfig) {
	if config == nil {
		return
	}
	vm.mutex.Lock()
	vm.config = config
	vm.mutex.Unlock()
}

// GetConfig returns the current manager configuration.
// NOTE(review): this hands out the internal *ManagerConfig pointer, so a
// caller that mutates it bypasses the mutex — consider returning a copy.
func (vm *VectorManager) GetConfig() *ManagerConfig {
	vm.mutex.RLock()
	defer vm.mutex.RUnlock()

	return vm.config
}

// IsInitialized reports whether Initialize has completed successfully
// (and Close has not run since), reading the flag under the read lock.
func (vm *VectorManager) IsInitialized() bool {
	vm.mutex.RLock()
	ready := vm.initialized
	vm.mutex.RUnlock()
	return ready
}

// IsUpdateInProgress reports whether a directory-wide update is
// currently running, reading the flag under the read lock.
func (vm *VectorManager) IsUpdateInProgress() bool {
	vm.mutex.RLock()
	busy := vm.updateInProgress
	vm.mutex.RUnlock()
	return busy
}
