"""
向量数据库服务模块
负责文档向量化和相似度搜索
"""
import os
import json
import numpy as np
from typing import List, Dict, Any, Optional
import requests
from ..core.config import config


class VectorDBService:
    """Vector database service: document embedding and similarity search.

    Embeddings are requested from a local Ollama server; when Ollama is
    unavailable or returns an unexpected payload, a simple character-frequency
    vectorizer is used as a fallback. Per-chunk embeddings are persisted as
    JSON files under ``self.collection_path``.
    """

    def __init__(self):
        self.base_url = self._get_ollama_url()
        self.embedding_model = self._get_embedding_model()
        # Directory holding one "chunk_<id>.json" file per stored embedding.
        self.collection_path = config.get('vector_db.path', 'data/vectors')
        os.makedirs(self.collection_path, exist_ok=True)

    def _get_ollama_url(self) -> str:
        """Return the Ollama base URL from config, defaulting to localhost."""
        for provider in config.get('ai.providers', []):
            if provider.get('name') == 'ollama':
                return provider.get('base_url', 'http://localhost:11434')
        return 'http://localhost:11434'

    def _get_embedding_model(self) -> str:
        """Return the name of the first embedding model configured for Ollama.

        Falls back to the global ``ai.embedding_model`` config value when the
        Ollama provider declares no model of type ``embedding``.
        """
        for provider in config.get('ai.providers', []):
            if provider.get('name') == 'ollama':
                for model in provider.get('models', []):
                    if model.get('type') == 'embedding':
                        return model.get('name')
        return config.get('ai.embedding_model', 'sentence-transformers/all-MiniLM-L6-v2')

    def get_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Return one embedding vector per input text.

        Each text is embedded with a separate request: Ollama's
        ``/api/embeddings`` endpoint takes a single ``prompt`` string per call
        (the previous implementation posted the whole list for multi-text
        input, which the endpoint does not support). Any per-text failure —
        network error, non-200 status, or unexpected payload — falls back to
        :meth:`_simple_embeddings` for that text, so the result always has
        ``len(texts)`` entries.
        """
        if not texts:
            return []

        embeddings: List[List[float]] = []
        for text in texts:
            vector: Optional[List[float]] = None
            try:
                response = requests.post(
                    f"{self.base_url}/api/embeddings",
                    json={
                        "model": self.embedding_model,
                        "prompt": text,
                    },
                    timeout=30,
                )
                if response.status_code == 200:
                    data = response.json()
                    # Ollama returns {"embedding": [...]}; tolerate the plural
                    # key some compatible servers use.
                    if 'embedding' in data:
                        vector = data['embedding']
                    elif 'embeddings' in data and data['embeddings']:
                        vector = data['embeddings'][0]
            except Exception as e:
                print(f"获取嵌入向量失败: {e}")

            if vector is None:
                # Fallback: cheap character-frequency embedding.
                vector = self._simple_embeddings([text])[0]
            embeddings.append(vector)

        return embeddings

    def _simple_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Fallback vectorizer: 128-dim L2-normalized character frequencies.

        Only alphabetic characters contribute; a text with none of them maps
        to the zero vector (left unnormalized to avoid division by zero).
        Intended for testing / offline operation, not semantic quality.
        """
        embeddings = []
        for text in texts:
            text_lower = text.lower()
            vector = [0.0] * 128  # one bucket per (ord % 128) slot

            for char in text_lower:
                if char.isalpha():
                    vector[ord(char) % 128] += 1.0

            norm = np.linalg.norm(vector)
            if norm > 0:
                vector = [v / norm for v in vector]

            embeddings.append(vector)

        return embeddings

    def cosine_similarity(self, vec1: List[float], vec2: List[float]) -> float:
        """Return the cosine similarity of two vectors.

        Returns 0.0 for mismatched lengths or when either vector is zero
        (rather than raising), so callers can rank results unconditionally.
        """
        if len(vec1) != len(vec2):
            return 0.0

        dot_product = sum(a * b for a, b in zip(vec1, vec2))
        norm1 = np.linalg.norm(vec1)
        norm2 = np.linalg.norm(vec2)

        if norm1 == 0 or norm2 == 0:
            return 0.0

        return float(dot_product / (norm1 * norm2))

    def search_similar_chunks(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return the ``top_k`` document chunks most similar to ``query``.

        Uses stored chunk embeddings when available; chunks without a stored
        vector are scored with the keyword-based :meth:`_text_similarity`
        fallback. Results are dicts with id/content/file info and a
        ``similarity`` score, sorted descending.
        """
        # Imported here to avoid a circular import at module load time.
        from ..core.database import db

        query_embedding = self.get_embeddings([query])
        if not query_embedding:
            return []

        query_vec = query_embedding[0]

        # Fetch every chunk belonging to a processed document.
        chunks = db.execute_query("""
            SELECT c.*, d.file_name, d.file_path 
            FROM document_chunks c 
            JOIN documents d ON c.document_id = d.id 
            WHERE d.processed = TRUE
        """)

        if not chunks:
            return []

        similarities = []
        for chunk in chunks:
            chunk_embedding = self.get_chunk_embedding(chunk['id'])

            if chunk_embedding:
                similarity = self.cosine_similarity(query_vec, chunk_embedding)
            else:
                # No stored vector: fall back to lexical similarity.
                similarity = self._text_similarity(query, chunk['content'])

            similarities.append({
                'chunk': chunk,
                'similarity': similarity
            })

        similarities.sort(key=lambda x: x['similarity'], reverse=True)

        results = []
        for item in similarities[:top_k]:
            chunk = item['chunk']
            results.append({
                'id': chunk['id'],
                'content': chunk['content'],
                'file_name': chunk['file_name'],
                'file_path': chunk['file_path'],
                'similarity': item['similarity'],
                'chunk_index': chunk['chunk_index']
            })

        return results

    def _text_similarity(self, query: str, text: str) -> float:
        """Keyword-based similarity in [0, 1].

        Blends Jaccard overlap of the token sets (weight 0.6) with a
        frequency-ratio score over the shared tokens (weight 0.4). Token
        frequencies are counted over whole whitespace-split tokens — the
        previous ``str.count(word)`` approach counted substring occurrences,
        inflating counts for words embedded in longer words.
        """
        query_tokens = query.lower().split()
        text_tokens = text.lower().split()
        query_words = set(query_tokens)
        text_words = set(text_tokens)

        if not query_words or not text_words:
            return 0.0

        intersection = len(query_words & text_words)
        union = len(query_words | text_words)

        if union == 0:
            return 0.0

        jaccard = intersection / union

        common_words = query_words & text_words
        if not common_words:
            # No shared vocabulary: dampen the (zero-intersection) Jaccard.
            return jaccard * 0.5

        # min/max frequency ratio, averaged over the shared tokens.
        freq_similarity = 0.0
        for word in common_words:
            q_freq = query_tokens.count(word)
            t_freq = text_tokens.count(word)
            freq_similarity += min(q_freq, t_freq) / max(q_freq, t_freq)

        freq_similarity /= len(common_words)

        return (jaccard * 0.6 + freq_similarity * 0.4)

    def store_chunk_embeddings(self, chunk_id: int, content: str):
        """Embed ``content`` and persist the vector as chunk_<id>.json.

        NOTE(review): a flat JSON-file-per-chunk store is a simplification;
        a real deployment would use a proper vector database.
        """
        embeddings = self.get_embeddings([content])
        if not embeddings:
            return

        embedding_file = os.path.join(self.collection_path, f"chunk_{chunk_id}.json")
        with open(embedding_file, 'w', encoding='utf-8') as f:
            json.dump({
                'chunk_id': chunk_id,
                'embedding': embeddings[0]
            }, f, ensure_ascii=False, indent=2)

    def get_chunk_embedding(self, chunk_id: int) -> Optional[List[float]]:
        """Load a previously stored chunk embedding, or None if absent/unreadable."""
        embedding_file = os.path.join(self.collection_path, f"chunk_{chunk_id}.json")
        if not os.path.exists(embedding_file):
            return None

        try:
            with open(embedding_file, 'r', encoding='utf-8') as f:
                data = json.load(f)
                return data.get('embedding')
        except Exception as e:
            print(f"读取嵌入向量失败: {e}")
            return None


# Global vector-database service instance, constructed at import time.
# NOTE: instantiation has a side effect — __init__ creates the vector
# storage directory (vector_db.path) if it does not exist.
vector_db = VectorDBService()