"""
向量数据库实现 - 使用轻量级方案
支持语义相似度搜索，提升知识检索效率
"""
import os
import json
import numpy as np
from typing import List, Dict, Tuple, Optional
import pickle
import hashlib

try:
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity
    HAS_SKLEARN = True
except ImportError:
    HAS_SKLEARN = False
    print("警告: scikit-learn 未安装，向量数据库功能将使用简化模式")

# Fallback cosine-similarity helper (pure Python, used when scikit-learn is unavailable)
def simple_cosine_similarity(vec1, vec2):
    """Cosine similarity between two dense vectors (plain Python lists).

    Returns 0.0 when either vector is empty or has zero magnitude.
    Note: the dot product runs over the common (zipped) prefix, while
    each norm is taken over its full vector, matching the original
    behavior for unequal lengths.
    """
    if not vec1 or not vec2:
        return 0.0

    dot = 0.0
    for a, b in zip(vec1, vec2):
        dot += a * b

    mag1 = sum(a * a for a in vec1) ** 0.5
    mag2 = sum(b * b for b in vec2) ** 0.5

    if mag1 == 0 or mag2 == 0:
        return 0.0

    return dot / (mag1 * mag2)

# Fallback TF-IDF vectorizer (pure Python, used when scikit-learn is unavailable)
class SimpleVectorizer:
    """Fallback TF-IDF vectorizer used when scikit-learn is unavailable.

    Produces dense list-of-float vectors. Tokenization is naive:
    lower-case the text and split on whitespace.
    """

    def __init__(self):
        self.vocab = {}      # word -> column index in the output vectors
        self.idf = {}        # word -> inverse document frequency weight
        self.doc_count = 0   # number of documents in the last fit

    def fit_transform(self, documents):
        """Fit vocabulary/IDF on *documents* and return their vectors.

        Args:
            documents: list of raw text strings.

        Returns:
            A list of dense TF-IDF vectors (one per document); an empty
            list for empty input.
        """
        if not documents:
            return []

        # Bug fix: reset state so repeated refits (this is called on
        # every corpus change) do not accumulate stale IDF entries for
        # words that no longer exist in the corpus.
        self.vocab = {}
        self.idf = {}

        word_counts = {}
        doc_word_presence = {}

        for doc in documents:
            words = self._tokenize(doc)
            # Document frequency: count each word at most once per doc.
            for word in set(words):
                doc_word_presence[word] = doc_word_presence.get(word, 0) + 1
            for word in words:
                word_counts[word] = word_counts.get(word, 0) + 1

        # Stable column order: sort the vocabulary alphabetically.
        self.vocab = {word: idx for idx, word in enumerate(sorted(word_counts.keys()))}

        # Smoothed IDF (same formula as scikit-learn's smooth_idf).
        self.doc_count = len(documents)
        for word, count in doc_word_presence.items():
            self.idf[word] = np.log((self.doc_count + 1) / (count + 1)) + 1

        return [self.transform(doc) for doc in documents]

    def transform(self, document):
        """Vectorize a single document against the fitted vocabulary.

        Words unseen during fit are ignored; an empty or whitespace-only
        document yields an all-zero vector.
        """
        words = self._tokenize(document)
        vector = [0.0] * len(self.vocab)
        if not words:
            return vector

        # Raw term counts for this document.
        word_freq = {}
        for word in words:
            word_freq[word] = word_freq.get(word, 0) + 1

        total = len(words)  # hoisted: loop-invariant
        for word, count in word_freq.items():
            if word in self.vocab:
                idx = self.vocab[word]
                tf = count / total
                vector[idx] = tf * self.idf.get(word, 1.0)

        return vector

    def _tokenize(self, text):
        """Lower-case whitespace tokenization."""
        return text.lower().split()


class VectorDB:
    """Lightweight in-process vector database.

    Documents are embedded with TF-IDF (scikit-learn's TfidfVectorizer
    when available, otherwise this module's SimpleVectorizer fallback)
    and queried by cosine similarity. State can be pickled to disk.
    """

    def __init__(self, db_path: str = None):
        """
        Initialize the vector database.

        Args:
            db_path: storage path for persistence; None keeps
                everything in memory only.
        """
        self.db_path = db_path

        # Pick the vectorizer implementation based on what is installed.
        self.vectorizer = self._new_vectorizer()

        self.documents = []   # raw document texts
        self.vectors = None   # vectorized corpus (sparse matrix or list of lists)
        self.metadata = []    # one metadata dict per document

        if db_path and os.path.exists(db_path):
            self.load(db_path)

    @staticmethod
    def _new_vectorizer():
        """Create a fresh vectorizer matching scikit-learn availability."""
        return TfidfVectorizer() if HAS_SKLEARN else SimpleVectorizer()

    def add_documents(self, documents: List[str], metadata: List[Dict] = None):
        """
        Add documents to the database and refit the index.

        Args:
            documents: document texts to add.
            metadata: optional metadata dicts, one per document; when
                omitted or empty, empty dicts are used.

        Raises:
            ValueError: when a non-empty metadata list does not match
                documents in length (previously this silently misaligned
                documents with their metadata in later search results).
        """
        if not documents:
            return

        if metadata and len(metadata) != len(documents):
            raise ValueError(
                f"metadata length {len(metadata)} != documents length {len(documents)}"
            )

        self.documents.extend(documents)

        if metadata:
            self.metadata.extend(metadata)
        else:
            self.metadata.extend([{} for _ in documents])

        # Refit over the whole corpus so IDF weights stay consistent.
        self._retrain_vectorizer()

        if self.db_path:
            self.save(self.db_path)

    def search(self, query: str, top_k: int = 5, threshold: float = 0.3) -> List[Tuple[str, float, Dict]]:
        """
        Semantic search over the stored documents.

        Args:
            query: query text.
            top_k: maximum number of results to return.
            threshold: minimum cosine similarity for a result.

        Returns:
            List of (document, similarity, metadata) tuples, best first.
        """
        if not self.documents or self.vectors is None:
            return []

        if HAS_SKLEARN:
            query_vec = self.vectorizer.transform([query])
            similarities = cosine_similarity(query_vec, self.vectors).flatten()
        else:
            query_vec = self.vectorizer.transform(query)
            similarities = np.array(
                [simple_cosine_similarity(query_vec, doc_vec) for doc_vec in self.vectors]
            )

        results = []
        # argsort()[::-1] walks indices from most to least similar, so
        # the first below-threshold score ends the scan (the original
        # used `continue`, uselessly scanning the rest).
        for idx in similarities.argsort()[::-1]:
            if similarities[idx] < threshold:
                break

            results.append((
                self.documents[idx],
                float(similarities[idx]),
                self.metadata[idx]
            ))

            if len(results) >= top_k:
                break

        return results

    def _retrain_vectorizer(self):
        """Refit the vectorizer on the current corpus and cache vectors."""
        if not self.documents:
            return

        self.vectors = self.vectorizer.fit_transform(self.documents)

    def save(self, file_path: str):
        """Pickle documents, metadata and vectorizer state to file_path."""
        data = {
            'documents': self.documents,
            'metadata': self.metadata,
            'has_sklearn': HAS_SKLEARN
        }

        # Persist vectorizer internals for load(); note load() refits
        # from the documents anyway, which supersedes this state.
        if HAS_SKLEARN:
            data['vectorizer_vocab'] = self.vectorizer.vocabulary_
            data['vectorizer_idf'] = self.vectorizer.idf_.tolist() if hasattr(self.vectorizer, 'idf_') else None
        else:
            data['vectorizer_vocab'] = self.vectorizer.vocab
            data['vectorizer_idf'] = self.vectorizer.idf
            data['doc_count'] = self.vectorizer.doc_count

        with open(file_path, 'wb') as f:
            pickle.dump(data, f)

    def load(self, file_path: str):
        """Restore state written by save(); resets to empty on failure.

        NOTE(security): pickle.load executes arbitrary code from the
        file — only load database files this application wrote itself.
        """
        try:
            with open(file_path, 'rb') as f:
                data = pickle.load(f)

            self.documents = data['documents']
            self.metadata = data['metadata']

            # Rebuild the vectorizer from the persisted internals.
            if HAS_SKLEARN:
                self.vectorizer = TfidfVectorizer()
                if 'vectorizer_vocab' in data and 'vectorizer_idf' in data:
                    self.vectorizer.vocabulary_ = data['vectorizer_vocab']
                    if data['vectorizer_idf']:
                        self.vectorizer.idf_ = np.array(data['vectorizer_idf'])
            else:
                self.vectorizer = SimpleVectorizer()
                if 'vectorizer_vocab' in data:
                    self.vectorizer.vocab = data['vectorizer_vocab']
                if 'vectorizer_idf' in data:
                    self.vectorizer.idf = data['vectorizer_idf']
                if 'doc_count' in data:
                    self.vectorizer.doc_count = data['doc_count']

            # Refit from the restored documents; guarantees the cached
            # vectors match the current vectorizer implementation even
            # if the file was written under the other mode.
            self._retrain_vectorizer()

        except Exception as e:
            print(f"加载向量数据库失败: {e}")
            self.documents = []
            self.metadata = []
            self.vectors = None

    def clear(self):
        """Drop all data and remove the on-disk file if one exists."""
        self.documents = []
        self.metadata = []
        self.vectors = None
        # Bug fix: the original unconditionally built a TfidfVectorizer
        # here, raising NameError when scikit-learn is not installed.
        self.vectorizer = self._new_vectorizer()

        if self.db_path and os.path.exists(self.db_path):
            os.remove(self.db_path)

    def size(self) -> int:
        """Number of documents currently stored."""
        return len(self.documents)


class KnowledgeVectorDB:
    """Facade that keeps one VectorDB per role, built from JSON files."""

    def __init__(self, knowledge_base_dir: str):
        """
        Initialize the knowledge-base vector store.

        Args:
            knowledge_base_dir: directory holding the per-role
                knowledge-base JSON files (one `<role>.json` each).
        """
        self.knowledge_base_dir = knowledge_base_dir
        self.vector_dbs = {}  # role name -> VectorDB instance
        self._init_vector_dbs()

    def _init_vector_dbs(self):
        """Scan the knowledge-base directory and index every role found."""
        if not os.path.exists(self.knowledge_base_dir):
            os.makedirs(self.knowledge_base_dir)
            return

        for entry in os.listdir(self.knowledge_base_dir):
            if not entry.endswith('.json'):
                continue
            self._load_knowledge_base(entry[:-5])  # strip the ".json" suffix

    def _load_knowledge_base(self, role: str):
        """Build (or reopen) the vector index for a single role."""
        kb_path = os.path.join(self.knowledge_base_dir, f"{role}.json")
        vector_db_path = os.path.join(self.knowledge_base_dir, f"{role}.vector")

        if not os.path.exists(kb_path):
            return

        try:
            db = VectorDB(vector_db_path)

            # An empty index means no persisted vectors: ingest the
            # knowledge-base JSON from scratch.
            if db.size() == 0:
                with open(kb_path, 'r', encoding='utf-8') as f:
                    kb_data = json.load(f)

                docs = []
                metas = []

                if 'qa_pairs' in kb_data:
                    for qa in kb_data['qa_pairs']:
                        question = qa.get('question', '')
                        answer = qa.get('answer', '')

                        # Index question and answer as separate documents,
                        # each carrying its counterpart in the metadata.
                        docs.append(question)
                        metas.append({
                            'type': 'question',
                            'answer': answer,
                            'role': role
                        })

                        docs.append(answer)
                        metas.append({
                            'type': 'answer',
                            'question': question,
                            'role': role
                        })

                db.add_documents(docs, metas)

            self.vector_dbs[role] = db

        except Exception as e:
            print(f"加载知识库 {role} 到向量数据库失败: {e}")

    def search(self, role: str, query: str, top_k: int = 3, threshold: float = 0.2) -> List[Tuple[str, float, Dict]]:
        """
        Search a single role's knowledge base.

        Args:
            role: role name.
            query: query text.
            top_k: maximum number of results.
            threshold: minimum similarity for a result.

        Returns:
            Search results; empty list for an unknown role.
        """
        db = self.vector_dbs.get(role)
        if db is None:
            return []

        return db.search(query, top_k, threshold)

    def update_knowledge_base(self, role: str):
        """Rebuild the vector index for *role* from its JSON file."""
        existing = self.vector_dbs.get(role)
        if existing is not None:
            existing.clear()  # also removes the stale on-disk vector file

        self._load_knowledge_base(role)

    def get_all_roles(self) -> List[str]:
        """Names of every role with a loaded knowledge-base index."""
        return list(self.vector_dbs.keys())