import os
import json
import hashlib
import sys
from datetime import datetime
from typing import List, Dict, Any
from chromadb import PersistentClient
from elasticsearch import Elasticsearch

# Add the project root (four directory levels above this file) to sys.path.
# NOTE(review): nothing visible in this module imports project-local packages
# after this line — presumably downstream callers rely on it; confirm before removing.
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))


class KnowledgeBaseMaintainer:
    """
    Knowledge-base maintenance helper.

    Keeps a ChromaDB vector store and an Elasticsearch full-text index in
    sync: adding new documents, removing outdated ones, and deduplicating
    chunks. Each maintenance action is appended to a JSON log file.
    """

    def __init__(self, chroma_path: str = "./chroma_db3", chroma_collection: str = "simple_rag3",
                 es_host: str = "http://localhost:9200", es_index: str = "simple_rag_es2"):
        """
        Initialize the maintenance tool.

        :param chroma_path: Filesystem path of the persistent ChromaDB store.
        :param chroma_collection: ChromaDB collection name.
        :param es_host: Elasticsearch host URL.
        :param es_index: Elasticsearch index name.
        """
        # ChromaDB client and collection (created on disk if absent).
        self.chroma_path = chroma_path
        self.chroma_collection_name = chroma_collection
        self.chroma_client = PersistentClient(path=chroma_path)
        self.chroma_collection = self.chroma_client.get_or_create_collection(name=chroma_collection)

        # Elasticsearch client.
        self.es_host = es_host
        self.es_index = es_index
        self.es_client = Elasticsearch([es_host])

        # JSON file that accumulates maintenance-action log entries.
        self.maintenance_log_file = "./knowledge_maintenance_log.json"

    def _vectorize_single_text(self, text: str) -> list:
        """
        Embed a single text via DashScope's OpenAI-compatible endpoint.

        :param text: Text to vectorize.
        :return: 1024-dimensional embedding; a zero vector on failure
                 (placeholder so batch processing can continue, at the cost
                 of a meaningless similarity for that document).
        :raises ValueError: if the DASHSCOPE_API_KEY env var is not set.
        """
        # Imported lazily so the class is usable without the openai package
        # when no vectorization is needed.
        from openai import OpenAI

        api_key = os.getenv("DASHSCOPE_API_KEY")
        if not api_key:
            raise ValueError("未设置环境变量 DASHSCOPE_API_KEY")

        # OpenAI-compatible client pointed at the Aliyun Bailian service.
        client = OpenAI(
            api_key=api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
        )

        try:
            completion = client.embeddings.create(
                model="text-embedding-v4",
                input=[text],
                dimensions=1024,  # must match the dimension the ChromaDB collection expects
                encoding_format="float"
            )

            if completion:
                return completion.data[0].embedding
            print("文本向量化失败，无返回结果")
            return [0.0] * 1024  # zero-vector placeholder
        except Exception as e:
            print(f"文本向量化过程中发生异常: {e}")
            return [0.0] * 1024  # zero-vector placeholder

    def add_new_documents(self, documents: list) -> dict:
        """
        Add new documents to both ChromaDB and Elasticsearch.

        :param documents: List of dicts; each needs a non-empty "content"
                          and may carry "source", "title", "author",
                          "category".
        :return: {"added_count", "failed_count", "total_processed"}
        :raises Exception: if the batch as a whole cannot be processed.
        """
        added_count = 0
        failed_count = 0

        try:
            # Reuse the clients built in __init__ instead of reconnecting.
            for doc in documents:
                try:
                    content = doc.get("content", "")
                    if not content:
                        # Nothing to index: count as failed and continue.
                        failed_count += 1
                        continue

                    vector = self._vectorize_single_text(content)

                    # Shared id helper keeps both stores' ids identical for
                    # the same document (content hash + timestamp).
                    doc_id = self._generate_document_id(doc)

                    timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
                    metadata = {
                        "source": doc.get("source", "added_document"),
                        "timestamp": timestamp,
                        "title": doc.get("title", ""),
                        "author": doc.get("author", ""),
                        "category": doc.get("category", ""),
                        "word_count": str(len(content)),
                    }

                    # Vector-store entry.
                    self.chroma_collection.add(
                        ids=[doc_id],
                        embeddings=[vector],
                        metadatas=[metadata],
                        documents=[content]
                    )

                    # Full-text entry, same id so deletions stay in sync.
                    es_doc = {
                        "content": content,
                        "source": doc.get("source", "added_document"),
                        "title": doc.get("title", ""),
                        "author": doc.get("author", ""),
                        "category": doc.get("category", ""),
                        "timestamp": timestamp
                    }
                    self.es_client.index(
                        index=self.es_index,
                        id=doc_id,
                        document=es_doc
                    )

                    added_count += 1
                except Exception as e:
                    print(f"添加文档时出错: {e}")
                    failed_count += 1

            # Make the new documents searchable immediately.
            self.es_client.indices.refresh(index=self.es_index)

            result = {
                "added_count": added_count,
                "failed_count": failed_count,
                "total_processed": added_count + failed_count
            }
            # Log for consistency with deduplicate_chunks.
            self._log_maintenance_action("add_new", result)
            return result

        except Exception as e:
            raise Exception(f"添加新文档过程中发生错误: {str(e)}") from e

    def remove_outdated_documents(self, criteria: dict) -> dict:
        """
        Remove outdated documents from both stores.

        :param criteria: Deletion criteria; any combination of:
                         - {"before_date": "YYYY-MM-DD"}: docs stamped before
                           the date (applied on the Elasticsearch side only)
                         - {"by_source": "source_name"}
                         - {"by_category": "category_name"}
        :return: {"removed_count", "criteria"}
        :raises Exception: on unrecoverable errors.
        """
        removed_count = 0
        try:
            chroma_where_clause = {}
            es_query_conditions = []

            if "before_date" in criteria:
                # ChromaDB metadata filters cannot compare date strings, so
                # the date criterion is enforced on Elasticsearch only.
                es_query_conditions.append({
                    "range": {
                        "timestamp": {
                            "lt": criteria["before_date"]
                        }
                    }
                })

            if "by_source" in criteria:
                chroma_where_clause["source"] = criteria["by_source"]
                es_query_conditions.append({
                    "term": {
                        "source.keyword": criteria["by_source"]
                    }
                })

            if "by_category" in criteria:
                chroma_where_clause["category"] = criteria["by_category"]
                es_query_conditions.append({
                    "term": {
                        "category.keyword": criteria["by_category"]
                    }
                })

            # Delete matches from ChromaDB (via the shared collection handle).
            if chroma_where_clause:
                try:
                    # ChromaDB requires an explicit $and when filtering on
                    # more than one metadata field.
                    if len(chroma_where_clause) > 1:
                        chroma_where = {"$and": [{k: v} for k, v in chroma_where_clause.items()]}
                    else:
                        chroma_where = chroma_where_clause
                    query_result = self.chroma_collection.get(where=chroma_where)
                    if query_result and query_result['ids']:
                        self.chroma_collection.delete(ids=query_result['ids'])
                        removed_count += len(query_result['ids'])
                except Exception as e:
                    print(f"从ChromaDB删除文档时出错: {e}")

            # Delete matches from Elasticsearch.
            if es_query_conditions:
                try:
                    es_query = {
                        "query": {
                            "bool": {
                                "must": es_query_conditions
                            }
                        }
                    }
                    delete_result = self.es_client.delete_by_query(
                        index=self.es_index,
                        body=es_query
                    )
                    # The two stores should hold the same documents; report
                    # the larger count in case one side failed.
                    removed_count = max(removed_count, delete_result.get('deleted', 0))
                except Exception as e:
                    print(f"从Elasticsearch删除文档时出错: {e}")

            self.es_client.indices.refresh(index=self.es_index)

            result = {
                "removed_count": removed_count,
                "criteria": criteria
            }
            # Log for consistency with deduplicate_chunks.
            self._log_maintenance_action("remove_outdated", result)
            return result

        except Exception as e:
            raise Exception(f"删除过时文档过程中发生错误: {str(e)}") from e

    def deduplicate_chunks(self) -> Dict[str, Any]:
        """
        Deduplicate: remove chunks whose content is byte-identical
        (MD5 of the content), keeping the first occurrence.

        :return: Dedup statistics, or {"error": ...} if fetching failed.
        """
        # Fetch every document currently stored in ChromaDB.
        try:
            chroma_results = self.chroma_collection.get()
            # Guard against missing/None fields in the result payload.
            documents = chroma_results.get("documents") or []
            ids = chroma_results.get("ids") or []
        except Exception as e:
            print(f"从ChromaDB获取文档时出错: {e}")
            return {"error": f"获取文档失败: {e}"}

        # Hash-based duplicate detection: first id seen wins.
        unique_docs = {}
        duplicates = []

        for i, doc in enumerate(documents):
            doc_hash = hashlib.md5(doc.encode('utf-8')).hexdigest()

            if doc_hash in unique_docs:
                duplicates.append({
                    "duplicate_id": ids[i],
                    "original_id": unique_docs[doc_hash]
                })
            else:
                unique_docs[doc_hash] = ids[i]

        # Remove the duplicates from both stores.
        removed_count = 0
        failed_count = 0

        for dup in duplicates:
            try:
                self.chroma_collection.delete(ids=[dup["duplicate_id"]])

                # Best-effort ES delete: the same id may not exist there
                # (e.g. documents ingested into ChromaDB only).
                try:
                    self.es_client.delete(
                        index=self.es_index,
                        id=dup["duplicate_id"]
                    )
                except Exception as es_delete_error:
                    print(f"从Elasticsearch删除文档时出错: {es_delete_error}")
                    # ChromaDB deletion succeeded, so not counted as failed.

                removed_count += 1
            except Exception as e:
                print(f"删除重复文档时出错: {e}")
                failed_count += 1

        # Best-effort refresh so searches see the deletions.
        try:
            self.es_client.indices.refresh(index=self.es_index)
        except Exception as e:
            print(f"刷新Elasticsearch索引时出错: {e}")

        result = {
            "duplicate_count": len(duplicates),
            "removed_count": removed_count,
            "failed_count": failed_count,
            "unique_count": len(unique_docs)
        }

        self._log_maintenance_action("deduplicate", result)

        return result

    def perform_maintenance(self, actions: List[str], **kwargs) -> Dict[str, Any]:
        """
        Run a sequence of maintenance tasks.

        :param actions: Operations to run, e.g. ["deduplicate", "add_new",
                        "remove_outdated"]; unknown names yield an error
                        entry instead of raising.
        :param kwargs: "new_documents" for add_new, "outdated_criteria"
                       for remove_outdated.
        :return: Mapping of action name to that action's result dict.
        """
        results = {}

        for action in actions:
            if action == "deduplicate":
                results[action] = self.deduplicate_chunks()
            elif action == "add_new":
                new_docs = kwargs.get("new_documents", [])
                results[action] = self.add_new_documents(new_docs)
            elif action == "remove_outdated":
                criteria = kwargs.get("outdated_criteria", {})
                results[action] = self.remove_outdated_documents(criteria)
            else:
                results[action] = {"error": f"未知的维护操作: {action}"}

        return results

    def _generate_document_id(self, document: Dict[str, Any]) -> str:
        """
        Generate a unique id for a document.

        :param document: Document dict; only "content" is read.
        :return: "doc_<md5-prefix-of-content>_<YYYYmmddHHMMSSffffff>".
        """
        content = document.get("content", "")
        timestamp = datetime.now().strftime("%Y%m%d%H%M%S%f")
        content_hash = hashlib.md5(content.encode('utf-8')).hexdigest()[:8]
        return f"doc_{content_hash}_{timestamp}"

    def _log_maintenance_action(self, action: str, result: Dict[str, Any]):
        """
        Append a maintenance action entry to the JSON log file.

        Read/parse and write failures are printed and swallowed so that
        logging never breaks the maintenance operation itself.

        :param action: Action name (e.g. "deduplicate").
        :param result: The action's result dict.
        """
        log_entry = {
            "timestamp": datetime.now().isoformat(),
            "action": action,
            "result": result
        }

        # Load existing log entries, if any.
        logs = []
        if os.path.exists(self.maintenance_log_file):
            try:
                with open(self.maintenance_log_file, 'r', encoding='utf-8') as f:
                    logs = json.load(f)
            except Exception as e:
                print(f"读取维护日志时出错: {e}")

        logs.append(log_entry)

        # Persist the full log back to disk.
        try:
            with open(self.maintenance_log_file, 'w', encoding='utf-8') as f:
                json.dump(logs, f, ensure_ascii=False, indent=2)
        except Exception as e:
            print(f"保存维护日志时出错: {e}")


# Usage example: run dedup first, then ingest one sample document.
if __name__ == "__main__":
    maintainer = KnowledgeBaseMaintainer()

    # Deduplicate existing chunks and show the statistics.
    print("去重结果:", maintainer.deduplicate_chunks())

    # Ingest a single sample document into both stores.
    sample_docs = [
        {
            "content": "这是新的文档内容",
            "source": "new_source",
            "title": "新文档标题",
            "author": "作者",
            "category": "分类",
        }
    ]
    print("添加文档结果:", maintainer.add_new_documents(sample_docs))