import os
import json
from typing import List, Dict, Optional, Any

# Lightweight RAG service implementation
class RAGService:
    """Singleton keyword-based document store with simple retrieval.

    Documents are split into overlapping chunks, persisted to a JSON file,
    and retrieved by keyword-overlap scoring. Not thread-safe.
    """

    _instance = None       # the singleton instance
    _initialized = False   # guards one-time __init__ work

    # Query-result cache shared by all instances (class-level, like _instance).
    # Eviction is FIFO (dicts preserve insertion order), bounded by _cache_size.
    _query_cache: Dict[str, List[Dict[str, Any]]] = {}
    _cache_size = 100  # maximum number of cached queries

    def __new__(cls):
        # Classic singleton: always hand back the same instance.
        if cls._instance is None:
            cls._instance = super(RAGService, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on every RAGService() call; only initialize once.
        if not self.__class__._initialized:
            self.documents = []        # list of document-info dicts
            self.document_count = 0    # number of documents ever added
            self.storage_path = "documents_store.json"
            self.__class__._initialized = True

            # Load any previously persisted document store.
            self.load_documents()

    def load_documents(self):
        """Load the persisted document store from disk, if present."""
        if os.path.exists(self.storage_path):
            try:
                with open(self.storage_path, "r", encoding="utf-8") as f:
                    data = json.load(f)
                self.documents = data["documents"]
                self.document_count = data["document_count"]
                print(f"成功加载文档存储，包含 {self.document_count} 个文档")
            except Exception as e:
                # Best-effort: a missing/corrupt store just means we start empty.
                print(f"加载文档存储失败: {e}")

    def save_documents(self):
        """Persist the document store to disk as UTF-8 JSON."""
        try:
            with open(self.storage_path, "w", encoding="utf-8") as f:
                json.dump({
                    "documents": self.documents,
                    "document_count": self.document_count
                }, f, ensure_ascii=False, indent=2)
            print(f"文档存储已保存，包含 {self.document_count} 个文档")
        except Exception as e:
            print(f"保存文档存储失败: {e}")

    def add_document(self, text: str, metadata: Optional[Dict] = None) -> bool:
        """Split *text* into chunks, store it, and persist the store.

        Returns True on success, False on any failure.
        """
        if not metadata:
            metadata = {"source": "uploaded_document"}

        try:
            chunks = self._split_text(text)

            document_info = {
                "id": self.document_count + 1,
                "metadata": metadata,
                "chunks": chunks,
                "total_chunks": len(chunks)
            }

            self.documents.append(document_info)
            self.document_count += 1

            # The corpus changed, so any cached query results are now stale.
            self.__class__._query_cache.clear()

            self.save_documents()
            return True
        except Exception as e:
            print(f"添加文档失败: {str(e)}")
            return False

    def _split_text(self, text: str, chunk_size: int = 1000,
                    chunk_overlap: int = 200) -> List[str]:
        """Split *text* into overlapping chunks, preferring sentence boundaries.

        Bug fix: the original always restarted from ``end - chunk_overlap``,
        even after the final chunk had consumed the tail of the text, which
        looped forever (duplicating the tail chunk) whenever
        ``len(text) > chunk_size``. We now stop once the end of the text is
        reached.
        """
        # A stride of zero or less would also never terminate; clamp overlap.
        if chunk_overlap >= chunk_size:
            chunk_overlap = chunk_size // 2

        chunks: List[str] = []
        start = 0
        text_length = len(text)

        while start < text_length:
            end = min(start + chunk_size, text_length)
            if end < text_length:
                # Prefer to break at the nearest period, question mark,
                # exclamation mark or newline — but only in the second half of
                # the window, so chunks don't become degenerately short.
                punctuation_pos = max(
                    text.rfind('.', start, end),
                    text.rfind('?', start, end),
                    text.rfind('!', start, end),
                    text.rfind('\n', start, end)
                )
                if punctuation_pos > start + chunk_size // 2:
                    end = punctuation_pos + 1

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            if end >= text_length:
                break  # tail consumed; stepping back would loop forever
            # Next chunk starts chunk_overlap characters before this one ended.
            start = end - chunk_overlap

        return chunks

    def retrieve_documents(self, query: str, top_k: int = 3) -> List[Dict[str, Any]]:
        """Return up to *top_k* chunks ranked by keyword-overlap score.

        Results are cached per (query, top_k); copies are returned so callers
        cannot mutate cached entries.
        """
        if not self.documents:
            return []

        cache_key = f"{query}_{top_k}"
        if cache_key in self._query_cache:
            # Hand back a copy so the cached list stays pristine.
            return list(self._query_cache[cache_key])

        try:
            # Treat whitespace-separated lowercase tokens as keywords.
            query_words = set(query.lower().split())
            if not query_words:
                return []

            results = []
            for doc in self.documents:
                for chunk in doc["chunks"]:
                    chunk_lower = chunk.lower()  # lower-case once per chunk
                    # Fast pre-filter: skip chunks containing no query word.
                    if any(word in chunk_lower for word in query_words):
                        common_words = query_words.intersection(chunk_lower.split())
                        # Score = fraction of query words present in the chunk.
                        score = len(common_words) / len(query_words)
                        results.append({
                            "content": chunk,
                            "metadata": doc["metadata"],
                            "score": score
                        })

            # Highest-scoring chunks first; keep the top k.
            results.sort(key=lambda x: x["score"], reverse=True)
            top_results = results[:top_k]

            # Bounded FIFO eviction: drop the oldest cached query when full.
            if len(self._query_cache) >= self._cache_size:
                oldest_key = next(iter(self._query_cache))
                del self._query_cache[oldest_key]
            self._query_cache[cache_key] = top_results

            return list(top_results)
        except Exception as e:
            print(f"检索失败: {str(e)}")
            return []

    def get_vector_store_info(self) -> Dict[str, Any]:
        """Return summary statistics about the document store."""
        total_chunks = sum(doc["total_chunks"] for doc in self.documents)
        return {
            "initialized": self.__class__._initialized,
            "document_count": self.document_count,
            "total_chunks": total_chunks,
            "has_documents": len(self.documents) > 0
        }

    def add_documents_from_folder(self, folder_path: str) -> Dict[str, Any]:
        """Recursively add every ``.txt`` file under *folder_path*.

        Returns a result dict with ``success``, ``added_count``,
        ``total_count`` and a list of per-file ``errors``.
        """
        result = {
            "success": False,
            "added_count": 0,
            "total_count": 0,
            "errors": []
        }

        try:
            if not os.path.exists(folder_path):
                result["errors"].append(f"文件夹不存在: {folder_path}")
                return result

            # Collect all text files, recursing into subdirectories.
            text_files = []
            for root, dirs, files in os.walk(folder_path):
                for file in files:
                    if file.endswith(".txt"):
                        text_files.append(os.path.join(root, file))

            result["total_count"] = len(text_files)

            # Add each file independently; one failure doesn't stop the rest.
            for file_path in text_files:
                try:
                    with open(file_path, "r", encoding="utf-8") as f:
                        content = f.read()

                    metadata = {
                        "source": file_path,
                        "filename": os.path.basename(file_path)
                    }

                    if self.add_document(content, metadata):
                        result["added_count"] += 1
                except Exception as e:
                    result["errors"].append(f"处理文件 {file_path} 失败: {str(e)}")

            result["success"] = result["added_count"] > 0
            return result
        except Exception as e:
            result["errors"].append(f"处理文件夹失败: {str(e)}")
            return result

    def add_sample_documents(self):
        """Add built-in sample documents; return how many were added."""
        samples = [
            {
                "text": "RAG（检索增强生成）是一种结合了检索和生成式AI的技术。它通过从外部知识库检索相关信息，并将这些信息作为上下文提供给生成模型，从而增强模型的回答能力。RAG的主要优势在于能够利用最新的、特定领域的知识，而不需要重新训练整个模型。",
                "metadata": {"source": "sample_rag_intro.txt", "title": "RAG技术简介"}
            },
            {
                "text": "向量数据库是RAG系统中的重要组成部分。它用于存储和检索文本的向量表示，通过计算向量相似度来找到与查询最相关的文档片段。常见的向量数据库包括FAISS、Milvus、Pinecone等。在RAG系统中，通常使用嵌入模型将文本转换为向量。",
                "metadata": {"source": "sample_vector_db.txt", "title": "向量数据库在RAG中的应用"}
            },
            {
                "text": "在构建RAG系统时，文本分割是一个关键步骤。过大的文本块可能包含不相关的信息，而过小的文本块可能缺乏完整的上下文。常用的文本分割策略包括按字符数分割、按段落分割、按语义分割等。合适的分割策略可以显著提高RAG系统的检索和生成质量。",
                "metadata": {"source": "sample_text_splitting.txt", "title": "RAG中的文本分割技术"}
            }
        ]

        added_count = 0
        for sample in samples:
            if self.add_document(sample["text"], sample["metadata"]):
                added_count += 1

        return added_count

# Accessor for the shared RAG service instance
def get_rag_service():
    """Return the process-wide :class:`RAGService` singleton."""
    service = RAGService()
    return service