# -*- coding: utf-8 -*-
"""
RAG向量知识库构建模块
基于ChromaDB和sentence-transformers构建佛山大学知识库
"""

import json
import os
from pathlib import Path
from typing import List, Dict, Any, Optional
from sentence_transformers import SentenceTransformer
import chromadb
from chromadb.config import Settings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
import numpy as np
from tqdm import tqdm
import time
import torch

class UniversityRAGBuilder:
    """Foshan University RAG knowledge-base builder.

    Loads pre-chunked documents from a JSON file, embeds them with a
    HuggingFace sentence-embedding model, and persists a ChromaDB vector
    index that can later be queried for retrieval-augmented generation.
    """

    def __init__(
        self,
        rag_data_path: str,
        vector_store_path: str = "./university_knowledge",
        embedding_model: str = "./models/m3e-base",
        chunk_size: int = 512,
        chunk_overlap: int = 50
    ):
        """Configure paths and chunking parameters.

        Args:
            rag_data_path: Directory containing the RAG JSON data file.
            vector_store_path: Directory where the Chroma index is persisted.
            embedding_model: Name or local path of the embedding model.
            chunk_size: Maximum characters per text chunk.
            chunk_overlap: Character overlap between adjacent chunks.
        """
        self.rag_data_path = Path(rag_data_path)
        self.vector_store_path = Path(vector_store_path)
        self.embedding_model_name = embedding_model
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap

        # Components are created lazily by initialize_components().
        self.embeddings = None
        self.vectorstore = None
        self.text_splitter = None
        self.rag_documents: List[Dict[str, Any]] = []

        # parents=True: plain mkdir(exist_ok=True) fails when the store
        # path is nested and intermediate directories do not exist yet.
        self.vector_store_path.mkdir(parents=True, exist_ok=True)

    @staticmethod
    def _select_device() -> str:
        """Return the best available torch device for embedding inference."""
        # getattr chain guards torch builds where backends.mps is absent;
        # checking only hasattr(torch, 'backends') still raised
        # AttributeError on such builds when touching torch.backends.mps.
        mps_backend = getattr(getattr(torch, "backends", None), "mps", None)
        if mps_backend is not None and mps_backend.is_available():
            return "mps"
        if torch.cuda.is_available():  # generalization: also use CUDA GPUs
            return "cuda"
        return "cpu"

    def initialize_components(self):
        """Initialize the embedding model and the text splitter."""
        print("初始化RAG组件...")

        print(f"加载embedding模型: {self.embedding_model_name}")
        self.embeddings = HuggingFaceEmbeddings(
            model_name=self.embedding_model_name,
            model_kwargs={'device': self._select_device()},
            encode_kwargs={'normalize_embeddings': True}
        )

        # Splitter prefers paragraph/sentence boundaries (including
        # Chinese punctuation) before falling back to plain spaces.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=self.chunk_size,
            chunk_overlap=self.chunk_overlap,
            length_function=len,
            separators=["\n\n", "\n", "。", "！", "？", "；", " "]
        )

        print("RAG组件初始化完成")

    def load_rag_data(self):
        """Load the pre-chunked RAG documents from JSON.

        Raises:
            FileNotFoundError: If the expected JSON file is missing.
            ValueError: If the JSON root is not a list of document dicts.
        """
        rag_file = self.rag_data_path / "foshan_university_rag_go.json"

        if not rag_file.exists():
            raise FileNotFoundError(f"RAG数据文件不存在: {rag_file}")

        print(f"加载RAG数据: {rag_file}")
        with open(rag_file, 'r', encoding='utf-8') as f:
            self.rag_documents = json.load(f)

        # Downstream code iterates and calls .get() on items; fail fast
        # on a malformed file instead of crashing mid-build.
        if not isinstance(self.rag_documents, list):
            raise ValueError(f"RAG数据格式错误，期望JSON列表: {rag_file}")

        print(f"加载完成: {len(self.rag_documents)} 个文档块")

    def process_documents(self) -> List[Dict[str, Any]]:
        """Normalize raw document dicts into id/text/metadata records.

        Filters out chunks shorter than 10 characters and guarantees every
        record carries a non-empty id (Chroma rejects empty/duplicate ids).

        Returns:
            List of dicts with 'id', 'text' and flat 'metadata' keys.
        """
        processed_docs = []

        print("处理文档...")
        for index, doc in enumerate(tqdm(self.rag_documents, desc="处理文档")):
            text_chunk = doc.get('text_chunk', '').strip()
            # Skip empty or trivially short chunks: they only add noise.
            if not text_chunk or len(text_chunk) < 10:
                continue

            metadata = doc.get('metadata', {})

            processed_doc = {
                # Positional fallback: empty chunk_id values would collide
                # inside Chroma, which requires unique, non-empty ids.
                'id': doc.get('chunk_id') or f"chunk_{index}",
                'text': text_chunk,
                'metadata': {
                    'source_doc': metadata.get('source_doc', ''),
                    'source_url': metadata.get('source_url', ''),
                    'title': metadata.get('title', ''),
                    'category': metadata.get('category', ''),
                    'chunk_type': metadata.get('chunk_type', ''),
                    'chunk_index': metadata.get('chunk_index', 0)
                }
            }

            processed_docs.append(processed_doc)

        print(f"文档处理完成: {len(processed_docs)} 个有效文档")
        return processed_docs

    def build_vector_store(self, batch_size: int = 100):
        """Embed all processed documents and persist the Chroma index.

        Args:
            batch_size: Documents embedded per add_texts call; batching
                keeps peak memory bounded on large corpora.

        Returns:
            The populated Chroma vector store.

        Raises:
            ValueError: If no valid documents are available to index.
        """
        if self.embeddings is None:
            self.initialize_components()

        documents = self.process_documents()
        if not documents:
            # Building an empty index would silently yield a useless store.
            raise ValueError("没有有效文档可用于构建向量数据库")

        print("开始构建向量数据库...")

        texts = [doc['text'] for doc in documents]
        metadatas = [doc['metadata'] for doc in documents]
        ids = [doc['id'] for doc in documents]

        print(f"分批构建向量索引 (批大小: {batch_size})...")

        vectorstore = Chroma(
            persist_directory=str(self.vector_store_path),
            embedding_function=self.embeddings
        )

        failed_batches = 0
        for i in tqdm(range(0, len(texts), batch_size), desc="构建向量索引"):
            try:
                vectorstore.add_texts(
                    texts=texts[i:i + batch_size],
                    metadatas=metadatas[i:i + batch_size],
                    ids=ids[i:i + batch_size]
                )
            except Exception as e:
                # Best-effort: one failing batch is reported but does not
                # abort the whole build.
                failed_batches += 1
                print(f"批次 {i//batch_size + 1} 处理失败: {e}")

        if failed_batches:
            print(f"警告: {failed_batches} 个批次写入失败")

        vectorstore.persist()
        self.vectorstore = vectorstore

        print(f"向量数据库构建完成，保存到: {self.vector_store_path}")
        return vectorstore

    def test_retrieval(self, query: str = "佛山大学的图书馆开放时间是什么？", k: int = 5):
        """Run a similarity search for *query* and print the top-k hits.

        Args:
            query: Natural-language question to search for.
            k: Number of results to retrieve.
        """
        if self.vectorstore is None:
            print("向量数据库未构建，请先调用 build_vector_store()")
            return

        print(f"\n测试查询: {query}")

        results = self.vectorstore.similarity_search_with_score(query, k=k)

        print(f"检索到 {len(results)} 个相关结果:")
        for i, (doc, score) in enumerate(results, 1):
            # NOTE: Chroma returns a distance, so smaller scores mean
            # closer matches.
            print(f"\n结果 {i} (相似度: {score:.4f}):")
            print(f"内容: {doc.page_content[:200]}...")
            print(f"元数据: {doc.metadata}")

    def load_existing_vectorstore(self):
        """Open a previously persisted vector store, if one exists.

        Returns:
            The loaded Chroma store, or None when nothing is persisted.
        """
        # __init__ always creates the directory, so exists() alone is
        # never informative here: also require it to contain index files.
        if not self.vector_store_path.exists() or not any(self.vector_store_path.iterdir()):
            print(f"向量数据库不存在: {self.vector_store_path}")
            return None

        if self.embeddings is None:
            self.initialize_components()

        print(f"加载现有向量数据库: {self.vector_store_path}")
        self.vectorstore = Chroma(
            persist_directory=str(self.vector_store_path),
            embedding_function=self.embeddings
        )

        # _collection is a private langchain attribute; used here only to
        # report the stored document count.
        collection = self.vectorstore._collection
        count = collection.count()
        print(f"向量数据库加载成功，包含 {count} 个文档")

        return self.vectorstore

    def get_retrieval_stats(self):
        """Return a summary dict of the retrieval system configuration.

        Returns:
            Dict with document count and build parameters, or None when
            no vector store has been built or loaded yet.
        """
        if self.vectorstore is None:
            return None

        collection = self.vectorstore._collection
        count = collection.count()

        stats = {
            "total_documents": count,
            "embedding_model": self.embedding_model_name,
            "chunk_size": self.chunk_size,
            "chunk_overlap": self.chunk_overlap,
            "vector_store_path": str(self.vector_store_path)
        }

        return stats

def main():
    """Build the Foshan University knowledge base and run smoke-test queries."""
    # Input corpus produced by the Go cleaning pipeline, and the output
    # directory for the persisted Chroma index.
    rag_data_path = "./go-llm-cleaner/go_cleaned_data"
    vector_store_path = "./university_knowledge"

    rag_builder = UniversityRAGBuilder(
        rag_data_path=rag_data_path,
        vector_store_path=vector_store_path
    )

    try:
        # Load the pre-chunked documents, then embed and persist them.
        rag_builder.load_rag_data()
        rag_builder.build_vector_store(batch_size=50)

        # Smoke-test retrieval with a few representative questions.
        test_queries = [
            "佛山大学的图书馆开放时间是什么？",
            "如何申请国家基金？",
            "学校的疫情防控措施有哪些？",
            "电子信息工程学院的师资情况如何？"
        ]

        for query in test_queries:
            rag_builder.test_retrieval(query, k=3)

        # get_retrieval_stats() returns None when no store was built;
        # guard to avoid an AttributeError on stats.items().
        stats = rag_builder.get_retrieval_stats()
        if stats:
            print(f"\n=== RAG系统统计 ===")
            for key, value in stats.items():
                print(f"{key}: {value}")

        print("\n✅ RAG向量知识库构建完成！")

    except Exception as e:
        # Top-level boundary: report, then re-raise for a non-zero exit.
        print(f"❌ 构建失败: {str(e)}")
        raise
# Script entry point: build the knowledge base when run directly,
# but do nothing on import so the class can be reused elsewhere.
if __name__ == "__main__":
    main()