#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
离线处理模块：文件加载 -> 读取 -> 切割 -> 向量化 -> 存储
"""

import os
import json
import pickle
import pdfplumber
import chromadb
from typing import List, Dict, Any
from langchain.text_splitter import RecursiveCharacterTextSplitter
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
from rag.emm.text_rag import TextEmbedding
import logging

# 配置日志
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class OfflineProcessor:
    """Offline ingestion pipeline: load a PDF, split it into chunks,
    embed each chunk, and persist the vectors to ChromaDB."""

    def __init__(self,
                 pdf_path: str = "论人与自然的相处之道.pdf",
                 chunk_size: int = 500,
                 chunk_overlap: int = 50,
                 collection_name: str = "nature_human_relationship"):
        """
        Initialize the offline processor.

        Args:
            pdf_path: Path to the source PDF file.
            chunk_size: Maximum chunk size in characters.
            chunk_overlap: Character overlap between consecutive chunks.
            collection_name: Name of the target ChromaDB collection.
        """
        self.pdf_path = pdf_path
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.collection_name = collection_name

        # Splitter tries paragraph/sentence boundaries (including Chinese
        # punctuation) before falling back to arbitrary character positions.
        self.text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=chunk_size,
            chunk_overlap=chunk_overlap,
            separators=["\n\n", "\n", "。", "，", " ", ""]
        )
        self.embedding_model = TextEmbedding()

        # Persistent on-disk vector store; the collection itself is created
        # lazily in store_vectors().
        self.chroma_client = chromadb.PersistentClient(path="./chromadb_nature")
        self.collection = None

        # Sidecar JSON file describing the ingested chunks.
        self.metadata_file = "document_metadata.json"

    def load_and_read_pdf(self) -> str:
        """
        Step 1: Load the PDF and extract its text.

        Returns:
            str: Extracted text with surrounding whitespace stripped.

        Raises:
            FileNotFoundError: If the PDF file does not exist.
        """
        logger.info(f"开始加载PDF文件: {self.pdf_path}")

        if not os.path.exists(self.pdf_path):
            raise FileNotFoundError(f"PDF文件不存在: {self.pdf_path}")

        try:
            # Collect per-page text and join once at the end; repeated
            # string += is quadratic for large documents.
            pages_text = []
            with pdfplumber.open(self.pdf_path) as pdf:
                logger.info(f"PDF总页数: {len(pdf.pages)}")

                for i, page in enumerate(pdf.pages):
                    page_text = page.extract_text()
                    if page_text:
                        pages_text.append(page_text)
                        logger.info(f"已处理第{i+1}页，字符数: {len(page_text)}")

            text_content = "\n".join(pages_text)
            logger.info(f"PDF读取完成，总字符数: {len(text_content)}")
            return text_content.strip()

        except Exception as e:
            logger.error(f"读取PDF文件失败: {e}")
            raise

    def split_text(self, text: str) -> List[Dict[str, Any]]:
        """
        Step 2: Split the text into overlapping chunks.

        Args:
            text: Raw document text.

        Returns:
            List[Dict]: One record per chunk with id, text, index, size,
            source path, and a metadata dict suitable for ChromaDB.
        """
        # Local stdlib import keeps this edit self-contained.
        from datetime import datetime, timezone

        logger.info("开始文本切割...")

        try:
            # Delegate the actual splitting to LangChain's recursive splitter.
            chunks = self.text_splitter.split_text(text)

            # Real ingestion timestamp (previously a hard-coded placeholder
            # date "2024-01-01").
            created_at = datetime.now(timezone.utc).isoformat()

            chunk_docs = []
            for i, chunk in enumerate(chunks):
                chunk_docs.append({
                    "id": f"chunk_{i}",
                    "text": chunk,
                    "chunk_index": i,
                    "chunk_size": len(chunk),
                    "source": self.pdf_path,
                    "metadata": {
                        "source": self.pdf_path,
                        "chunk_index": i,
                        "chunk_size": len(chunk),
                        "created_at": created_at
                    }
                })

            logger.info(f"文本切割完成，共生成 {len(chunk_docs)} 个文本块")
            return chunk_docs

        except Exception as e:
            logger.error(f"文本切割失败: {e}")
            raise

    def vectorize_chunks(self, chunk_docs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """
        Step 3: Embed each text chunk in place.

        Args:
            chunk_docs: Chunk records produced by split_text().

        Returns:
            List[Dict]: The same records, each with an "embedding" key added.
        """
        logger.info("开始向量化文本块...")

        try:
            for i, chunk_doc in enumerate(chunk_docs):
                # The embedding model returns a batch; we embed one text at a
                # time, so take the first result (empty list if none).
                embeddings = self.embedding_model.generate_embeddings(chunk_doc["text"])
                embedding = embeddings[0] if embeddings else []

                # Some backends return a nested list ([[...]]); flatten one
                # level so ChromaDB receives a flat vector.
                if isinstance(embedding, list) and len(embedding) > 0:
                    if isinstance(embedding[0], list):
                        embedding = embedding[0]

                chunk_doc["embedding"] = embedding
                logger.info(f"已向量化第{i+1}个文本块，向量维度: {len(embedding)}")

            logger.info("向量化完成")
            return chunk_docs

        except Exception as e:
            logger.error(f"向量化失败: {e}")
            raise

    def store_vectors(self, chunk_docs: List[Dict[str, Any]]) -> None:
        """
        Step 4: Persist the embedded chunks to ChromaDB.

        Args:
            chunk_docs: Chunk records that already carry an "embedding" key.
        """
        logger.info("开始存储向量到ChromaDB...")

        try:
            # get_or_create_collection replaces the previous bare-except
            # probe of get_collection/create_collection; the metadata is
            # only applied when the collection is first created.
            self.collection = self.chroma_client.get_or_create_collection(
                name=self.collection_name,
                metadata={"description": "人与自然相处之道文档向量存储"}
            )
            logger.info(f"使用集合: {self.collection_name}")

            # Column-wise payload as required by Collection.add().
            ids = [doc["id"] for doc in chunk_docs]
            texts = [doc["text"] for doc in chunk_docs]
            embeddings = [doc["embedding"] for doc in chunk_docs]
            metadatas = [doc["metadata"] for doc in chunk_docs]

            self.collection.add(
                ids=ids,
                documents=texts,
                embeddings=embeddings,
                metadatas=metadatas
            )

            logger.info(f"成功存储 {len(chunk_docs)} 个向量到ChromaDB")

            # Also persist a human-readable summary alongside the store.
            self.save_metadata(chunk_docs)

        except Exception as e:
            logger.error(f"向量存储失败: {e}")
            raise

    def save_metadata(self, chunk_docs: List[Dict[str, Any]]) -> None:
        """
        Write a JSON summary of the ingestion run to self.metadata_file.

        Failures are logged but not re-raised: metadata is a best-effort
        sidecar and must not fail the main pipeline.

        Args:
            chunk_docs: Chunk records produced by split_text().
        """
        try:
            metadata = {
                "source_file": self.pdf_path,
                "total_chunks": len(chunk_docs),
                "chunk_size": self.chunk_size,
                "chunk_overlap": self.chunk_overlap,
                "collection_name": self.collection_name,
                "chunks_info": [
                    {
                        "id": doc["id"],
                        "chunk_index": doc["chunk_index"],
                        "chunk_size": doc["chunk_size"],
                        # Truncate long chunks to a 100-char preview.
                        "text_preview": doc["text"][:100] + "..." if len(doc["text"]) > 100 else doc["text"]
                    }
                    for doc in chunk_docs
                ]
            }

            with open(self.metadata_file, "w", encoding="utf-8") as f:
                json.dump(metadata, f, ensure_ascii=False, indent=2)

            logger.info(f"元数据已保存到: {self.metadata_file}")

        except Exception as e:
            logger.error(f"保存元数据失败: {e}")

    def process_offline(self) -> Dict[str, Any]:
        """
        Run the complete offline pipeline: load → split → embed → store.

        Returns:
            Dict: Summary statistics of the run.

        Raises:
            Exception: Re-raises any failure from the underlying steps.
        """
        logger.info("=" * 50)
        logger.info("开始离线处理流程")
        logger.info("=" * 50)

        try:
            # Step 1: load and read the PDF.
            text_content = self.load_and_read_pdf()

            # Step 2: split into chunks.
            chunk_docs = self.split_text(text_content)

            # Step 3: embed the chunks.
            chunk_docs = self.vectorize_chunks(chunk_docs)

            # Step 4: persist vectors.
            self.store_vectors(chunk_docs)

            result = {
                "status": "success",
                "source_file": self.pdf_path,
                "total_chunks": len(chunk_docs),
                "total_characters": len(text_content),
                "chunk_size": self.chunk_size,
                "chunk_overlap": self.chunk_overlap,
                "collection_name": self.collection_name,
                "metadata_file": self.metadata_file
            }

            logger.info("=" * 50)
            logger.info("离线处理完成!")
            logger.info(f"处理结果: {result}")
            logger.info("=" * 50)

            return result

        except Exception as e:
            logger.error(f"离线处理失败: {e}")
            raise

    def get_collection_info(self) -> Dict[str, Any]:
        """
        Report the current size and metadata of the collection.

        Returns:
            Dict: Collection name, document count and collection metadata,
            or {"error": ...} if the collection cannot be reached.
        """
        try:
            # Lazily attach to the collection if store_vectors has not run.
            if not self.collection:
                self.collection = self.chroma_client.get_collection(self.collection_name)

            count = self.collection.count()
            return {
                "collection_name": self.collection_name,
                "total_documents": count,
                "collection_metadata": self.collection.metadata
            }
        except Exception as e:
            logger.error(f"获取集合信息失败: {e}")
            return {"error": str(e)}


def main():
    """Run the offline ingestion pipeline and print a summary to stdout."""
    processor = OfflineProcessor()

    outcome = processor.process_offline()
    print(f"处理结果: {outcome}")

    # Report how the target collection looks after ingestion.
    collection_info = processor.get_collection_info()
    print(f"集合信息: {collection_info}")


# Script entry point: run the full offline pipeline only when executed
# directly, never on import.
if __name__ == "__main__":
    main()
