import os
import PyPDF2
from typing import List, Dict
import re
from sentence_transformers import SentenceTransformer
from pymilvus import connections, Collection, CollectionSchema, FieldSchema, DataType, utility

class PDFQASystem:
    """Question answering over PDF documents.

    Text is extracted from PDFs with PyPDF2, cleaned, segmented, embedded
    with a Sentence-Transformers model and stored in a Milvus Lite
    collection for cosine-similarity retrieval.
    """

    # Milvus VARCHAR limit for the "text" field; longer inserts are rejected.
    MAX_TEXT_LENGTH = 2000

    def __init__(self, model_name: str = 'all-MiniLM-L6-v2',
                 collection_name: str = 'pdf_qa_collection'):
        """Initialize the embedding model and the Milvus Lite collection.

        Args:
            model_name: Sentence-Transformers model to load.
            collection_name: Name of the Milvus collection to (re)create.
        """
        print("正在初始化模型和数据库连接...")

        # Cache models under ./models so repeated runs do not re-download.
        # NOTE: env vars must be set before the model is instantiated.
        cache_dir = os.path.join(os.getcwd(), "models")
        os.makedirs(cache_dir, exist_ok=True)
        os.environ['TRANSFORMERS_CACHE'] = cache_dir
        os.environ['HF_HOME'] = cache_dir

        try:
            self.model = SentenceTransformer(model_name, cache_folder=cache_dir)
            print(f"模型 {model_name} 加载成功")
        except Exception as e:
            # Fall back to a smaller model rather than aborting start-up.
            print(f"模型加载失败: {str(e)}")
            backup_model = 'paraphrase-MiniLM-L3-v2'
            print(f"尝试加载备用模型: {backup_model}")
            self.model = SentenceTransformer(backup_model, cache_folder=cache_dir)

        self.collection_name = collection_name
        # Embedding dimensionality drives the FLOAT_VECTOR field's dim.
        self.vector_dim = self.model.get_sentence_embedding_dimension()

        # Milvus Lite persists to a local file under ./milvus_data.
        milvus_path = os.path.join(os.getcwd(), "milvus_data")
        os.makedirs(milvus_path, exist_ok=True)
        db_path = os.path.join(milvus_path, "milvus.db")

        # NOTE(review): `local_path` is not a documented connect() parameter
        # for Milvus Lite (the uri alone selects the db file) — verify.
        connections.connect(
            alias="default",
            uri=db_path,
            local_path=milvus_path
        )

        self._init_collection()

    def _init_collection(self):
        """Drop any existing collection and create a fresh, indexed one.

        NOTE: this wipes previously inserted vectors on every start-up.
        """
        if utility.has_collection(self.collection_name):
            utility.drop_collection(self.collection_name)

        fields = [
            FieldSchema(name="id", dtype=DataType.INT64, is_primary=True, auto_id=True),
            FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=self.MAX_TEXT_LENGTH),
            # Single JSON field keeps source filename and page number together.
            FieldSchema(name="metadata", dtype=DataType.JSON),
            FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=self.vector_dim)
        ]

        schema = CollectionSchema(fields=fields, description="PDF文档问答向量集合")
        self.collection = Collection(name=self.collection_name, schema=schema)

        # HNSW + cosine for approximate nearest-neighbour search.
        index_params = {
            "metric_type": "COSINE",
            "index_type": "HNSW",
            "params": {
                "M": 8,
                "efConstruction": 64
            }
        }
        self.collection.create_index(
            field_name="embedding",
            index_params=index_params,
            index_name="embedding_index"
        )
        print(f"已创建新集合: {self.collection_name}")

        # Load into memory so it is immediately searchable.
        self.collection.load()

    def extract_text_from_pdf(self, pdf_path: str) -> List[Dict]:
        """Extract, clean and segment the text of one PDF file.

        The last segment of each page (except the final page) is held back
        and prepended to the following page so sentences running across a
        page break stay intact, without duplicating any text.

        Args:
            pdf_path: Path of the PDF file to read.

        Returns:
            List of dicts with keys 'text', 'page' (1-based) and 'source'
            (the PDF's basename). Empty list if the file cannot be read.
        """
        print(f"正在处理PDF文件: {pdf_path}")
        text_segments = []
        source = os.path.basename(pdf_path)

        try:
            with open(pdf_path, 'rb') as file:
                pdf_reader = PyPDF2.PdfReader(file)
                total_pages = len(pdf_reader.pages)

                # Heuristic: skip the first two pages (assumed table of
                # contents) on longer documents.
                start_page = 2 if total_pages > 3 else 0

                # Tail segment of the previous page, merged into the next
                # page instead of being emitted twice.
                carry = ""

                for page_num in range(start_page, total_pages):
                    page_text = pdf_reader.pages[page_num].extract_text()
                    text = self._preprocess_text(page_text)

                    if carry:
                        # Prepend the held-back tail so a sentence split by
                        # the page break is re-segmented as one unit.
                        text = (carry + " " + text).strip()
                        carry = ""

                    segments = self._split_text_into_segments(text)

                    # Hold back the final segment of every page except the
                    # last one; nothing is dropped at end-of-document.
                    if segments and page_num < total_pages - 1:
                        carry = segments.pop()

                    for segment in segments:
                        if self._is_valid_segment(segment):
                            text_segments.append({
                                'text': segment.strip(),
                                'page': page_num + 1,  # 1-based page number
                                'source': source
                            })

        except Exception as e:
            print(f"处理PDF文件时出错: {str(e)}")

        return text_segments

    def _preprocess_text(self, text: str) -> str:
        """Clean one page of raw extracted text.

        Removes probable header/footer lines, control characters and
        duplicated punctuation, while preserving line breaks so that
        paragraph boundaries survive for segmentation.
        """
        if not text:
            return ""

        # Header/footer removal must happen while real lines still exist.
        lines = text.strip().split('\n')
        if len(lines) > 2:
            if len(lines[0].strip()) < 50:  # probable header
                lines = lines[1:]
            if len(lines[-1].strip()) < 50:  # probable footer / page number
                lines = lines[:-1]
        text = '\n'.join(lines)

        # Strip control characters and 8-bit artifacts. CJK text is
        # unaffected: those code points are above U+00FF.
        text = re.sub(r'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\xff]', '', text)

        # Collapse horizontal whitespace but keep newlines so the '\n\n'
        # paragraph split in _split_text_into_segments still works.
        text = re.sub(r'[ \t]+', ' ', text)
        text = re.sub(r'\n{3,}', '\n\n', text)

        # Collapse runs of duplicated punctuation into a single full stop.
        text = re.sub(r'[.,。，]{2,}', '.', text)

        return text.strip()

    def _split_text_into_segments(self, text: str, max_length: int = 1000) -> List[str]:
        """Split cleaned text into segments of at most max_length chars.

        Short paragraphs are packed together; over-long paragraphs are
        split at sentence boundaries. Output preserves document order.

        Args:
            text: Pre-processed text ('\\n\\n' separates paragraphs).
            max_length: Soft upper bound; a single sentence longer than
                this still becomes one (over-long) segment.

        Returns:
            List of segment strings in original order.
        """
        if not text:
            return []

        segments = []
        current_segment = ""

        for paragraph in text.split('\n\n'):
            paragraph = re.sub(r'\s+', ' ', paragraph).strip()
            if not paragraph:
                continue

            if len(paragraph) > max_length:
                # Flush pending short paragraphs first so output stays in
                # document order.
                if current_segment:
                    segments.append(current_segment)
                    current_segment = ""
                segments.extend(self._split_paragraph_by_sentence(paragraph, max_length))
            elif len(current_segment) + len(paragraph) + 1 <= max_length:
                # Pack this paragraph onto the accumulating segment.
                current_segment = (current_segment + " " + paragraph).strip()
            else:
                if current_segment:
                    segments.append(current_segment)
                current_segment = paragraph

        # Flush the trailing accumulated segment.
        if current_segment:
            segments.append(current_segment)

        return segments

    def _split_paragraph_by_sentence(self, paragraph: str, max_length: int) -> List[str]:
        """Split one over-long paragraph at sentence-ending punctuation."""
        # Capturing split alternates sentence text and its punctuation mark.
        parts = re.split(r'([。！？.!?])', paragraph)
        sentences = []
        for i in range(0, len(parts), 2):
            sentence = parts[i]
            if i + 1 < len(parts):
                sentence += parts[i + 1]  # re-attach the punctuation mark
            if sentence:
                sentences.append(sentence.strip())

        segments = []
        current = ""
        for sentence in sentences:
            if len(current) + len(sentence) <= max_length:
                current = (current + " " + sentence).strip()
            else:
                if current:
                    segments.append(current)
                current = sentence
        if current:
            segments.append(current)
        return segments

    def _is_valid_segment(self, segment: str) -> bool:
        """Return True if the segment is worth indexing.

        Filters out very short fragments, table-of-contents lines and
        header/footer artifacts (page numbers, chapter titles).
        """
        if not segment or len(segment.strip()) < 10:  # too short to be useful
            return False

        # TOC-style lines: dot leaders or dotted section numbers (1.2.3).
        if re.search(r'\.{3,}', segment) or \
           re.search(r'\d+\s*\.\s*\d+\s*\.\s*\d+', segment):
            return False

        # Short header/footer-like lines.
        if len(segment) < 50 and (
            re.search(r'^\d+$', segment) or  # bare number
            re.search(r'^第\s*\d+\s*页$', segment) or  # "page N" marker
            re.search(r'^第\s*\d+\s*章', segment)  # chapter heading
        ):
            return False

        return True

    def insert_documents(self, text_segments: List[Dict]):
        """Embed text segments and insert them into the Milvus collection.

        Args:
            text_segments: Dicts with 'text', 'page' and 'source' keys, as
                produced by extract_text_from_pdf.
        """
        if not text_segments:
            print("没有文本片段需要处理")
            return

        print(f"正在处理 {len(text_segments)} 个文本片段...")

        texts = [segment['text'] for segment in text_segments]

        # Milvus expects float32 vectors.
        embeddings = self.model.encode(texts)
        embeddings = embeddings.astype('float32')

        data = [
            {
                # Truncate defensively: the VARCHAR field rejects longer text.
                'text': segment['text'][:self.MAX_TEXT_LENGTH],
                'metadata': {  # source filename and page number travel together
                    'source': segment['source'],
                    'page': segment['page']
                },
                'embedding': embedding.tolist()
            }
            for segment, embedding in zip(text_segments, embeddings)
        ]

        self.collection.insert(data)
        self.collection.flush()  # make the rows durable and searchable
        print(f"已插入 {len(data)} 条数据")

    def search_similar_texts(self, query: str, top_k: int = 3) -> List[Dict]:
        """Return up to top_k segments most similar to the query.

        Hits are deduplicated per (source, page) and returned in reading
        order (sorted by source, then page), not similarity order.

        Args:
            query: Natural-language question.
            top_k: Maximum number of results to return.

        Returns:
            Dicts with 'text', 'source', 'page' and 'similarity' keys.
        """
        print(f"正在搜索与问题'{query}'最相关的{top_k}个片段...")

        self.collection.load()

        query_embedding = self.model.encode([query])
        query_embedding = query_embedding.astype('float32')

        search_params = {
            "metric_type": "COSINE",
            "params": {"ef": 64}  # HNSW search width
        }

        # Over-fetch (top_k * 2) because same-page duplicates are collapsed.
        results = self.collection.search(
            data=[query_embedding[0].tolist()],
            anns_field="embedding",
            param=search_params,
            limit=top_k * 2,
            output_fields=["text", "metadata"]
        )

        similar_texts = []
        seen_pages = set()
        done = False

        for hits in results:
            for hit in hits:
                metadata = hit.entity.get('metadata', {}) or {}
                page = metadata.get('page')
                source = metadata.get('source')

                # Keep only the best (first) hit per (source, page).
                if (source, page) in seen_pages:
                    continue
                seen_pages.add((source, page))

                similar_texts.append({
                    'text': hit.entity.get('text'),
                    'source': source,
                    'page': page,
                    'similarity': hit.distance
                })

                if len(similar_texts) >= top_k:
                    done = True
                    break
            if done:  # break out of the outer loop too
                break

        # Present results in reading order rather than similarity order.
        similar_texts.sort(key=lambda x: (x['source'], x['page']))

        self.collection.release()

        return similar_texts

def main():
    # Build the QA system with the lightweight paraphrase model.
    qa_system = PDFQASystem(model_name='paraphrase-MiniLM-L3-v2')

    pdf_dir = "pdfs"  # directory scanned for input PDF files
    if not os.path.exists(pdf_dir):
        # First run: create the directory and ask the user to populate it.
        os.makedirs(pdf_dir)
        print(f"请将PDF文件放入 {pdf_dir} 目录中")
        return

    # Index every PDF found in the directory.
    pdf_names = [name for name in os.listdir(pdf_dir)
                 if name.lower().endswith('.pdf')]
    for name in pdf_names:
        segments = qa_system.extract_text_from_pdf(os.path.join(pdf_dir, name))
        qa_system.insert_documents(segments)

    # Interactive question loop; typing 'quit' exits.
    print("\n现在可以开始提问了(输入'quit'退出):")
    while True:
        question = input("\n请输入您的问题: ").strip()
        if question.lower() == 'quit':
            break

        matches = qa_system.search_similar_texts(question, top_k=3)

        print("\n找到以下相关内容:")
        for rank, match in enumerate(matches, 1):
            print(f"\n{rank}. 相关度: {match['similarity']:.4f}")
            print(f"来源: {match['source']} (第{match['page']}页)")
            print(f"内容: {match['text']}")

if __name__ == "__main__":
    main()