import os
import re
import base64
from typing import List, Dict, Any, Optional
import warnings

# Suppress pdfplumber's noisy FontBBox parsing warning
warnings.filterwarnings("ignore", message="Could get FontBBox from font descriptor because None cannot be parsed as 4 floats")


class BossRAGRetriever:
    """RAG retriever over the "Boss Zhipin" platform user-guide PDF.

    Loads the PDF page by page, extracts chapters, tables and images, and
    offers simple keyword search over each modality plus a combined
    ``hybrid_search``.  All extraction is best-effort: missing files or
    missing optional libraries degrade to empty results rather than raising.
    """

    def __init__(self, pdf_path: str = "Boss 直聘平台使用指南.pdf"):
        # Path to the source PDF; every extractor reads from here.
        self.pdf_path = pdf_path
        # One dict per non-empty page: {'page_content': str, 'metadata': {...}}.
        self.documents: List[Dict[str, Any]] = []
        # Single-element summary list produced by extract_chapters().
        self.chapters: List[Dict[str, Any]] = []
        # Filled by extract_tables() / extract_images().
        self.tables: List[Dict[str, Any]] = []
        self.images: List[Dict[str, Any]] = []
        # Character budget for the chapter summary (was hard-coded as a
        # literal 200 in extract_chapters; now actually used there).
        self.max_chunk_size = 200

    def load_document(self) -> bool:
        """Load the PDF into ``self.documents`` (one entry per non-empty page).

        Returns:
            True on success; False when the file does not exist, pdfplumber
            is not installed, or the PDF cannot be parsed.
        """
        try:
            import pdfplumber

            if not os.path.exists(self.pdf_path):
                print(f"文件 {self.pdf_path} 不存在")
                return False

            with pdfplumber.open(self.pdf_path) as pdf:
                for i, page in enumerate(pdf.pages):
                    page_text = page.extract_text()
                    # Skip pages with no extractable text (e.g. pure images).
                    if page_text:
                        self.documents.append({
                            'page_content': page_text,
                            'metadata': {'page': i + 1, 'source': self.pdf_path}
                        })

            print(f"✅ 成功加载 {len(self.documents)} 页文档")
            return True

        except ImportError:
            print("请安装pdfplumber库: pip install pdfplumber")
            return False
        except Exception as e:
            print(f"加载文档时出错: {e}")
            return False

    def extract_chapters(self) -> List[Dict[str, Any]]:
        """Scan loaded pages for chapter headings and build one summary chapter.

        For each page, the first heading pattern that matches opens a new
        chapter (closing the previous one).  All chapter text is then
        concatenated and truncated to ``self.max_chunk_size`` characters.

        Returns:
            A one-element list holding the summary chapter, or [] when no
            document has been loaded.
        """
        if not self.documents:
            return []

        # Heading patterns, tried in priority order for every page.
        chapter_patterns = [
            r'第[一二三四五六七八九十\d]+章\s*([^\n]+)',  # "Chapter N ..."
            r'第[一二三四五六七八九十\d]+节\s*([^\n]+)',  # "Section N ..."
            r'(\d+\.\d+)\s*([^\n]+)',                     # "1.2 Title"
            r'(\d+\.)\s*([^\n]+)'                         # "1. Title"
        ]

        all_chapters = []
        current_chapter = None

        for doc in self.documents:
            content = doc['page_content']
            page_num = doc['metadata']['page']

            # BUGFIX: the old code did `if current_chapter: break` inside the
            # pattern loop, so once any earlier page had opened a chapter,
            # later pages were only ever checked against the *first* pattern.
            # Decide based on a match found on THIS page instead.
            for pattern in chapter_patterns:
                match = re.search(pattern, content)
                if match:
                    if current_chapter:
                        all_chapters.append(current_chapter)
                    current_chapter = {
                        'title': match.group(0).strip(),
                        # Chapter body = everything after the heading on this page.
                        'content': content[match.end():].strip(),
                        'page': page_num
                    }
                    break  # first matching pattern wins for this page

        if current_chapter:
            all_chapters.append(current_chapter)

        # Concatenate every chapter, then cap at the configured budget.
        total_content = ""
        for chapter in all_chapters:
            total_content += chapter['title'] + "\n" + chapter['content'] + "\n\n"

        if len(total_content) > self.max_chunk_size:
            truncated_content = self._truncate_text(total_content, self.max_chunk_size)
        else:
            truncated_content = total_content

        summary_chapter = {
            'title': '文档摘要',
            'content': truncated_content,
            'page': 1,
            'char_count': len(truncated_content),
            'total_chapters': len(all_chapters),
            'original_length': len(total_content)
        }

        self.chapters = [summary_chapter]
        print(f"✅ 提取到 {len(all_chapters)} 个原始章节，总字数限制为200字")
        return self.chapters

    def _truncate_text(self, text: str, max_chars: int) -> str:
        """Truncate ``text`` to at most ``max_chars`` characters.

        Prefers cutting at the last sentence-ending punctuation mark (CJK or
        ASCII) when that lies within the final 20% of the budget; otherwise
        performs a hard cut and appends an ellipsis.
        """
        if len(text) <= max_chars:
            return text

        truncated = text[:max_chars]
        # Last sentence boundary inside the budget; -1 when none exists.
        last_punctuation = max(
            truncated.rfind('。'),
            truncated.rfind('？'),
            truncated.rfind('！'),
            truncated.rfind('.'),
            truncated.rfind('?'),
            truncated.rfind('!')
        )

        # Only honour the boundary if it keeps at least 80% of the budget.
        if last_punctuation > max_chars * 0.8:
            return truncated[:last_punctuation + 1]

        return truncated + "..."

    def extract_tables(self) -> List[Dict[str, Any]]:
        """Extract tables from every page via pdfplumber.

        Only tables with at least a header row plus one data row are kept.

        Returns:
            The list of table dicts (also stored on ``self.tables``); [] on
            any failure.
        """
        try:
            import pdfplumber

            if not os.path.exists(self.pdf_path):
                return []

            tables = []
            with pdfplumber.open(self.pdf_path) as pdf:
                for page_num, page in enumerate(pdf.pages):
                    page_tables = page.extract_tables()

                    for table_index, table_data in enumerate(page_tables):
                        # Require header + at least one data row.
                        if table_data and len(table_data) > 1:
                            tables.append({
                                'page': page_num + 1,
                                'table_index': table_index + 1,
                                'data': table_data,
                                'headers': table_data[0] if table_data else [],
                                'rows': table_data[1:] if len(table_data) > 1 else []
                            })

            self.tables = tables
            print(f"✅ 提取到 {len(tables)} 个表格")
            return tables

        except ImportError:
            print("请安装pdfplumber库: pip install pdfplumber")
            return []
        except Exception as e:
            print(f"提取表格时出错: {e}")
            return []

    def extract_images(self, output_dir: str = "extracted_images") -> List[Dict[str, Any]]:
        """Extract embedded images, save them as PNGs, and base64-encode them.

        Args:
            output_dir: Directory for the saved PNG files (created if absent).

        Returns:
            The list of image dicts (also stored on ``self.images``); [] on
            any failure.
        """
        try:
            import pdfplumber
            # Pillow is required by pdfplumber's .to_image(); importing here
            # fails fast with a clear message when it is missing.
            from PIL import Image

            if not os.path.exists(self.pdf_path):
                return []

            if not os.path.exists(output_dir):
                os.makedirs(output_dir)

            images = []
            with pdfplumber.open(self.pdf_path) as pdf:
                for page_num, page in enumerate(pdf.pages):
                    page_images = page.images

                    for img_index, img_info in enumerate(page_images):
                        try:
                            # BUGFIX: pdfplumber image dicts expose the
                            # position as x0/top/x1/bottom — there is no
                            # 'bbox' key, so the old .get('bbox', ...) always
                            # fell back to a dummy 100x100 crop region.
                            if isinstance(img_info, dict) and all(
                                    k in img_info for k in ('x0', 'top', 'x1', 'bottom')):
                                bbox = (img_info['x0'], img_info['top'],
                                        img_info['x1'], img_info['bottom'])
                                width = img_info.get('width', bbox[2] - bbox[0])
                                height = img_info.get('height', bbox[3] - bbox[1])
                            else:
                                # Fallback for unexpected entries.
                                bbox = (0, 0, 100, 100)
                                width = 100
                                height = 100

                            # Crop the page to the image region and rasterize.
                            img = page.crop(bbox)
                            img_obj = img.to_image()

                            # Save to disk as PNG.
                            img_name = f"page_{page_num + 1}_img_{img_index + 1}.png"
                            img_path = os.path.join(output_dir, img_name)
                            img_obj.save(img_path)

                            # Also keep a base64 copy for inline usage.
                            base64_image = self._get_image_base64(img_path)

                            images.append({
                                'page': page_num + 1,
                                'index': img_index + 1,
                                'filename': img_name,
                                'path': img_path,
                                'size': os.path.getsize(img_path) if os.path.exists(img_path) else 0,
                                'width': width,
                                'height': height,
                                'base64': base64_image
                            })

                        except Exception as e:
                            # Best-effort: one bad image must not stop the rest.
                            print(f"处理图片时出错: {e}")
                            continue

            self.images = images
            print(f"✅ 提取到 {len(images)} 张图片")
            return images

        except ImportError:
            print("请安装pdfplumber和Pillow库: pip install pdfplumber pillow")
            return []
        except Exception as e:
            print(f"提取图片时出错: {e}")
            return []

    def _get_image_base64(self, image_path: str) -> Optional[str]:
        """Return the base64 (UTF-8 string) encoding of a file, or None on error."""
        try:
            with open(image_path, "rb") as image_file:
                encoded_string = base64.b64encode(image_file.read()).decode('utf-8')
                return encoded_string
        except Exception as e:
            print(f"转换图片到base64时出错: {e}")
            return None

    def search_text(self, query: str, limit: int = 3) -> List[Dict]:
        """Case-insensitive substring search over loaded pages.

        Returns up to ``limit`` hits, each with ~100 characters of context on
        either side of the first occurrence on the page.
        """
        if not self.documents:
            return []

        results = []
        query_lower = query.lower()

        for doc in self.documents:
            content = doc['page_content']
            page_num = doc['metadata']['page']

            if query_lower in content.lower():
                # Window of up to 100 chars around the first occurrence.
                start_pos = content.lower().find(query_lower)
                context_start = max(0, start_pos - 100)
                context_end = min(len(content), start_pos + len(query_lower) + 100)
                context = content[context_start:context_end]

                results.append({
                    'type': 'text',
                    'content': context,
                    'page': page_num,
                    'relevance_score': 1.0,
                    'source': 'document'
                })

        return results[:limit]

    def search_chapters(self, query: str, limit: int = 3) -> List[Dict]:
        """Keyword search over extracted chapters.

        Title matches score 2.0, content matches 1.0 (cumulative); results
        are returned best-first, capped at ``limit``.
        """
        if not self.chapters:
            return []

        results = []
        query_lower = query.lower()

        for chapter in self.chapters:
            title = chapter['title']
            content = chapter['content']

            # Title hits are weighted higher than body hits.
            score = 0
            if query_lower in title.lower():
                score += 2.0
            if query_lower in content.lower():
                score += 1.0

            if score > 0:
                results.append({
                    'type': 'chapter',
                    'title': title,
                    'content': content,
                    'page': chapter['page'],
                    'relevance_score': score,
                    'source': 'chapter'
                })

        results.sort(key=lambda x: x['relevance_score'], reverse=True)
        return results[:limit]

    def search_tables(self, query: str, limit: int = 3) -> List[Dict]:
        """Substring search over the flattened text of every extracted table."""
        if not self.tables:
            return []

        results = []
        query_lower = query.lower()

        for table in self.tables:
            headers = table['headers']
            # Flatten every cell into one searchable string.
            table_text = ' '.join([str(cell) for row in table['data'] for cell in row])

            if query_lower in table_text.lower():
                results.append({
                    'type': 'table',
                    'headers': headers,
                    'data': table['data'],
                    'page': table['page'],
                    'relevance_score': 1.0,
                    'source': 'table'
                })

        return results[:limit]

    def search_images(self, query: str, limit: int = 3) -> List[Dict]:
        """Heuristic image search by page-number mention and keyword synonyms.

        A page reference like "第2页" in the query scores 2.0; each matched
        keyword category scores 1.0.  Results are returned best-first.
        """
        if not self.images:
            return []

        results = []
        query_lower = query.lower()

        # Keyword categories and their synonyms used for matching.
        image_keywords = {
            '封面': ['封面', '标题', '品牌'],
            '目录': ['目录', '索引'],
            '流程图': ['流程', '步骤', '操作'],
            '界面': ['界面', '截图', '屏幕'],
            '表格': ['表格', '数据', '统计']
        }

        for img in self.images:
            score = 0
            matched_keywords = []

            # Page-number mention (simplified: "第N页" always contains "第N",
            # so the old two-way `or` check was redundant).
            if f"第{img['page']}" in query:
                score += 2.0
                matched_keywords.append(f"第{img['page']}页")

            # Keyword-category matching via synonyms.
            for keyword, synonyms in image_keywords.items():
                if any(synonym in query_lower for synonym in synonyms):
                    score += 1.0
                    matched_keywords.append(keyword)

            if score > 0:
                results.append({
                    'type': 'image',
                    'filename': img['filename'],
                    'path': img['path'],
                    'page': img['page'],
                    'size': img['size'],
                    'width': img['width'],
                    'height': img['height'],
                    'base64': img['base64'],
                    'matched_keywords': matched_keywords,
                    'relevance_score': score,
                    'source': 'image'
                })

        results.sort(key=lambda x: x['relevance_score'], reverse=True)
        return results[:limit]

    def hybrid_search(self, query: str, limit_per_type: int = 2) -> Dict[str, List[Dict]]:
        """Run every search modality and return a dict keyed by modality name."""
        results = {
            'text': self.search_text(query, limit_per_type),
            'chapters': self.search_chapters(query, limit_per_type),
            'tables': self.search_tables(query, limit_per_type),
            'images': self.search_images(query, limit_per_type)
        }

        return results

    def display_chapters(self) -> None:
        """Pretty-print the chapter summary (capped at 200 characters)."""
        if not self.chapters:
            print("❌ 没有可展示的章节")
            return

        print("\n" + "="*80)
        print("📚 章节摘要展示 (总字数限制为200字)")
        print("="*80)

        for chapter in self.chapters:
            print(f"\n📖 {chapter['title']}")
            print(f"   当前字数: {chapter['char_count']}字")
            if 'original_length' in chapter:
                print(f"   原始字数: {chapter['original_length']}字")
            if 'total_chapters' in chapter:
                print(f"   包含章节数: {chapter['total_chapters']}个")
            print("-" * 60)
            print(chapter['content'])
            print("-" * 60)

    def initialize(self) -> bool:
        """Load the document and run all extractors.

        Returns:
            True when the document loaded; False otherwise (extractor
            failures are tolerated and reported individually).
        """
        print("🚀 初始化Boss RAG检索器...")

        # Loading the document is the only hard requirement.
        if not self.load_document():
            return False

        # Best-effort extraction passes.
        self.extract_chapters()
        self.extract_tables()
        self.extract_images()

        print("✅ RAG检索器初始化完成")
        return True


def main():
    """Entry point: run the full RAG pipeline and demo hybrid retrieval."""
    retriever = BossRAGRetriever()

    # Bail out early if the document could not be loaded.
    if not retriever.initialize():
        print("❌ RAG检索器初始化失败")
        return

    # Show the 200-character chapter summary first.
    retriever.display_chapters()

    # Sample queries covering text, chapter, table and image retrieval.
    demo_queries = [
        "Boss直聘平台概述",
        "企业端招聘操作",
        "实时沟通功能",
        "第2页的图片",
    ]

    banner = "=" * 60
    print("\n" + banner)
    print("🎯 RAG混合检索演示")
    print(banner)

    for query in demo_queries:
        print(f"\n❓ 查询: {query}")
        print("-" * 40)

        hits = retriever.hybrid_search(query, limit_per_type=1)

        # Per-modality sections are only printed when there are hits.
        if hits['text']:
            print("📄 文本结果:")
            for item in hits['text']:
                print(f"   第{item['page']}页: {item['content'][:100]}...")

        if hits['chapters']:
            print("📚 章节结果:")
            for item in hits['chapters']:
                print(f"   {item['title']} (第{item['page']}页)")
                print(f"   {item['content'][:100]}...")

        if hits['tables']:
            print("📊 表格结果:")
            for item in hits['tables']:
                print(f"   第{item['page']}页表格: {item['headers']}")

        if hits['images']:
            print("🖼️ 图片结果:")
            for item in hits['images']:
                print(f"   第{item['page']}页: {item['filename']}")
                print(f"   匹配关键词: {', '.join(item['matched_keywords'])}")


# Run the demo only when executed as a script (not on import).
if __name__ == "__main__":
    main()