#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
增强版文档处理器
提供更好的文档分割、结构化和知识库构建
"""

import re
from pathlib import Path
from typing import Dict, List, Optional
from sqlalchemy.orm import Session
from loguru import logger

from app.models.operation import OperationDocument, OperationCategory
from app.services.enhanced_pdf_parser import EnhancedPDFParser
from app.services.knowledge_service import KnowledgeService


class EnhancedDocumentProcessor:
    """Enhanced document processor.

    Parses PDF operation manuals, splits them into structured chunks
    (by chapter, by operation flow, and by table), persists each chunk
    as an ``OperationDocument`` row, and registers it with the
    knowledge service for embedding-based search.
    """

    def __init__(self):
        self.pdf_parser = EnhancedPDFParser()
        self.knowledge_service = KnowledgeService()

        # Chunking configuration (measured in characters).
        self.max_chunk_size = 2000  # upper bound for a single chunk
        self.min_chunk_size = 200   # content shorter than this is skipped
        self.overlap_size = 100     # NOTE(review): configured but not yet used by the splitter — confirm intent

    def process_document(self, db: Session, file_path: str) -> Dict:
        """Process a single document file.

        Args:
            db: active SQLAlchemy session; committed by callees on success.
            file_path: path of the file to ingest.

        Returns:
            Result dict describing the created documents (see
            :meth:`_process_enhanced_pdf`).

        Raises:
            FileNotFoundError: if the file does not exist.
            ValueError: if the extension is not ``.pdf``.
        """
        try:
            path = Path(file_path)

            if not path.exists():
                raise FileNotFoundError(f"文件不存在: {path}")

            logger.info(f"开始处理文档: {path.name}")

            # Dispatch on the (case-insensitive) file extension.
            if path.suffix.lower() == '.pdf':
                result = self._process_enhanced_pdf(db, path)
            else:
                raise ValueError(f"不支持的文件格式: {path.suffix}")

            logger.info(f"文档处理完成: {path.name}")
            return result

        except Exception as e:
            logger.error(f"文档处理失败: {e}")
            raise

    def process_knowledge_base_directory(self, db: Session, directory_path: Optional[str] = None) -> Dict:
        """Process every PDF file in a knowledge-base directory.

        Falls back to ``settings.knowledge_base_path`` when
        *directory_path* is omitted. A failure on one file is recorded
        and processing continues with the remaining files.

        Raises:
            FileNotFoundError: if the directory does not exist.
        """
        from app.core.config import settings

        if directory_path is None:
            directory_path = settings.knowledge_base_path

        directory = Path(directory_path)

        if not directory.exists():
            raise FileNotFoundError(f"目录不存在: {directory}")

        logger.info(f"开始处理知识库目录: {directory}")

        processed_files = []
        failed_files = []
        total_documents_created = 0

        # Sort for a deterministic processing order (glob order is OS-dependent).
        pdf_files = sorted(directory.glob("*.pdf"))

        for pdf_file in pdf_files:
            try:
                logger.info(f"处理文件: {pdf_file.name}")
                result = self.process_document(db, str(pdf_file))

                created_count = len(result.get("documents_created", []))
                processed_files.append({
                    "file_name": pdf_file.name,
                    "file_path": str(pdf_file),
                    "documents_created": created_count,
                    "processing_status": result.get("processing_status", "unknown")
                })

                total_documents_created += created_count

            except Exception as e:
                # Best-effort batch: record the failure and keep going.
                logger.error(f"处理文件失败 {pdf_file.name}: {e}")
                failed_files.append({
                    "file_name": pdf_file.name,
                    "file_path": str(pdf_file),
                    "error": str(e)
                })

        return {
            "processed_files": processed_files,
            "failed_files": failed_files,
            "total_documents_created": total_documents_created,
            "total_files_processed": len(processed_files),
            "total_files_failed": len(failed_files)
        }

    def _process_enhanced_pdf(self, db: Session, file_path: Path) -> Dict:
        """Parse one PDF and create all structured documents for it."""
        # Parse with the enhanced PDF parser.
        parsed_data = self.pdf_parser.parse_pdf(str(file_path))

        # Persist the raw parse result as JSON next to the knowledge base.
        output_file = self._save_parsed_data(parsed_data, file_path)

        # Create the structured OperationDocument records.
        documents_created = self._create_structured_documents(db, parsed_data)

        return {
            "file_path": str(file_path),
            "file_type": "pdf",
            "parsed_data_file": output_file,
            "summary": parsed_data["summary"],
            "document_structure": parsed_data["document_structure"],
            "documents_created": documents_created,
            "processing_status": "success"
        }

    def _create_structured_documents(self, db: Session, parsed_data: Dict) -> List[Dict]:
        """Create structured operation documents from parsed PDF data.

        Commits on success; rolls back and re-raises on any failure so
        the database is never left with a partial import.
        """
        documents_created = []

        try:
            # Get or create the target category.
            category = self._get_or_create_category(
                db,
                "银行零售贷款系统",
                "银行零售贷款系统操作手册相关文档"
            )

            # Documents grouped by functional module (chapter).
            functional_docs = self._create_functional_documents(db, parsed_data, category)
            documents_created.extend(functional_docs)

            # Documents for individual operation flows.
            process_docs = self._create_process_documents(db, parsed_data, category)
            documents_created.extend(process_docs)

            # Documents for extracted tables.
            table_docs = self._create_table_documents(db, parsed_data, category)
            documents_created.extend(table_docs)

            db.commit()
            logger.info(f"创建了 {len(documents_created)} 个结构化文档")

        except Exception as e:
            logger.error(f"创建结构化文档失败: {e}")
            db.rollback()
            raise

        return documents_created

    def _persist_document(self, db: Session, document: OperationDocument):
        """Add *document* to the session, flush to obtain its id, and index it.

        Returns the embedding id assigned by the knowledge service.
        """
        db.add(document)
        db.flush()  # flush (not commit) so document.id is populated
        return self.knowledge_service.add_document(db, document)

    def _create_functional_documents(self, db: Session, parsed_data: Dict, category: OperationCategory) -> List[Dict]:
        """Create one document per chapter (split into parts when too long)."""
        documents = []

        # Group pages into chapters.
        chapters = self._group_pages_by_chapter(parsed_data["pages"])

        for chapter_title, chapter_pages in chapters.items():
            # Merge all page texts of the chapter.
            chapter_content = self._merge_chapter_content(chapter_pages)

            if len(chapter_content.strip()) > self.min_chunk_size:
                # Split overlong chapters into readable chunks.
                chunks = self._split_content_intelligently(chapter_content)

                for i, chunk in enumerate(chunks):
                    title = f"{chapter_title}"
                    if len(chunks) > 1:
                        title += f" - 第{i+1}部分"

                    document = OperationDocument(
                        title=title,
                        description=f"来自{parsed_data['file_name']}的{chapter_title}章节内容",
                        content=chunk,
                        keywords=self._extract_smart_keywords(chunk),
                        platform="银行零售贷款系统",
                        difficulty_level=self._assess_difficulty_level(chunk),
                        risk_level=self._assess_risk_level(chunk),
                        category_id=category.id
                    )

                    embedding_id = self._persist_document(db, document)

                    documents.append({
                        "id": document.id,
                        "title": title,
                        "type": "functional",
                        "chapter": chapter_title,
                        "embedding_id": embedding_id
                    })

        return documents

    def _create_process_documents(self, db: Session, parsed_data: Dict, category: OperationCategory) -> List[Dict]:
        """Create one document per operation-flow page."""
        documents = []

        # Pages the parser classified as operation instructions.
        process_pages = [
            page for page in parsed_data["pages"]
            if page.get("page_type") == "操作说明页"
        ]

        for page in process_pages:
            key_info = page.get("key_info", {})

            # Assemble the operation-flow document section by section.
            content_parts = []

            # Menu paths.
            if key_info.get("menu_paths"):
                content_parts.append("## 菜单路径")
                content_parts.extend(key_info["menu_paths"])
                content_parts.append("")

            # Operation steps.
            if key_info.get("operation_steps"):
                content_parts.append("## 操作步骤")
                content_parts.extend(key_info["operation_steps"])
                content_parts.append("")

            # Warnings / precautions.
            if key_info.get("warnings"):
                content_parts.append("## 注意事项")
                content_parts.extend(key_info["warnings"])
                content_parts.append("")

            # Remaining page text.
            content_parts.append("## 详细说明")
            content_parts.append(page.get("cleaned_text", ""))

            content = "\n".join(content_parts)

            if len(content.strip()) > self.min_chunk_size:
                # Derive a title from the page structure / menu path.
                title = self._extract_operation_title(page)

                document = OperationDocument(
                    title=title,
                    description=f"来自{parsed_data['file_name']}第{page['page_number']}页的操作流程",
                    content=content,
                    keywords=self._extract_smart_keywords(content),
                    platform="银行零售贷款系统",
                    difficulty_level=self._assess_difficulty_level(content),
                    risk_level=self._assess_risk_level(content),
                    category_id=category.id
                )

                embedding_id = self._persist_document(db, document)

                documents.append({
                    "id": document.id,
                    "title": title,
                    "type": "process",
                    "page_number": page["page_number"],
                    "embedding_id": embedding_id
                })

        return documents

    def _create_table_documents(self, db: Session, parsed_data: Dict, category: OperationCategory) -> List[Dict]:
        """Create one document per non-empty extracted table."""
        documents = []

        for table in parsed_data.get("tables", []):
            if table["text_representation"].strip():
                title = f"{parsed_data['file_name']} - 第{table['page_number']}页表格"

                # Table metadata followed by its text rendering.
                content = f"## 表格信息\n"
                content += f"页码: {table['page_number']}\n"
                content += f"行数: {table['rows']}\n"
                content += f"列数: {table['columns']}\n\n"
                content += f"## 表格内容\n{table['text_representation']}"

                document = OperationDocument(
                    title=title,
                    description=f"来自{parsed_data['file_name']}第{table['page_number']}页的数据表格",
                    content=content,
                    keywords="表格,数据,配置",
                    platform="银行零售贷款系统",
                    difficulty_level=1,
                    risk_level=1,
                    category_id=category.id
                )

                embedding_id = self._persist_document(db, document)

                documents.append({
                    "id": document.id,
                    "title": title,
                    "type": "table",
                    "page_number": table["page_number"],
                    "embedding_id": embedding_id
                })

        return documents

    def _group_pages_by_chapter(self, pages: List[Dict]) -> Dict[str, List[Dict]]:
        """Group pages into chapters keyed by the chapter header.

        A header counts as a chapter start if it contains "章" or starts
        with a Chinese ordinal like "一、". Pages before the first
        header land under "其他内容".
        """
        chapters: Dict[str, List[Dict]] = {}
        current_chapter = "其他内容"

        for page in pages:
            headers = page.get("structure", {}).get("headers", [])

            # Does this page open a new chapter?
            for header in headers:
                if "章" in header or re.match(r'^[一二三四五六七八九十]+、', header):
                    current_chapter = header
                    break

            chapters.setdefault(current_chapter, []).append(page)

        return chapters

    def _merge_chapter_content(self, pages: List[Dict]) -> str:
        """Join the non-empty cleaned texts of *pages* with blank lines."""
        return "\n\n".join(
            page.get("cleaned_text", "")
            for page in pages
            if page.get("cleaned_text", "").strip()
        )

    def _split_content_intelligently(self, content: str) -> List[str]:
        """Split *content* into chunks of at most ``max_chunk_size`` chars.

        Splits on paragraph boundaries first; a paragraph that is itself
        too long is further split on sentence boundaries. A single
        sentence longer than the limit is emitted as-is.
        """
        if len(content) <= self.max_chunk_size:
            return [content]

        chunks = []
        current_chunk = ""

        # First pass: paragraph boundaries.
        paragraphs = content.split('\n\n')

        for paragraph in paragraphs:
            # Would adding this paragraph overflow the current chunk?
            if len(current_chunk) + len(paragraph) > self.max_chunk_size:
                if current_chunk:
                    chunks.append(current_chunk.strip())
                    current_chunk = ""

                # Second pass: sentence boundaries for oversized paragraphs.
                if len(paragraph) > self.max_chunk_size:
                    sentences = self._split_by_sentences(paragraph)
                    for sentence in sentences:
                        if len(current_chunk) + len(sentence) > self.max_chunk_size:
                            if current_chunk:
                                chunks.append(current_chunk.strip())
                                current_chunk = sentence
                            else:
                                # Single sentence over the limit: emit as-is.
                                chunks.append(sentence)
                        else:
                            current_chunk += sentence + " "
                else:
                    current_chunk = paragraph
            else:
                if current_chunk:
                    current_chunk += "\n\n" + paragraph
                else:
                    current_chunk = paragraph

        if current_chunk.strip():
            chunks.append(current_chunk.strip())

        return chunks

    def _split_by_sentences(self, text: str) -> List[str]:
        """Split *text* on Chinese sentence punctuation.

        Note: every sentence is re-terminated with "。", regardless of
        its original terminator.
        """
        sentences = re.split(r'[。！？；]', text)
        return [s.strip() + "。" for s in sentences if s.strip()]

    def _extract_smart_keywords(self, content: str) -> str:
        """Extract up to 10 keywords as a comma-joined string.

        Order is deterministic: curated banking terms first, then
        operation terms, then derived markers — so identical content
        always yields an identical keyword string.
        """
        # Curated banking-domain vocabulary.
        banking_keywords = [
            "贷款", "授信", "客户", "合同", "借据", "还款", "利率", "风险",
            "审批", "放款", "逾期", "担保", "抵押", "征信", "额度", "期限",
            "本金", "利息", "罚息", "违约金", "展期", "核销", "分类",
            "管理", "查询", "配置", "参数", "模板", "规则", "检查"
        ]

        # UI / operation vocabulary.
        operation_keywords = [
            "菜单", "路径", "点击", "选择", "输入", "确认", "保存", "提交",
            "查看", "编辑", "删除", "新增", "修改", "导出", "导入", "打印"
        ]

        keywords: List[str] = []
        seen = set()

        def _add(keyword: str) -> None:
            # Insertion-ordered dedupe (a plain set would be nondeterministic).
            if keyword and keyword not in seen:
                seen.add(keyword)
                keywords.append(keyword)

        for keyword in banking_keywords:
            if keyword in content:
                _add(keyword)

        for keyword in operation_keywords:
            if keyword in content:
                _add(keyword)

        # Flag content that contains numeric configuration values.
        if re.search(r'\d', content):
            _add("数字配置")

        # Module headings such as "一、客户管理" (at most 3, short ones only).
        modules = re.findall(r'[一二三四五六七八九十]+、\s*([^。\n]+)', content)
        for module in modules[:3]:
            if len(module.strip()) < 20:
                _add(module.strip())

        return ",".join(keywords[:10])  # cap at 10 keywords

    def _assess_difficulty_level(self, content: str) -> int:
        """Heuristic difficulty rating: 1 (low) .. 3 (high).

        Based on the presence of complexity indicators and the number
        of numbered operation steps.
        """
        complexity_indicators = [
            "审批", "风险", "核销", "分类", "配置", "规则",
            "批量", "导入", "导出", "权限", "授权"
        ]

        complexity_score = sum(
            1 for indicator in complexity_indicators if indicator in content
        )

        # Count numbered steps like "1." or "2)".
        step_count = len(re.findall(r'\d+[.)]\s*', content))

        if complexity_score >= 3 or step_count >= 8:
            return 3  # high difficulty
        elif complexity_score >= 1 or step_count >= 4:
            return 2  # medium difficulty
        else:
            return 1  # low difficulty

    def _assess_risk_level(self, content: str) -> int:
        """Heuristic risk rating: 1 (low) .. 3 (high).

        High-risk keywords count double; the summed score maps to the
        three levels.
        """
        high_risk_keywords = [
            "删除", "核销", "失效", "禁用", "清空", "重置",
            "批量", "导入", "权限", "授权", "审批"
        ]

        medium_risk_keywords = [
            "修改", "编辑", "配置", "设置", "变更", "调整"
        ]

        risk_score = 0

        for keyword in high_risk_keywords:
            if keyword in content:
                risk_score += 2

        for keyword in medium_risk_keywords:
            if keyword in content:
                risk_score += 1

        if risk_score >= 4:
            return 3  # high risk
        elif risk_score >= 2:
            return 2  # medium risk
        else:
            return 1  # low risk

    def _extract_operation_title(self, page: Dict) -> str:
        """Derive a title for an operation page.

        Preference order: first header, first section, last segment of
        a menu path, then a page-number fallback.
        """
        structure = page.get("structure", {})

        # 1) First structural header.
        headers = structure.get("headers", [])
        if headers:
            return headers[0]

        # 2) First section name.
        sections = structure.get("sections", [])
        if sections:
            return sections[0]

        # 3) Final segment of the first dashed menu path.
        key_info = page.get("key_info", {})
        menu_paths = key_info.get("menu_paths", [])
        if menu_paths:
            for path in menu_paths:
                if "-" in path:
                    return path.split("-")[-1].strip()

        # 4) Fallback title.
        return f"第{page['page_number']}页操作说明"

    def _get_or_create_category(self, db: Session, name: str, description: str) -> OperationCategory:
        """Return the category named *name*, creating it if necessary."""
        category = db.query(OperationCategory).filter(OperationCategory.name == name).first()

        if not category:
            category = OperationCategory(
                name=name,
                description=description
            )
            db.add(category)
            db.flush()  # flush so category.id is available to callers

        return category

    def _save_parsed_data(self, parsed_data: Dict, file_path: Path) -> str:
        """Save the raw parse result as JSON and return the output path."""
        from app.core.config import settings

        # Output lives under <knowledge_base_path>/parsed/.
        output_dir = Path(settings.knowledge_base_path) / "parsed"
        # parents=True: knowledge_base_path itself may not exist yet.
        output_dir.mkdir(parents=True, exist_ok=True)

        output_file = output_dir / f"{file_path.stem}_parsed.json"

        # Delegate serialization to the parser.
        return self.pdf_parser.save_parsed_data(parsed_data, str(output_file))
