"""
文档解析器 - 支持多种文档格式的解析
包含创新的结构感知解析功能
"""

import asyncio
from typing import Dict, Any, List, Optional, Union
from pathlib import Path
import mimetypes
from abc import ABC, abstractmethod

# PDF processing
import fitz  # PyMuPDF
from docx import Document as DocxDocument
import pandas as pd
from bs4 import BeautifulSoup
import json
import re

from ...core.interfaces import Document
from ...core.events import EventEmitter
from ...core.config import config


class DocumentParser(ABC):
    """Abstract base class for all document parsers.

    Concrete subclasses turn raw file bytes into a ``Document`` via
    ``parse`` and advertise the file types they handle via ``supports``.
    """

    @abstractmethod
    async def parse(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Parse raw document bytes into a ``Document``."""
        ...

    @abstractmethod
    def supports(self, file_extension: str, mime_type: str = None) -> bool:
        """Return True if this parser can handle the given file type."""
        ...


class PDFParser(DocumentParser):
    """Structure-aware PDF parser built on PyMuPDF.

    Extracts per-page sections (detected titles plus body text) together
    with font/size/color statistics; degrades to plain-text extraction
    when the structured pass fails.
    """

    def supports(self, file_extension: str, mime_type: str = None) -> bool:
        """Return True for .pdf files (mime_type is currently ignored)."""
        return file_extension.lower() in ['.pdf']

    async def parse(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Parse a PDF, preserving structure information.

        Args:
            content: Raw PDF bytes.
            metadata: Base metadata; ``id`` is propagated to the Document.

        Returns:
            A Document whose metadata carries page count, section
            structure and formatting statistics.
        """
        try:
            doc = fitz.open(stream=content, filetype="pdf")
            try:
                structured_content = await self._extract_structured_content(doc)
                full_text = self._combine_structured_content(structured_content)

                enhanced_metadata = {
                    **metadata,
                    "page_count": doc.page_count,
                    "structure": structured_content["structure"],
                    "formatting": structured_content["formatting"],
                    "parser": "PDFParser",
                    "parse_method": "structure_aware"
                }
            finally:
                # Always release the PyMuPDF document, even when the
                # structured extraction raises (otherwise it leaks before
                # the fallback re-opens the same bytes).
                doc.close()

            return Document(
                id=metadata.get("id", ""),
                content=full_text,
                metadata=enhanced_metadata
            )

        except Exception:
            # Degrade gracefully to plain text extraction.
            return await self._fallback_parse(content, metadata)

    async def _extract_structured_content(self, doc) -> Dict[str, Any]:
        """Extract per-page sections and formatting stats from an open doc."""
        structure = []
        formatting = {
            "fonts": set(),
            "font_sizes": set(),
            "colors": set()
        }

        for page_num in range(doc.page_count):
            page = doc[page_num]

            # "dict" mode returns blocks -> lines -> spans with font info.
            blocks = page.get_text("dict")

            page_structure = {
                "page": page_num + 1,
                "sections": []
            }

            current_section = None

            for block in blocks["blocks"]:
                if "lines" not in block:
                    continue  # image/non-text block
                for line in block["lines"]:
                    for span in line["spans"]:
                        text = span["text"].strip()
                        if not text:
                            continue

                        # Record formatting statistics.
                        font = span["font"]
                        size = span["size"]

                        formatting["fonts"].add(font)
                        formatting["font_sizes"].add(size)
                        if "color" in span:
                            # sRGB integer as reported by PyMuPDF; was
                            # previously collected but never filled in.
                            formatting["colors"].add(span["color"])

                        if self._is_likely_title(text, size, font):
                            # Start a new section; flush the previous one.
                            if current_section:
                                page_structure["sections"].append(current_section)

                            current_section = {
                                "title": text,
                                "content": [],
                                "font": font,
                                "size": size,
                                "type": "section"
                            }
                        else:
                            # Body text before any title goes into an
                            # implicit "Content" section.
                            if not current_section:
                                current_section = {
                                    "title": "Content",
                                    "content": [],
                                    "type": "content"
                                }

                            current_section["content"].append(text)

            # Flush the last open section of the page.
            if current_section:
                page_structure["sections"].append(current_section)

            structure.append(page_structure)

        # Sets are not JSON-serializable; convert for downstream storage.
        formatting["fonts"] = list(formatting["fonts"])
        formatting["font_sizes"] = list(formatting["font_sizes"])
        formatting["colors"] = list(formatting["colors"])

        return {
            "structure": structure,
            "formatting": formatting
        }

    def _is_likely_title(self, text: str, size: float, font: str) -> bool:
        """Heuristic: is this span a heading?

        Based on font size and textual cues; ``font`` is accepted for
        future use but not currently consulted.
        """
        if size > 14:  # large font -> heading
            return True

        if len(text) < 100 and (
            text.isupper() or  # ALL CAPS
            bool(re.match(r'^[\d\.]+\s+[A-Z]', text)) or  # "1.2 Title" style numbering
            text.endswith(':')  # trailing colon
        ):
            return True

        return False

    def _combine_structured_content(self, structured_content: Dict[str, Any]) -> str:
        """Flatten the extracted structure into markdown-ish plain text."""
        text_parts = []

        for page in structured_content["structure"]:
            for section in page["sections"]:
                # Emit detected titles as "##" headings; skip the implicit one.
                if section.get("title") and section["title"] != "Content":
                    text_parts.append(f"\n## {section['title']}\n")

                if section.get("content"):
                    text_parts.append(" ".join(section["content"]))

        return "\n".join(text_parts)

    async def _fallback_parse(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Plain-text extraction used when structure-aware parsing fails.

        Raises:
            ValueError: if the PDF cannot be opened/read at all.
        """
        try:
            doc = fitz.open(stream=content, filetype="pdf")
            try:
                # join() avoids quadratic string concatenation across pages.
                text = "".join(page.get_text() for page in doc)
            finally:
                doc.close()

            return Document(
                id=metadata.get("id", ""),
                content=text,
                metadata={**metadata, "parser": "PDFParser", "parse_method": "fallback"}
            )
        except Exception as e:
            raise ValueError(f"Failed to parse PDF: {str(e)}") from e


class DOCXParser(DocumentParser):
    """DOCX parser built on python-docx, recording heading structure."""

    def supports(self, file_extension: str, mime_type: str = None) -> bool:
        """Return True for Word extensions (mime_type is currently ignored).

        NOTE(review): python-docx cannot read legacy binary .doc files, so
        parse() will raise ValueError for those; '.doc' is kept here for
        backward compatibility with existing callers.
        """
        return file_extension.lower() in ['.docx', '.doc']

    async def parse(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Parse a DOCX document, collecting heading structure.

        Raises:
            ValueError: if the bytes cannot be parsed as DOCX.
        """
        try:
            import io

            # python-docx accepts any file-like object, so parse straight
            # from memory. This avoids a temp-file round trip and the
            # Windows limitation that a NamedTemporaryFile cannot be
            # re-opened while still open.
            doc = DocxDocument(io.BytesIO(content))

            text_parts = []
            structure = []

            for para in doc.paragraphs:
                text = para.text.strip()
                if not text:
                    continue

                # Headings are identified by their style name ("Heading 1", ...).
                style_name = para.style.name if para.style else "Normal"

                if "Heading" in style_name:
                    structure.append({
                        "type": "heading",
                        "level": self._get_heading_level(style_name),
                        "text": text
                    })
                    text_parts.append(f"\n## {text}\n")
                else:
                    text_parts.append(text)

            full_text = "\n".join(text_parts)

            enhanced_metadata = {
                **metadata,
                "paragraph_count": len(doc.paragraphs),
                "structure": structure,
                "parser": "DOCXParser"
            }

            return Document(
                id=metadata.get("id", ""),
                content=full_text,
                metadata=enhanced_metadata
            )

        except Exception as e:
            raise ValueError(f"Failed to parse DOCX: {str(e)}") from e

    def _get_heading_level(self, style_name: str) -> int:
        """Extract the numeric level from a style name like 'Heading 2'.

        Returns 1 when no digit is present (e.g. bare 'Heading').
        """
        if "Heading" in style_name:
            match = re.search(r'\d+', style_name)
            # Explicit check instead of the previous bare `except:`, which
            # could also mask unrelated errors.
            if match:
                return int(match.group())
        return 1


class ExcelParser(DocumentParser):
    """Excel/CSV parser built on pandas."""

    def supports(self, file_extension: str, mime_type: str = None) -> bool:
        """Return True for spreadsheet extensions (mime_type is ignored)."""
        return file_extension.lower() in ['.xlsx', '.xls', '.csv']

    async def parse(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Parse spreadsheet bytes into a text rendering of the table(s).

        For Excel files every sheet is loaded (``sheet_name=None``) and
        rendered under a "## Sheet: <name>" heading; CSV yields a single
        table.

        Raises:
            ValueError: if pandas cannot read the content.
        """
        try:
            import io

            file_ext = metadata.get("file_extension", "").lower()
            buffer = io.BytesIO(content)

            if file_ext == '.csv':
                df = pd.read_csv(buffer)
            else:
                # pandas reads Excel from file-like objects directly, so no
                # temp file is needed. sheet_name=None returns a dict of
                # {sheet_name: DataFrame}.
                df = pd.read_excel(buffer, sheet_name=None)

            if isinstance(df, dict):
                # Multi-sheet workbook.
                text_parts = []
                sheets_info = []

                for sheet_name, sheet_df in df.items():
                    text_parts.append(f"\n## Sheet: {sheet_name}\n")
                    text_parts.append(sheet_df.to_string(index=False))

                    sheets_info.append({
                        "name": sheet_name,
                        "rows": len(sheet_df),
                        "columns": len(sheet_df.columns),
                        "columns_list": list(sheet_df.columns)
                    })

                full_text = "\n".join(text_parts)

                enhanced_metadata = {
                    **metadata,
                    "sheets": sheets_info,
                    "total_sheets": len(df),
                    "parser": "ExcelParser"
                }
            else:
                # Single table (CSV path).
                full_text = df.to_string(index=False)

                enhanced_metadata = {
                    **metadata,
                    "rows": len(df),
                    "columns": len(df.columns),
                    "columns_list": list(df.columns),
                    "parser": "ExcelParser"
                }

            return Document(
                id=metadata.get("id", ""),
                content=full_text,
                metadata=enhanced_metadata
            )

        except Exception as e:
            raise ValueError(f"Failed to parse Excel: {str(e)}") from e


class TextParser(DocumentParser):
    """Parser for plain-text formats (.txt, .md, .json, .xml, .html, .htm)."""

    def supports(self, file_extension: str, mime_type: str = None) -> bool:
        """Return True for text-like extensions (mime_type is ignored)."""
        text_extensions = ['.txt', '.md', '.json', '.xml', '.html', '.htm']
        return file_extension.lower() in text_extensions

    async def parse(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Decode text bytes and normalize HTML/JSON payloads.

        Tries a list of common encodings in order, falling back to lossy
        UTF-8 decoding; HTML is reduced to visible text, valid JSON is
        pretty-printed.

        Raises:
            ValueError: on unexpected failures during processing.
        """
        try:
            # gb2312 is a subset of gbk, so it must be tried first to be
            # attempted at all.
            encodings = ['utf-8', 'utf-16', 'gb2312', 'gbk', 'big5']
            text = None

            for encoding in encodings:
                try:
                    text = content.decode(encoding)
                    break
                except UnicodeDecodeError:
                    continue

            if text is None:
                # Last resort: drop undecodable bytes rather than fail.
                text = content.decode('utf-8', errors='ignore')

            file_ext = metadata.get("file_extension", "").lower()

            if file_ext in ['.html', '.htm']:
                # Strip markup, keep visible text only.
                soup = BeautifulSoup(text, 'html.parser')
                text = soup.get_text()
            elif file_ext == '.json':
                # Pretty-print valid JSON; leave malformed input untouched.
                try:
                    json_data = json.loads(text)
                    text = json.dumps(json_data, indent=2, ensure_ascii=False)
                except ValueError:
                    # json.JSONDecodeError subclasses ValueError; the
                    # previous bare `except:` could also hide real bugs.
                    pass

            enhanced_metadata = {
                **metadata,
                "character_count": len(text),
                "word_count": len(text.split()),
                "parser": "TextParser"
            }

            return Document(
                id=metadata.get("id", ""),
                content=text,
                metadata=enhanced_metadata
            )

        except Exception as e:
            raise ValueError(f"Failed to parse text: {str(e)}") from e


class DocumentParserFactory(EventEmitter):
    """Factory that selects the right parser for a document and runs it."""

    def __init__(self):
        super().__init__()
        # Built-in parsers, consulted in order; see register_parser().
        self.parsers: List[DocumentParser] = [
            PDFParser(),
            DOCXParser(), 
            ExcelParser(),
            TextParser()
        ]

    def get_parser(self, file_extension: str, mime_type: str = None) -> Optional[DocumentParser]:
        """Return the first registered parser that supports the file type, or None."""
        return next(
            (candidate for candidate in self.parsers
             if candidate.supports(file_extension, mime_type)),
            None,
        )

    async def parse_document(self, content: bytes, metadata: Dict[str, Any]) -> Document:
        """Parse ``content`` with a parser chosen from the metadata.

        Emits a "document_parsed" event on success and an error event on
        failure (the original exception is re-raised).

        Raises:
            ValueError: when no registered parser supports the file type.
        """
        file_extension = metadata.get("file_extension", "")
        mime_type = metadata.get("mime_type", "")

        parser = self.get_parser(file_extension, mime_type)
        if parser is None:
            raise ValueError(f"Unsupported file type: {file_extension}")

        try:
            document = await parser.parse(content, metadata)

            await self.emit("document_parsed", {
                "document_id": document.id,
                "parser": parser.__class__.__name__,
                "content_length": len(document.content),
                "metadata_keys": list(document.metadata.keys())
            })

            return document

        except Exception as e:
            await self.emit_error("document_parsing", e)
            raise

    def register_parser(self, parser: DocumentParser):
        """Register a custom parser; newest registrations take priority."""
        self.parsers.insert(0, parser)