"""文档解析模块，支持多种格式文档的解析"""

import datetime
import logging
import os
import re
import tempfile
from abc import ABC, abstractmethod
from typing import Any, BinaryIO, Dict, List, Optional

# 文档解析库
try:
    import PyPDF2
except ImportError:
    PyPDF2 = None

try:
    import docx
except ImportError:
    docx = None

try:
    import pandas as pd
except ImportError:
    pd = None

try:
    import markdown
except ImportError:
    markdown = None

logger = logging.getLogger('document_parser')

class DocumentParser(ABC):
    """Abstract base class for format-specific document parsers.

    Concrete subclasses implement :meth:`parse` for one document format
    and report via :meth:`supports` which file extensions they handle.
    """

    @abstractmethod
    def parse(self, file_path: Optional[str] = None,
              file_stream: Optional[BinaryIO] = None) -> Dict[str, Any]:
        """Parse a document given a path or an open binary stream.

        Args:
            file_path: Path to the document on disk.
            file_stream: Open binary stream holding the document content.

        Returns:
            A dict containing the extracted text and parsing metadata.
        """

    @abstractmethod
    def supports(self, file_extension: str) -> bool:
        """Report whether this parser handles the given extension.

        Args:
            file_extension: File extension, lower-cased (e.g. ``'.pdf'``).

        Returns:
            True if the extension is supported by this parser.
        """

class PDFParser(DocumentParser):
    """PDF document parser backed by the optional PyPDF2 dependency."""

    def __init__(self):
        # Degrade gracefully when PyPDF2 is missing; parse() raises
        # ImportError in that case instead of crashing at import time.
        if PyPDF2 is None:
            logger.warning("PyPDF2库未安装，PDF解析功能不可用")
            self.available = False
        else:
            self.available = True

    def parse(self, file_path: Optional[str] = None,
              file_stream: Optional[BinaryIO] = None) -> Dict[str, Any]:
        """Parse a PDF document.

        Args:
            file_path: Path to the PDF file.
            file_stream: Open binary stream with PDF content.

        Returns:
            Dict with 'text' (page texts joined by blank lines) and
            'metadata' (format, page count, per-page section info).

        Raises:
            ImportError: If PyPDF2 is not installed.
            ValueError: If neither file_path nor file_stream is given.
        """
        if not self.available:
            raise ImportError("PyPDF2库未安装，无法解析PDF文件")

        if not file_path and not file_stream:
            raise ValueError("必须提供file_path或file_stream")

        try:
            content: List[str] = []
            metadata: Dict[str, Any] = {
                'format': 'pdf',
                'pages': 0,
                'parsed_sections': []
            }

            # Open the document from a path or directly from the stream.
            if file_path:
                with open(file_path, 'rb') as f:
                    reader = PyPDF2.PdfReader(f)
                    metadata['filename'] = os.path.basename(file_path)
                    # Extraction must happen while the file is still open.
                    self._extract_content(reader, content, metadata)
            else:
                reader = PyPDF2.PdfReader(file_stream)
                self._extract_content(reader, content, metadata)

            full_text = '\n\n'.join(content)

            return {
                'text': full_text,
                'metadata': metadata
            }
        except Exception as e:
            logger.error(f"解析PDF文件失败: {str(e)}")
            raise

    # NOTE: the annotation is a string on purpose. A bare PyPDF2.PdfReader
    # would be evaluated at class-definition time and raise AttributeError
    # whenever PyPDF2 is None (import guard at the top of the module).
    def _extract_content(self, reader: "PyPDF2.PdfReader",
                        content: List[str],
                        metadata: Dict[str, Any]) -> None:
        """Extract page texts and document info from an open PdfReader."""
        metadata['pages'] = len(reader.pages)

        # Extract and clean the text of every page; empty pages are skipped.
        for page_num, page in enumerate(reader.pages):
            text = self._clean_text(page.extract_text())

            if text.strip():
                content.append(text)
                metadata['parsed_sections'].append({
                    'page': page_num + 1,  # 1-based page numbering
                    'content_length': len(text)
                })

        # Best effort: copy string-valued document info ('/Title' -> 'Title').
        try:
            if reader.metadata:
                doc_info = {
                    key.replace('/', ''): value for key, value in reader.metadata.items()
                    if isinstance(value, str) and value.strip()
                }
                metadata['document_info'] = doc_info
        except Exception:
            # Malformed metadata must not abort the whole parse.
            pass

    def _clean_text(self, text: str) -> str:
        """Normalize whitespace and strip artifacts from extracted text."""
        # Collapse runs of whitespace into single spaces.
        text = re.sub(r'\s+', ' ', text)
        # Drop ASCII control characters.
        text = re.sub(r'[\x00-\x1f\x7f]', '', text)
        # Drop stray hyphens that precede a letter but do not follow one
        # (common PDF line-break artifact).
        text = re.sub(r'(?<![a-zA-Z])-(?=[a-zA-Z])', '', text)
        return text.strip()

    def supports(self, file_extension: str) -> bool:
        """Return True for .pdf files."""
        return file_extension.lower() == '.pdf'

class DOCXParser(DocumentParser):
    """Parser for DOCX documents, built on the optional python-docx package."""

    def __init__(self):
        # Record availability of the optional dependency up front.
        self.available = docx is not None
        if not self.available:
            logger.warning("python-docx库未安装，DOCX解析功能不可用")

    def parse(self, file_path: Optional[str] = None,
              file_stream: Optional[BinaryIO] = None) -> Dict[str, Any]:
        """Parse a DOCX document.

        Args:
            file_path: Path to the .docx file.
            file_stream: Open binary stream with .docx content.

        Returns:
            Dict with 'text' (paragraphs plus rendered tables) and
            'metadata' (paragraph/heading counts, section info).

        Raises:
            ImportError: If python-docx is not installed.
            ValueError: If neither file_path nor file_stream is given.
        """
        if not self.available:
            raise ImportError("python-docx库未安装，无法解析DOCX文件")

        if not file_path and not file_stream:
            raise ValueError("必须提供file_path或file_stream")

        try:
            metadata: Dict[str, Any] = {
                'format': 'docx',
                'paragraphs': 0,
                'headings': {},
                'parsed_sections': []
            }

            if file_path:
                doc = docx.Document(file_path)
                metadata['filename'] = os.path.basename(file_path)
            else:
                # python-docx wants a real file, so spill the stream to disk.
                with tempfile.NamedTemporaryFile(delete=False, suffix='.docx') as temp:
                    temp.write(file_stream.read())
                    temp_path = temp.name
                try:
                    doc = docx.Document(temp_path)
                finally:
                    os.unlink(temp_path)

            # Walk the paragraphs, counting headings by level.
            paragraphs: List[str] = []
            for paragraph in doc.paragraphs:
                stripped = paragraph.text.strip()
                if not stripped:
                    continue

                paragraphs.append(stripped)
                metadata['paragraphs'] += 1

                style_name = paragraph.style.name
                if style_name.startswith('Heading'):
                    level = self._get_heading_level(style_name)
                    metadata['headings'][level] = metadata['headings'].get(level, 0) + 1
                    preview = stripped[:100] + ('...' if len(stripped) > 100 else '')
                    metadata['parsed_sections'].append({
                        'type': f'heading_{level}',
                        'content': preview,
                        'content_length': len(stripped)
                    })
                else:
                    metadata['parsed_sections'].append({
                        'type': 'paragraph',
                        'content_length': len(stripped)
                    })

            # Render tables row by row, pipe-separating the non-empty cells.
            rendered_tables: List[str] = []
            for table_idx, table in enumerate(doc.tables):
                rendered_rows: List[str] = []
                for row in table.rows:
                    cells = [cell.text.strip() for cell in row.cells]
                    cells = [c for c in cells if c]
                    if cells:
                        rendered_rows.append(' | '.join(cells))

                if rendered_rows:
                    rendered_tables.append('\n'.join(rendered_rows))
                    metadata['parsed_sections'].append({
                        'type': 'table',
                        'table_index': table_idx + 1,
                        'rows': len(table.rows),
                        'columns': len(table.columns)
                    })

            # Append the rendered tables after the paragraph text.
            full_text = '\n\n'.join(paragraphs)
            if rendered_tables:
                full_text += '\n\n[表格内容]\n\n' + '\n\n'.join(rendered_tables)
                metadata['tables'] = len(doc.tables)

            return {
                'text': full_text,
                'metadata': metadata
            }
        except Exception as e:
            logger.error(f"解析DOCX文件失败: {str(e)}")
            raise

    def _get_heading_level(self, style_name: str) -> int:
        """Extract the level from a style name like 'Heading 2' (0 if absent)."""
        match = re.search(r'Heading (\d+)', style_name)
        return int(match.group(1)) if match else 0

    def supports(self, file_extension: str) -> bool:
        """Return True for .docx files."""
        return file_extension.lower() == '.docx'

class XLSXParser(DocumentParser):
    """XLSX document parser.

    Supports header detection, merged-cell reporting, table-structure
    metadata, and special value formatting (dates, integral floats,
    currency-like numbers).
    """

    def __init__(self):
        if pd is None:
            logger.warning("pandas库未安装，XLSX解析功能不可用")
            self.available = False
        else:
            self.available = True

        # openpyxl is optional and only needed for merged-cell info.
        # Keep the module object on the instance: a try/except import here
        # binds only a local name, so parse() cannot reference a bare
        # `openpyxl` (the original code raised NameError there).
        try:
            import openpyxl
            self._openpyxl = openpyxl
            self.openpyxl_available = True
        except ImportError:
            logger.warning("openpyxl库未安装，无法处理合并单元格信息")
            self._openpyxl = None
            self.openpyxl_available = False

    def parse(self, file_path: Optional[str] = None,
              file_stream: Optional[BinaryIO] = None,
              options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """Parse an XLSX document.

        Args:
            file_path: Path to the XLSX file.
            file_stream: Open binary stream with XLSX content.
            options: Parsing options:
                - detect_headers: auto-detect header rows (default True)
                - process_merged_cells: report merged cells (default True)
                - max_rows: maximum data rows rendered per sheet (default 200)
                - convert_dates: normalize date values (default True)
                - preserve_formatting: keep special formats (default True)

        Returns:
            Dict with 'text' (all sheets rendered as text) and 'metadata'.

        Raises:
            ImportError: If pandas is not installed.
            ValueError: If neither file_path nor file_stream is given.
        """
        if not self.available:
            raise ImportError("pandas库未安装，无法解析XLSX文件")

        if not file_path and not file_stream:
            raise ValueError("必须提供file_path或file_stream")

        # Caller options override the defaults key by key.
        default_options = {
            'detect_headers': True,
            'process_merged_cells': True,
            'max_rows': 200,
            'convert_dates': True,
            'preserve_formatting': True
        }
        parse_options = {**default_options, **(options or {})}

        try:
            content: List[str] = []
            metadata: Dict[str, Any] = {
                'format': 'xlsx',
                'sheets': [],
                'total_cells': 0,
                'non_empty_cells': 0,
                'parsed_sections': [],
                'parse_options': parse_options
            }

            workbook = None
            if file_path:
                excel_data = pd.ExcelFile(file_path)
                metadata['filename'] = os.path.basename(file_path)
                if parse_options['process_merged_cells'] and self.openpyxl_available:
                    # data_only=True yields cached formula results, not formulas.
                    workbook = self._openpyxl.load_workbook(file_path, data_only=True)
            else:
                excel_data = pd.ExcelFile(file_stream)
                # Merged-cell extraction requires a file path.
                if parse_options['process_merged_cells'] and self.openpyxl_available:
                    logger.warning("对于文件流，无法处理合并单元格信息")

            # Render every worksheet and accumulate per-sheet metadata.
            for sheet_name in excel_data.sheet_names:
                df = pd.read_excel(excel_data, sheet_name=sheet_name)

                merged_cells: List[Dict[str, Any]] = []
                if workbook is not None and parse_options['process_merged_cells']:
                    merged_cells = self._get_merged_cells(workbook[sheet_name])

                content.append(self._convert_dataframe_to_text(
                    df, sheet_name, merged_cells, parse_options))

                header_info = self._detect_headers(df) if parse_options['detect_headers'] else None

                sheet_info = {
                    'name': sheet_name,
                    'rows': len(df),
                    'columns': len(df.columns),
                    # int() converts the numpy integer from count().sum() so
                    # metadata stays plain-Python (e.g. JSON-serializable).
                    'non_empty_cells': int(df.count().sum()),
                    'merged_cells_count': len(merged_cells),
                    'header_info': header_info
                }
                metadata['sheets'].append(sheet_info)
                metadata['total_cells'] += len(df) * len(df.columns)
                metadata['non_empty_cells'] += sheet_info['non_empty_cells']

                metadata['parsed_sections'].append({
                    'type': 'sheet',
                    'name': sheet_name,
                    'rows': len(df),
                    'columns': len(df.columns),
                    'has_header': header_info is not None
                })

            full_text = '\n\n--- 分隔工作表 ---\n\n'.join(content)

            return {
                'text': full_text,
                'metadata': metadata
            }
        except Exception as e:
            logger.error(f"解析XLSX文件失败: {str(e)}")
            raise

    def _get_merged_cells(self, worksheet: Any) -> List[Dict[str, Any]]:
        """Collect merged-cell ranges (1-based bounds) and their anchor value."""
        merged_cells = []
        for merged_cell in worksheet.merged_cells.ranges:
            merged_cells.append({
                'start_row': merged_cell.min_row,
                'end_row': merged_cell.max_row,
                'start_col': merged_cell.min_col,
                'end_col': merged_cell.max_col,
                # Only the top-left cell of a merged range holds the value.
                'value': worksheet.cell(row=merged_cell.min_row, column=merged_cell.min_col).value
            })
        return merged_cells

    # NOTE: annotations are strings so they are not evaluated at
    # class-definition time, which would crash when pandas is missing.
    def _detect_headers(self, df: "pd.DataFrame") -> Optional[Dict[str, Any]]:
        """Decide whether the header is the first data row or the column names."""
        if len(df) == 0:
            return None

        # Text in the first row suggests it may be a header row.
        first_row = df.iloc[0]
        has_text_in_first_row = any(
            isinstance(val, str) and len(val.strip()) > 0
            for val in first_row if pd.notna(val)
        )

        # Purely numeric column labels suggest pandas used default indices.
        has_numeric_columns = all(
            isinstance(col, int) or str(col).isdigit()
            for col in df.columns
        )

        if has_text_in_first_row and has_numeric_columns:
            # The first data row is likely the real header.
            return {
                'is_header_row': True,
                'header_row_index': 0,
                'headers': list(first_row.astype(str))
            }
        # Otherwise the existing column names serve as headers.
        return {
            'is_header_row': False,
            'headers': list(df.columns.astype(str))
        }

    def _convert_dataframe_to_text(self,
                                  df: "pd.DataFrame",
                                  sheet_name: str,
                                  merged_cells: List[Dict[str, Any]],
                                  options: Dict[str, Any]) -> str:
        """Render a dataframe as aligned text with header and merged-cell info."""
        lines = [f"工作表: {sheet_name}", "=" * 40]

        # Table structure summary.
        lines.append(f"表格结构: {len(df.columns)}列 x {len(df)}行")
        if merged_cells:
            lines.append(f"合并单元格数量: {len(merged_cells)}")
        lines.append("=" * 40)

        # Pick the header source and the data rows to render.
        header_info = self._detect_headers(df)
        if header_info:
            lines.append("表头信息:")
            if header_info['is_header_row']:
                lines.append(f"  - 表头位于第{header_info['header_row_index'] + 1}行")
                headers = header_info['headers']
                # Copy before renaming columns so the caller's df is untouched
                # and pandas does not warn about writing to a slice.
                data_df = df.iloc[1:].copy()
                data_df.columns = headers
            else:
                headers = header_info['headers']
                data_df = df.copy()
                lines.append(f"  - 当前列名即为表头")

            header_line = " | ".join([f"{h:<15}" for h in headers])
            lines.append(header_line)
            lines.append("-" * len(header_line))
        else:
            headers = [str(col) for col in df.columns]
            data_df = df.copy()
            header_line = " | ".join([f"{h:<15}" for h in headers])
            lines.append(header_line)
            lines.append("-" * len(header_line))

        # Cap the number of rendered rows to keep the output bounded.
        max_rows = min(len(data_df), options.get('max_rows', 200))

        # Sample a few rows to compute per-column display widths.
        column_widths = [len(str(h)) for h in headers]
        for i in range(min(5, len(data_df))):
            for j, value in enumerate(data_df.iloc[i]):
                str_value = self._format_value(value, options)
                column_widths[j] = max(column_widths[j], len(str_value))

        for i in range(max_rows):
            row = data_df.iloc[i]
            row_values = [
                f"{self._format_value(value, options):<{column_widths[j]}}"
                for j, value in enumerate(row)
            ]

            # NOTE(review): merged ranges are matched by 1-based worksheet
            # row against the 0-based dataframe row; header rows may shift
            # this by one — confirm against representative files.
            merged_info = []
            for merged in merged_cells:
                if merged['start_row'] - 1 <= i <= merged['end_row'] - 1:
                    merged_info.append(
                        f"合并单元格: ({merged['start_row']},{merged['start_col']})-({merged['end_row']},{merged['end_col']})="
                        f"{merged['value']}"
                    )

            lines.append(" | ".join(row_values))
            if merged_info:
                lines.append(f"  [{', '.join(merged_info)}]")

        # Tell the reader when rows were truncated.
        if len(data_df) > max_rows:
            lines.append(f"... 还有 {len(data_df) - max_rows} 行数据未显示 ...")

        return '\n'.join(lines)

    def _format_value(self, value: Any, options: Dict[str, Any]) -> str:
        """Format one cell value (dates, integral floats, currency-like numbers)."""
        if pd.isna(value):
            return ''

        # Date normalization. Relies on the module-level `import datetime`;
        # the original referenced datetime without importing it (NameError).
        if options.get('convert_dates', True):
            if isinstance(value, (pd.Timestamp, datetime.datetime)):
                return value.strftime('%Y-%m-%d %H:%M:%S')
            elif isinstance(value, datetime.date):
                return value.strftime('%Y-%m-%d')

        # Render integral floats without a decimal point.
        if isinstance(value, float) and value.is_integer():
            return str(int(value))

        # Heuristic currency formatting: small numbers with at most two
        # decimal places are rendered as fixed-point.
        if options.get('preserve_formatting', True):
            if isinstance(value, (int, float)) and -1000000 < value < 1000000:
                if abs(value) == round(abs(value), 2):
                    return f"{value:.2f}"

        return str(value)

    def supports(self, file_extension: str) -> bool:
        """Return True for .xlsx and .xls files."""
        return file_extension.lower() in ['.xlsx', '.xls']

class MarkdownParser(DocumentParser):
    """Markdown document parser.

    Structural analysis works without the optional `markdown` package; the
    package is only used to probe whether the content converts to HTML.
    """

    def __init__(self):
        if markdown is None:
            logger.warning("markdown库未安装，Markdown解析功能不可用")
            self.available = False
        else:
            self.available = True

    def parse(self, file_path: Optional[str] = None,
              file_stream: Optional[BinaryIO] = None) -> Dict[str, Any]:
        """Parse a Markdown document.

        Args:
            file_path: Path to the Markdown file.
            file_stream: Open binary stream with Markdown content.

        Returns:
            Dict with 'text' (the raw Markdown) and 'metadata' (heading
            counts, section lengths, feature flags).

        Raises:
            ValueError: If neither file_path nor file_stream is given.
        """
        if not file_path and not file_stream:
            raise ValueError("必须提供file_path或file_stream")

        try:
            # Read the raw Markdown text.
            if file_path:
                with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
                    content = f.read()
            else:
                content = file_stream.read().decode('utf-8', errors='replace')

            metadata: Dict[str, Any] = {
                'format': 'markdown',
                'headings': {},
                'sections': [],
                'has_code_blocks': '```' in content,
                # NOTE(review): this pattern also matches image links
                # ![alt](url), so has_links implies has_images may overlap.
                'has_links': bool(re.search(r'\[.+\]\(.+\)', content)),
                'has_images': bool(re.search(r'!\[.+\]\(.+\)', content))
            }

            if file_path:
                metadata['filename'] = os.path.basename(file_path)

            # Walk the document line by line, splitting it into sections
            # at every ATX heading (# .. ######).
            lines = content.split('\n')
            current_heading = None
            current_section: List[str] = []

            for line in lines:
                heading_match = re.match(r'^(#{1,6})\s+(.*)', line)
                if heading_match:
                    # Close out the section that ends at this heading.
                    if current_section:
                        metadata['sections'].append({
                            'heading': current_heading or '正文',
                            'content_length': len('\n'.join(current_section))
                        })
                        current_section = []

                    heading_level = len(heading_match.group(1))
                    current_heading = heading_match.group(2).strip()

                    # Tally headings per level.
                    if heading_level not in metadata['headings']:
                        metadata['headings'][heading_level] = 0
                    metadata['headings'][heading_level] += 1

                # Every line (headings included) belongs to the current section.
                current_section.append(line)

            # Close out the trailing section.
            if current_section:
                metadata['sections'].append({
                    'heading': current_heading or '正文',
                    'content_length': len('\n'.join(current_section))
                })

            # Probe HTML convertibility; the rendered HTML itself is
            # discarded (the original bound it to an unused variable).
            if self.available:
                try:
                    markdown.markdown(content)
                    metadata['has_html'] = True
                except Exception:
                    metadata['has_html'] = False

            return {
                'text': content,
                'metadata': metadata
            }
        except Exception as e:
            logger.error(f"解析Markdown文件失败: {str(e)}")
            raise

    def supports(self, file_extension: str) -> bool:
        """Return True for .md files."""
        return file_extension.lower() == '.md'

class TXTParser(DocumentParser):
    """Parser for plain-text documents; needs no third-party packages."""

    def parse(self, file_path: Optional[str] = None,
              file_stream: Optional[BinaryIO] = None) -> Dict[str, Any]:
        """Parse a plain-text document.

        Args:
            file_path: Path to the text file.
            file_stream: Open binary stream with text content.

        Returns:
            Dict with 'text' (the raw content) and 'metadata' (line/word
            counts plus simple format-detection flags).

        Raises:
            ValueError: If neither file_path nor file_stream is given.
        """
        if not file_path and not file_stream:
            raise ValueError("必须提供file_path或file_stream")

        try:
            metadata: Dict[str, Any] = {
                'format': 'txt',
                'line_count': 0,
                'word_count': 0
            }

            # Read the content, replacing undecodable bytes.
            if file_path:
                with open(file_path, 'r', encoding='utf-8', errors='replace') as f:
                    text = f.read()
                metadata['filename'] = os.path.basename(file_path)
            else:
                text = file_stream.read().decode('utf-8', errors='replace')

            # Simple statistics: newline-separated lines, whitespace-split words.
            metadata['line_count'] = len(text.split('\n'))
            metadata['word_count'] = len(text.split())

            # Cheap heuristics for embedded code and table-like content.
            metadata['has_code_blocks'] = bool(re.search(r'\bdef\s+|\bfunction\s+|\bclass\s+', text))
            metadata['has_table_like'] = bool(re.search(r'\|.*\|', text))

            return {
                'text': text,
                'metadata': metadata
            }
        except Exception as e:
            logger.error(f"解析TXT文件失败: {str(e)}")
            raise

    def supports(self, file_extension: str) -> bool:
        """Return True for .txt files."""
        return file_extension.lower() == '.txt'

class DocumentParserFactory:
    """Factory that picks the right document parser for a file extension."""

    def __init__(self):
        """Build the parser registry and the extension lookup table."""
        self.parsers = [
            PDFParser(),
            DOCXParser(),
            XLSXParser(),
            MarkdownParser(),
            TXTParser()
        ]

        # Map each supported extension (lower-cased) to its parser instance.
        self.extension_map = {
            ext.lower(): parser
            for parser in self.parsers
            for ext in self._get_supported_extensions(parser)
        }

    def _get_supported_extensions(self, parser: DocumentParser) -> List[str]:
        """Infer a parser's extensions from its class name.

        A simple name-based heuristic; a richer registration scheme could
        replace it in a larger application.
        """
        name = parser.__class__.__name__.lower()
        hints = [
            ('pdf', ['.pdf']),
            ('docx', ['.docx']),
            ('xlsx', ['.xlsx', '.xls']),
            ('markdown', ['.md']),
            ('txt', ['.txt']),
        ]
        for key, extensions in hints:
            if key in name:
                return extensions
        return []

    def get_parser(self, file_path: Optional[str] = None,
                  file_extension: Optional[str] = None) -> Optional[DocumentParser]:
        """Look up a parser by file path or explicit extension.

        Args:
            file_path: File path (its extension is used when given).
            file_extension: Extension, with or without a leading dot.

        Returns:
            A parser instance, or None when the format is unsupported or
            the matching parser's dependency is missing.
        """
        # Normalize to a lower-cased, dot-prefixed extension.
        if file_path:
            ext = os.path.splitext(file_path)[1].lower()
        elif file_extension:
            ext = file_extension.lower()
            if not ext.startswith('.'):
                ext = f'.{ext}'
        else:
            return None

        parser = self.extension_map.get(ext)

        # A parser whose optional dependency is missing is treated as absent.
        if parser is not None and not getattr(parser, 'available', True):
            logger.warning(f"解析器{parser.__class__.__name__}不可用")
            return None

        return parser

    def parse_document(self, file_path: Optional[str] = None,
                      file_stream: Optional[BinaryIO] = None,
                      file_extension: Optional[str] = None) -> Dict[str, Any]:
        """Parse a document, falling back to the plain-text parser.

        Args:
            file_path: File path.
            file_stream: File stream.
            file_extension: Extension (required when only a stream is given).

        Returns:
            The chosen parser's result dict.
        """
        parser = self.get_parser(file_path, file_extension)
        if not parser:
            # Unknown format: treat the document as plain text.
            parser = TXTParser()
            logger.warning(f"不支持的文件格式，使用TXT解析器作为后备")

        return parser.parse(file_path, file_stream)

    def get_supported_formats(self) -> List[Dict[str, Any]]:
        """List every registered format, sorted by extension.

        Returns:
            List of dicts with 'extension', 'type' and 'available' keys.
        """
        return sorted(
            (
                {
                    'extension': ext,
                    'type': parser.__class__.__name__,
                    'available': getattr(parser, 'available', True)
                }
                for ext, parser in self.extension_map.items()
            ),
            key=lambda entry: entry['extension']
        )


# Module-level singleton: import `parser_factory` for ready-to-use parsing.
# Constructing it here instantiates every parser once at import time.
parser_factory = DocumentParserFactory()

# Explicit public API of this module.
__all__ = [
    'DocumentParser',
    'PDFParser',
    'DOCXParser',
    'XLSXParser',
    'MarkdownParser',
    'TXTParser',
    'DocumentParserFactory',
    'parser_factory'
]