"""
文档处理器模块
负责各种格式文档的文本提取和分块
"""
import os
import re
from typing import List, Dict, Any, Optional
from abc import ABC, abstractmethod
import PyPDF2
from docx import Document
import openpyxl
import pandas as pd
from ..core.config import config
from .ai_chunker import AIDocumentChunker


class BaseDocumentProcessor(ABC):
    """Base class for document processors.

    Subclasses implement :meth:`extract_text`; this class provides text
    chunking via either a traditional size-based splitter or an AI-assisted
    chunker, selected by configuration.
    """

    def __init__(self):
        # Chunking parameters, read from global config with defaults.
        self.chunk_size = config.get('documents.chunk_size', 1000)
        self.chunk_overlap = config.get('documents.chunk_overlap', 200)
        self.chunking_strategy = config.get('documents.chunking.strategy', 'traditional')
        self.ai_model = config.get('documents.chunking.ai_model', 'deepseek-r1:1.5b')
        self.fallback_to_traditional = config.get('documents.chunking.fallback_to_traditional', True)

        # Only build the AI chunker when an AI strategy is configured.
        if self.chunking_strategy.startswith('ai_'):
            self.ai_chunker = AIDocumentChunker(
                model_name=self.ai_model,
                ollama_url="http://localhost:11434"
            )
        else:
            self.ai_chunker = None

    @abstractmethod
    def extract_text(self, file_path: str) -> str:
        """Extract the document's full text. Implemented by subclasses."""
        pass

    def chunk_text(self, text: str) -> List[str]:
        """Split *text* into chunks according to the configured strategy.

        Returns an empty list for empty input and a single chunk when the
        text already fits within ``chunk_size``.
        """
        if not text:
            return []

        if len(text) <= self.chunk_size:
            return [text]

        # Dispatch on the configured strategy; unknown values (including
        # 'traditional') fall through to the traditional splitter.
        if self.chunking_strategy == 'ai_semantic':
            return self._ai_chunking(text, 'semantic')
        elif self.chunking_strategy == 'ai_topic':
            return self._ai_chunking(text, 'topic')
        elif self.chunking_strategy == 'ai_hybrid':
            return self._ai_chunking(text, 'hybrid')
        else:
            return self._traditional_chunking(text)

    def _traditional_chunking(self, text: str) -> List[str]:
        """Size-based splitter that prefers to break at '。' or newlines.

        Consecutive chunks overlap by roughly ``chunk_overlap`` characters.
        """
        chunks = []
        start = 0

        while start < len(text):
            end = start + self.chunk_size

            # For non-final chunks, try to end at the last sentence
            # terminator or newline inside the current window.
            if end < len(text):
                period_pos = text.rfind('。', start, end)
                newline_pos = text.rfind('\n', start, end)

                if period_pos > start and period_pos > newline_pos:
                    end = period_pos + 1
                elif newline_pos > start:
                    end = newline_pos + 1

            chunk = text[start:end].strip()
            if chunk:
                chunks.append(chunk)

            # Advance with overlap. BUG FIX: when the boundary adjustment
            # pulls `end` back to within `chunk_overlap` of `start`, the old
            # code (`start = end - self.chunk_overlap`) stopped advancing and
            # looped forever; always move the window strictly forward.
            next_start = end - self.chunk_overlap
            start = next_start if next_start > start else end

        return chunks

    def _ai_chunking(self, text: str, strategy: str) -> List[str]:
        """Chunk via the AI chunker, optionally falling back on failure."""
        if not self.ai_chunker:
            return self._traditional_chunking(text)

        try:
            return self.ai_chunker.chunk_with_ai(text, strategy)
        except Exception as e:
            print(f"AI分块失败 ({strategy}): {e}")
            if self.fallback_to_traditional:
                print("回退到传统分块方法")
                return self._traditional_chunking(text)
            # Bare `raise` preserves the original traceback (was `raise e`).
            raise

    def process_document(self, file_path: str) -> Dict[str, Any]:
        """Extract and chunk a document.

        Returns a result dict with ``success``, ``text``, ``chunks`` and
        ``chunk_count``; failures are reported via ``error`` rather than
        raised, so callers always get a dict.
        """
        try:
            text = self.extract_text(file_path)
            chunks = self.chunk_text(text)

            return {
                'success': True,
                'text': text,
                'chunks': chunks,
                'chunk_count': len(chunks)
            }
        except Exception as e:
            return {
                'success': False,
                'error': str(e),
                'text': '',
                'chunks': [],
                'chunk_count': 0
            }


class PDFProcessor(BaseDocumentProcessor):
    """Processor for PDF documents."""

    def extract_text(self, file_path: str) -> str:
        """Return the text of every page, each followed by a newline."""
        with open(file_path, 'rb') as handle:
            reader = PyPDF2.PdfReader(handle)
            pages = [page.extract_text() + "\n" for page in reader.pages]
        return "".join(pages)


class WordProcessor(BaseDocumentProcessor):
    """Processor for Word (.docx) documents."""

    def extract_text(self, file_path: str) -> str:
        """Return every paragraph's text, one paragraph per line."""
        document = Document(file_path)
        lines = [paragraph.text + "\n" for paragraph in document.paragraphs]
        return "".join(lines)


class ExcelProcessor(BaseDocumentProcessor):
    """Processor for Excel (.xlsx) workbooks."""

    def extract_text(self, file_path: str) -> str:
        """Render each worksheet as pipe-separated rows of cell values."""
        # data_only=True yields computed cell values instead of formulas.
        workbook = openpyxl.load_workbook(file_path, data_only=True)
        parts = []

        for sheet_name in workbook.sheetnames:
            worksheet = workbook[sheet_name]
            parts.append(f"工作表: {sheet_name}\n")

            for row in worksheet.iter_rows(values_only=True):
                cells = ["" if cell is None else str(cell) for cell in row]
                row_text = " | ".join(cells)
                # Skip rows that contain no visible content.
                if row_text.strip():
                    parts.append(row_text + "\n")
            parts.append("\n")

        return "".join(parts)


class TextProcessor(BaseDocumentProcessor):
    """Processor for plain-text files."""

    def extract_text(self, file_path: str) -> str:
        """Read the file as UTF-8, retrying with GBK on decode errors."""
        try:
            with open(file_path, 'r', encoding='utf-8') as handle:
                content = handle.read()
        except UnicodeDecodeError:
            # Likely a legacy Chinese-encoded file; retry with GBK.
            with open(file_path, 'r', encoding='gbk') as handle:
                content = handle.read()
        return content


class MarkdownProcessor(BaseDocumentProcessor):
    """Processor for Markdown files."""

    def extract_text(self, file_path: str) -> str:
        """Read the Markdown source as text.

        Tries UTF-8 first and falls back to GBK on decode errors, matching
        TextProcessor's handling — previously a GBK-encoded .md file raised
        UnicodeDecodeError while the same content in a .txt file succeeded.
        """
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                return f.read()
        except UnicodeDecodeError:
            with open(file_path, 'r', encoding='gbk') as f:
                return f.read()


class DocumentProcessorFactory:
    """Factory mapping file extensions to document processor classes."""

    # Registry of supported extensions (lowercase) -> processor class.
    _processors = {
        '.pdf': PDFProcessor,
        '.docx': WordProcessor,
        '.xlsx': ExcelProcessor,
        '.txt': TextProcessor,
        '.md': MarkdownProcessor
    }

    @classmethod
    def get_processor(cls, file_path: str) -> Optional[BaseDocumentProcessor]:
        """Return a new processor for *file_path*, or None if unsupported."""
        extension = os.path.splitext(file_path)[1].lower()
        processor_class = cls._processors.get(extension)
        return processor_class() if processor_class else None

    @classmethod
    def get_supported_formats(cls) -> List[str]:
        """Return the list of supported file extensions."""
        return [*cls._processors]

    @classmethod
    def is_supported(cls, file_path: str) -> bool:
        """Return True when the file's extension has a registered processor."""
        return os.path.splitext(file_path)[1].lower() in cls._processors