import os
import uuid
from datetime import datetime
from typing import Dict, Any, List, Optional
from pathlib import Path
import json

from ..processors.pdf_processor import PDFProcessor
from ..processors.docx_processor import DOCXProcessor
from ..processors.markdown_processor import MarkdownProcessor
from ..core.text_cleaner import TextCleaner
from ..core.chunking_engine import ChunkingEngine

class DocumentProcessor:
    """Top-level document-processing pipeline.

    Orchestrates three stages — format-specific extraction (PDF / DOCX /
    Markdown), text cleaning, and chunking — and provides export and
    statistics helpers for the processed results.
    """

    def __init__(self, cleaning_config: Dict[str, Any], chunking_config: Dict[str, Any]):
        """Initialize the processor.

        Args:
            cleaning_config: Configuration dict forwarded to ``TextCleaner``.
            chunking_config: Configuration dict forwarded to ``ChunkingEngine``.
        """
        self.cleaning_config = cleaning_config
        self.chunking_config = chunking_config

        # One extractor per supported source format.
        self.pdf_processor = PDFProcessor()
        self.docx_processor = DOCXProcessor()
        self.markdown_processor = MarkdownProcessor()

        # Cleaning and chunking engines, configured from the supplied dicts.
        self.text_cleaner = TextCleaner(cleaning_config)
        self.chunking_engine = ChunkingEngine(chunking_config)

    def process_file(self, uploaded_file) -> Optional[Dict[str, Any]]:
        """Process a single file through extract → clean → chunk.

        Args:
            uploaded_file: Either a filesystem path (``str``) or a Streamlit
                uploaded-file object exposing ``name`` and ``getvalue()``.

        Returns:
            A result dict with document metadata, chunk list, and both the
            raw and cleaned content, or ``None`` if processing failed.
        """
        # Pre-set so the exception handler can always report a name.
        filename = "unknown"
        try:
            print("[DocumentProcessor] 开始处理文件...")

            # Normalize the two supported input kinds to
            # (filename, extension, raw bytes).
            if isinstance(uploaded_file, str):
                file_path = Path(uploaded_file)
                filename = file_path.name
                file_extension = file_path.suffix.lower().lstrip('.')

                # BUG FIX: this and the other progress messages previously
                # printed a literal "(unknown)" placeholder instead of the
                # actual filename.
                print(f"[DocumentProcessor] 读取文件: {filename}")
                with open(file_path, 'rb') as f:
                    file_content = f.read()
            else:
                # Streamlit UploadedFile-like object.
                filename = uploaded_file.name
                file_extension = Path(uploaded_file.name).suffix.lower().lstrip('.')
                file_content = uploaded_file.getvalue()
                print(f"[DocumentProcessor] 处理上传文件: {filename}, 大小: {len(file_content)} bytes")

            # Short random ID so chunks from different documents never collide.
            document_id = f"doc_{uuid.uuid4().hex[:8]}"
            print(f"[DocumentProcessor] 生成文档ID: {document_id}")

            # Step 1: extract raw text and structure from the source format.
            print(f"[DocumentProcessor] 步骤1: 提取文档内容 ({file_extension})")
            raw_content = self._extract_content(file_extension, file_content, filename)
            if not raw_content:
                raise ValueError("文档内容提取失败")

            print(f"[DocumentProcessor] 内容提取成功，原始文本长度: {len(raw_content.get('text', ''))} 字符")

            # Step 2: clean the text; on failure fall back to the raw text so
            # one bad cleaner rule does not sink the whole document.
            print("[DocumentProcessor] 步骤2: 数据清洗")
            try:
                cleaned_content = self.text_cleaner.clean_text(raw_content['text'])
                print(f"[DocumentProcessor] 数据清洗完成，清洗后长度: {len(cleaned_content)} 字符")
            except Exception as clean_error:
                print(f"[DocumentProcessor] 数据清洗失败: {str(clean_error)}")
                cleaned_content = raw_content['text']
                print("[DocumentProcessor] 使用原始内容继续处理")

            # Step 3: chunk the cleaned text; on failure emit a single
            # truncated fallback chunk so downstream consumers still get data.
            print("[DocumentProcessor] 步骤3: 文档切片")
            try:
                chunks = self.chunking_engine.chunk_document(
                    cleaned_content,
                    raw_content.get('structure', {})
                )
                print(f"[DocumentProcessor] 文档切片完成，生成 {len(chunks)} 个切片")
            except Exception as chunk_error:
                print(f"[DocumentProcessor] 文档切片失败: {str(chunk_error)}")
                chunks = [{
                    'chunk_id': 'chunk_000',
                    # Slicing already handles strings shorter than 1000 chars.
                    'content': cleaned_content[:1000],
                    'metadata': {
                        'chunk_type': 'fallback',
                        'start_pos': 0,
                        'end_pos': min(1000, len(cleaned_content)),
                        'word_count': len(cleaned_content.split()),
                        'char_count': len(cleaned_content),
                        'level': 0
                    }
                }]
                print(f"[DocumentProcessor] 使用默认切片策略，生成 {len(chunks)} 个切片")

            # Step 4: assemble the result envelope.
            print("[DocumentProcessor] 步骤4: 构建处理结果")
            original_text = raw_content['text']
            result = {
                "document_id": document_id,
                "metadata": {
                    "title": raw_content.get('title') or filename,
                    "source_format": file_extension,
                    "original_filename": filename,
                    "file_size_mb": len(file_content) / (1024 * 1024),
                    "total_chunks": len(chunks),
                    "processing_time": datetime.now().isoformat(),
                    # Copies so later config mutation cannot alter stored results.
                    "cleaning_config": self.cleaning_config.copy(),
                    "chunking_config": self.chunking_config.copy(),
                    "original_length": len(original_text),
                    "cleaned_length": len(cleaned_content),
                    "compression_ratio": len(cleaned_content) / len(original_text) if original_text else 0
                },
                "chunks": chunks,
                "raw_content": raw_content,  # kept for before/after comparison
                "cleaned_content": cleaned_content
            }

            print(f"[DocumentProcessor] 文件 {filename} 处理完成")
            return result

        except Exception as e:
            # Best-effort contract: log the failure and return None rather
            # than propagate, so batch processing can continue.
            print(f"[DocumentProcessor] 处理文件 {filename} 时出错: {str(e)}")
            import traceback
            print(f"[DocumentProcessor] 错误堆栈: {traceback.format_exc()}")
            return None

    def _extract_content(self, file_extension: str, file_content: bytes, filename: str) -> Optional[Dict[str, Any]]:
        """Dispatch extraction to the processor matching *file_extension*.

        Args:
            file_extension: Lower-case extension without the leading dot.
            file_content: Raw file bytes.
            filename: Original filename, used only for error reporting.

        Returns:
            The extractor's content/structure dict, or ``None`` on failure.
        """
        extractors = {
            'pdf': self.pdf_processor,
            'docx': self.docx_processor,
            'md': self.markdown_processor,
        }

        try:
            processor = extractors.get(file_extension)
            if processor is None:
                raise ValueError(f"不支持的文件格式: {file_extension}")
            return processor.extract_content(file_content)

        except Exception as e:
            # BUG FIX: previously printed a literal "(unknown)" placeholder
            # instead of the filename.
            print(f"内容提取失败 ({filename}): {str(e)}")
            return None

    def process_batch(self, uploaded_files: List) -> List[Dict[str, Any]]:
        """Process *uploaded_files* in order, silently skipping failures.

        Args:
            uploaded_files: Inputs accepted by :meth:`process_file`.

        Returns:
            Result dicts for the files that processed successfully.
        """
        results = []
        for file in uploaded_files:
            result = self.process_file(file)
            # process_file returns None on failure; keep only successes.
            if result:
                results.append(result)
        return results

    def export_results(self, results: List[Dict[str, Any]], output_format: str = 'json') -> str:
        """Serialize processing results in the requested format.

        Args:
            results: Result dicts produced by :meth:`process_file`.
            output_format: One of ``'json'``, ``'txt'``, ``'markdown'``, ``'csv'``.

        Returns:
            The serialized content, or ``""`` if the export failed.
        """
        exporters = {
            'json': self._export_json,
            'txt': self._export_txt,
            'markdown': self._export_markdown,
            'csv': self._export_csv,
        }

        try:
            exporter = exporters.get(output_format)
            if exporter is None:
                raise ValueError(f"不支持的导出格式: {output_format}")
            return exporter(results)

        except Exception as e:
            print(f"导出失败: {str(e)}")
            return ""

    def _export_json(self, results: List[Dict[str, Any]]) -> str:
        """Export results as a pretty-printed UTF-8 JSON document."""
        export_data = {
            "export_time": datetime.now().isoformat(),
            "total_documents": len(results),
            "total_chunks": sum(len(result['chunks']) for result in results),
            "documents": results
        }
        # ensure_ascii=False keeps CJK text readable in the output.
        return json.dumps(export_data, ensure_ascii=False, indent=2)

    def _export_txt(self, results: List[Dict[str, Any]]) -> str:
        """Export results as a plain-text report."""
        lines = []
        lines.append("# 文档处理结果导出")
        lines.append(f"导出时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        lines.append(f"文档数量: {len(results)}")
        lines.append(f"总切片数: {sum(len(result['chunks']) for result in results)}")
        lines.append("\n" + "=" * 50 + "\n")

        for i, result in enumerate(results, 1):
            lines.append(f"## 文档 {i}: {result['metadata']['title']}")
            lines.append(f"格式: {result['metadata']['source_format'].upper()}")
            lines.append(f"切片数量: {result['metadata']['total_chunks']}")
            lines.append("")

            for j, chunk in enumerate(result['chunks'], 1):
                lines.append(f"### 切片 {j}")
                lines.append(chunk['content'])
                lines.append("")

            lines.append("\n" + "-" * 30 + "\n")

        return "\n".join(lines)

    def _export_markdown(self, results: List[Dict[str, Any]]) -> str:
        """Export results as a Markdown report."""
        lines = []
        lines.append("# 文档处理结果")
        lines.append("")
        lines.append(f"**导出时间**: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        lines.append(f"**文档数量**: {len(results)}")
        lines.append(f"**总切片数**: {sum(len(result['chunks']) for result in results)}")
        lines.append("")

        for i, result in enumerate(results, 1):
            metadata = result['metadata']
            lines.append(f"## {i}. {metadata['title']}")
            lines.append("")
            lines.append(f"- **格式**: {metadata['source_format'].upper()}")
            lines.append(f"- **文件大小**: {metadata['file_size_mb']:.2f} MB")
            lines.append(f"- **切片数量**: {metadata['total_chunks']}")
            lines.append(f"- **压缩比**: {metadata['compression_ratio']:.2%}")
            lines.append("")

            for j, chunk in enumerate(result['chunks'], 1):
                chunk_meta = chunk.get('metadata', {})
                lines.append(f"### 切片 {j}")

                # Optional per-chunk metadata lines, emitted only when present.
                if chunk_meta.get('chunk_type'):
                    lines.append(f"**类型**: {chunk_meta['chunk_type']}")
                if chunk_meta.get('word_count'):
                    lines.append(f"**字数**: {chunk_meta['word_count']}")

                lines.append("")
                lines.append(chunk['content'])
                lines.append("")

        return "\n".join(lines)

    def _export_csv(self, results: List[Dict[str, Any]]) -> str:
        """Export results as CSV with one row per chunk."""
        import csv
        from io import StringIO

        output = StringIO()
        writer = csv.writer(output)

        headers = [
            'document_id', 'document_title', 'source_format', 'chunk_id',
            'chunk_content', 'chunk_type', 'word_count', 'chunk_level',
            'parent_id', 'start_page', 'end_page'
        ]
        writer.writerow(headers)

        for result in results:
            doc_id = result['document_id']
            doc_title = result['metadata']['title']
            doc_format = result['metadata']['source_format']

            for chunk in result['chunks']:
                chunk_meta = chunk.get('metadata', {})
                writer.writerow([
                    doc_id,
                    doc_title,
                    doc_format,
                    chunk.get('chunk_id', ''),
                    chunk['content'],
                    chunk_meta.get('chunk_type', ''),
                    chunk_meta.get('word_count', ''),
                    chunk_meta.get('level', ''),
                    chunk_meta.get('parent_id', ''),
                    chunk_meta.get('start_page', ''),
                    chunk_meta.get('end_page', '')
                ])

        return output.getvalue()

    def get_processing_stats(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Compute aggregate statistics over processing results.

        Args:
            results: Result dicts produced by :meth:`process_file`.

        Returns:
            Statistics dict (document/chunk totals, compression ratio,
            per-format distribution, chunk-size min/max/avg), or ``{}`` when
            *results* is empty.
        """
        if not results:
            return {}

        total_docs = len(results)
        total_chunks = sum(len(result['chunks']) for result in results)
        total_original_length = sum(result['metadata']['original_length'] for result in results)
        total_cleaned_length = sum(result['metadata']['cleaned_length'] for result in results)

        # Per-source-format document and chunk counts.
        format_stats = {}
        for result in results:
            fmt = result['metadata']['source_format']
            if fmt not in format_stats:
                format_stats[fmt] = {'count': 0, 'chunks': 0}
            format_stats[fmt]['count'] += 1
            format_stats[fmt]['chunks'] += len(result['chunks'])

        # Flat list of chunk lengths for the size distribution.
        chunk_sizes = [
            len(chunk['content'])
            for result in results
            for chunk in result['chunks']
        ]

        return {
            'total_documents': total_docs,
            'total_chunks': total_chunks,
            'avg_chunks_per_doc': total_chunks / total_docs if total_docs > 0 else 0,
            'total_original_length': total_original_length,
            'total_cleaned_length': total_cleaned_length,
            'overall_compression_ratio': total_cleaned_length / total_original_length if total_original_length > 0 else 0,
            'format_distribution': format_stats,
            'chunk_size_stats': {
                'min': min(chunk_sizes) if chunk_sizes else 0,
                'max': max(chunk_sizes) if chunk_sizes else 0,
                'avg': sum(chunk_sizes) / len(chunk_sizes) if chunk_sizes else 0
            }
        }