"""
长文本处理模块 - 改进版
支持大规模文档、长篇小说、新闻集合等输入

改进内容：
1. ✅ 修复PDF/DOCX依赖问题，提供清晰的错误提示
2. ✅ 新增Excel、HTML、JSON、CSV格式支持
3. ✅ 改进错误处理和日志记录
4. ✅ 添加依赖检查和备用方案
5. ✅ 批量处理优化（进度条、并行处理）
"""

import importlib
import json
import logging
import os
import re
from dataclasses import dataclass, field
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union

# 配置日志
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
logger = logging.getLogger(__name__)


@dataclass
class TextChunk:
    """文本块数据类"""
    content: str
    chunk_id: int
    start_pos: int
    end_pos: int
    metadata: Dict = field(default_factory=dict)


@dataclass
class Document:
    """文档数据类"""
    title: str
    content: str
    file_path: Optional[str] = None
    chunks: Optional[List[TextChunk]] = None
    summary: Optional[str] = None
    metadata: Dict = field(default_factory=dict)  # 新增：存储格式、大小等信息


class DependencyChecker:
    """依赖检查器"""
    
    @staticmethod
    def check_and_import(module_name: str, package_name: str = None):
        """
        检查并导入模块
        Args:
            module_name: 模块名（用于import）
            package_name: 包名（用于安装提示）
        Returns:
            模块对象，如果导入失败则返回None
        """
        if package_name is None:
            package_name = module_name
            
        try:
            module = __import__(module_name)
            return module
        except ImportError:
            logger.warning(f"❌ 缺少依赖: {package_name}")
            logger.info(f"   安装方法: pip install {package_name}")
            return None
    
    @staticmethod
    def get_available_formats() -> Dict[str, bool]:
        """获取当前可用的文件格式"""
        formats = {
            'txt': True,  # 始终可用
            'md': True,   # 始终可用
            'pdf': DependencyChecker.check_and_import('PyPDF2') is not None,
            'docx': DependencyChecker.check_and_import('docx', 'python-docx') is not None,
            'xlsx': DependencyChecker.check_and_import('openpyxl') is not None,
            'html': DependencyChecker.check_and_import('bs4', 'beautifulsoup4') is not None,
            'json': True,  # 标准库
            'csv': True,   # 标准库
        }
        return formats


class LongTextProcessor:
    """长文本处理器 - 改进版"""
    
    def __init__(self, chunk_size: int = 2000, chunk_overlap: int = 200):
        """
        初始化处理器
        Args:
            chunk_size: 每个文本块的最大字符数
            chunk_overlap: 文本块之间的重叠字符数（保持上下文连续性）
        """
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        self.available_formats = DependencyChecker.get_available_formats()
        
        # 打印可用格式
        logger.info("=" * 60)
        logger.info("长文本处理器初始化")
        logger.info("=" * 60)
        logger.info("支持的格式:")
        for fmt, available in self.available_formats.items():
            status = "✅" if available else "❌"
            logger.info(f"  {status} .{fmt}")
        logger.info("=" * 60)
    
    def load_file(self, file_path: Union[str, Path]) -> Document:
        """
        加载文件并转换为Document对象
        支持格式：TXT, Markdown, PDF, DOCX, Excel, HTML, JSON, CSV
        """
        file_path = Path(file_path)
        
        if not file_path.exists():
            raise FileNotFoundError(f"文件不存在: {file_path}")
        
        # 根据文件扩展名选择读取方法
        ext = file_path.suffix.lower().lstrip('.')
        file_size = file_path.stat().st_size
        
        logger.info(f"正在加载文件: {file_path.name} (大小: {file_size/1024:.2f} KB)")
        
        # 检查格式是否支持
        if ext not in self.available_formats:
            raise ValueError(f"不支持的文件格式: .{ext}\n支持的格式: {', '.join([f'.{f}' for f in self.available_formats.keys()])}")
        
        if not self.available_formats[ext]:
            raise ImportError(
                f"无法处理 .{ext} 格式文件：缺少必要的依赖库\n"
                f"请根据以下提示安装：\n"
                f"  PDF: pip install PyPDF2\n"
                f"  DOCX: pip install python-docx\n"
                f"  Excel: pip install openpyxl\n"
                f"  HTML: pip install beautifulsoup4"
            )
        
        # 调用对应的读取方法
        try:
            if ext in ['txt', 'md']:
                content = self._read_text_file(file_path)
            elif ext == 'pdf':
                content = self._read_pdf_file(file_path)
            elif ext == 'docx':
                content = self._read_docx_file(file_path)
            elif ext in ['xlsx', 'xls']:
                content = self._read_excel_file(file_path)
            elif ext in ['html', 'htm']:
                content = self._read_html_file(file_path)
            elif ext == 'json':
                content = self._read_json_file(file_path)
            elif ext == 'csv':
                content = self._read_csv_file(file_path)
            else:
                raise ValueError(f"未实现的格式: .{ext}")
            
            # 提取标题
            title = self._extract_title(file_path, content)
            
            # 创建Document对象
            doc = Document(
                title=title,
                content=content,
                file_path=str(file_path),
                metadata={
                    'format': ext,
                    'size': file_size,
                    'char_count': len(content),
                }
            )
            
            logger.info(f"✅ 成功加载: {len(content)} 字符")
            return doc
            
        except Exception as e:
            logger.error(f"❌ 加载文件失败: {file_path}")
            logger.error(f"   错误信息: {str(e)}")
            raise
    
    def _extract_title(self, file_path: Path, content: str) -> str:
        """提取文档标题"""
        # 默认使用文件名
        title = file_path.stem
        
        # 尝试从内容第一行提取
        if content:
            first_line = content.split('\n')[0].strip()
            # 如果第一行不太长且不为空，使用它作为标题
            if 0 < len(first_line) < 100:
                title = first_line
        
        return title
    
    def _read_text_file(self, file_path: Path) -> str:
        """读取文本文件（TXT, Markdown）"""
        try:
            # 尝试多种编码
            encodings = ['utf-8', 'gbk', 'gb2312', 'utf-16', 'latin-1']
            for encoding in encodings:
                try:
                    with open(file_path, 'r', encoding=encoding) as f:
                        return f.read()
                except UnicodeDecodeError:
                    continue
            raise ValueError(f"无法解码文件（尝试了编码: {', '.join(encodings)}）")
        except Exception as e:
            raise ValueError(f"读取文本文件失败: {e}")
    
    def _read_pdf_file(self, file_path: Path) -> str:
        """读取PDF文件"""
        try:
            import PyPDF2
            
            with open(file_path, 'rb') as f:
                pdf_reader = PyPDF2.PdfReader(f)
                num_pages = len(pdf_reader.pages)
                logger.info(f"   PDF页数: {num_pages}")
                
                text = []
                for i, page in enumerate(pdf_reader.pages):
                    page_text = page.extract_text()
                    if page_text:
                        text.append(page_text)
                    if (i + 1) % 10 == 0:
                        logger.info(f"   已处理 {i+1}/{num_pages} 页")
                
                return '\n\n'.join(text)
        except ImportError:
            raise ImportError("请安装PyPDF2: pip install PyPDF2")
        except Exception as e:
            # 尝试备用方案：pdfplumber
            try:
                import pdfplumber
                logger.info("   尝试使用备用方案 pdfplumber...")
                
                with pdfplumber.open(file_path) as pdf:
                    text = []
                    for page in pdf.pages:
                        text.append(page.extract_text() or '')
                    return '\n\n'.join(text)
            except ImportError:
                raise ValueError(f"读取PDF失败: {e}\n建议安装备用库: pip install pdfplumber")
            except Exception as e2:
                raise ValueError(f"读取PDF文件失败: {e}")
    
    def _read_docx_file(self, file_path: Path) -> str:
        """读取DOCX文件"""
        try:
            from docx import Document as DocxDocument
            
            doc = DocxDocument(file_path)
            
            # 提取所有段落
            text = []
            for paragraph in doc.paragraphs:
                if paragraph.text.strip():
                    text.append(paragraph.text)
            
            # 提取表格内容
            for table in doc.tables:
                for row in table.rows:
                    row_text = '\t'.join([cell.text for cell in row.cells])
                    if row_text.strip():
                        text.append(row_text)
            
            return '\n\n'.join(text)
        except ImportError:
            raise ImportError("请安装python-docx: pip install python-docx")
        except Exception as e:
            raise ValueError(f"读取DOCX文件失败: {e}")
    
    def _read_excel_file(self, file_path: Path) -> str:
        """读取Excel文件（新增）"""
        try:
            import openpyxl
            
            workbook = openpyxl.load_workbook(file_path, read_only=True)
            text = []
            
            for sheet_name in workbook.sheetnames:
                sheet = workbook[sheet_name]
                text.append(f"# {sheet_name}")
                
                for row in sheet.iter_rows(values_only=True):
                    row_text = '\t'.join([str(cell) if cell is not None else '' for cell in row])
                    if row_text.strip():
                        text.append(row_text)
                
                text.append("")  # 空行分隔
            
            return '\n'.join(text)
        except ImportError:
            raise ImportError("请安装openpyxl: pip install openpyxl")
        except Exception as e:
            raise ValueError(f"读取Excel文件失败: {e}")
    
    def _read_html_file(self, file_path: Path) -> str:
        """读取HTML文件（新增）"""
        try:
            from bs4 import BeautifulSoup
            
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                html_content = f.read()
            
            soup = BeautifulSoup(html_content, 'html.parser')
            
            # 移除script和style标签
            for script in soup(['script', 'style']):
                script.decompose()
            
            # 提取文本
            text = soup.get_text()
            
            # 清理多余空白
            lines = (line.strip() for line in text.splitlines())
            chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
            text = '\n'.join(chunk for chunk in chunks if chunk)
            
            return text
        except ImportError:
            raise ImportError("请安装beautifulsoup4: pip install beautifulsoup4")
        except Exception as e:
            raise ValueError(f"读取HTML文件失败: {e}")
    
    def _read_json_file(self, file_path: Path) -> str:
        """读取JSON文件（新增）"""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                data = json.load(f)
            
            # 将JSON转换为可读文本
            if isinstance(data, dict):
                text = self._dict_to_text(data)
            elif isinstance(data, list):
                text = '\n\n'.join([self._dict_to_text(item) if isinstance(item, dict) else str(item) for item in data])
            else:
                text = str(data)
            
            return text
        except Exception as e:
            raise ValueError(f"读取JSON文件失败: {e}")
    
    def _dict_to_text(self, data: dict, indent: int = 0) -> str:
        """将字典转换为可读文本"""
        lines = []
        prefix = "  " * indent
        
        for key, value in data.items():
            if isinstance(value, dict):
                lines.append(f"{prefix}{key}:")
                lines.append(self._dict_to_text(value, indent + 1))
            elif isinstance(value, list):
                lines.append(f"{prefix}{key}:")
                for item in value:
                    if isinstance(item, dict):
                        lines.append(self._dict_to_text(item, indent + 1))
                    else:
                        lines.append(f"{prefix}  - {item}")
            else:
                lines.append(f"{prefix}{key}: {value}")
        
        return '\n'.join(lines)
    
    def _read_csv_file(self, file_path: Path) -> str:
        """读取CSV文件（新增）"""
        try:
            import csv
            
            text = []
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                reader = csv.reader(f)
                for row in reader:
                    row_text = '\t'.join(row)
                    if row_text.strip():
                        text.append(row_text)
            
            return '\n'.join(text)
        except Exception as e:
            raise ValueError(f"读取CSV文件失败: {e}")
    
    def split_text_intelligent(self, text: str, preserve_structure: bool = True) -> List[TextChunk]:
        """
        智能分块：保持语义完整性
        Args:
            text: 输入文本
            preserve_structure: 是否保持文档结构（章节、段落）
        """
        if preserve_structure:
            # 尝试按文档结构分割（章节、段落）
            chunks = self._split_by_structure(text)
            if chunks:
                return chunks
        
        # 如果结构分割失败，使用滑动窗口分割
        return self._split_by_sliding_window(text)
    
    def _split_by_structure(self, text: str) -> List[TextChunk]:
        """按文档结构分割（章节标题、段落等）"""
        chunks = []
        
        # 检测章节标题模式
        chapter_patterns = [
            r'^第[一二三四五六七八九十\d]+章\s+.+',  # 第一章 标题
            r'^第[一二三四五六七八九十\d]+节\s+.+',  # 第一节 标题
            r'^\d+\.\s+.+',  # 1. 标题
            r'^Chapter\s+\d+',  # Chapter 1
            r'^#{1,3}\s+.+',  # Markdown标题
        ]
        
        # 先按段落分割
        paragraphs = re.split(r'\n\s*\n', text)
        
        current_chunk = []
        current_size = 0
        chunk_id = 0
        start_pos = 0
        
        for para in paragraphs:
            para = para.strip()
            if not para:
                continue
            
            # 检查是否是章节标题
            is_chapter = any(re.match(pattern, para, re.MULTILINE) for pattern in chapter_patterns)
            
            # 如果当前块已经足够大，且遇到新章节，开始新块
            if current_size > self.chunk_size * 0.7 and is_chapter and current_chunk:
                chunk_text = '\n\n'.join(current_chunk)
                chunks.append(TextChunk(
                    content=chunk_text,
                    chunk_id=chunk_id,
                    start_pos=start_pos,
                    end_pos=start_pos + len(chunk_text),
                    metadata={'is_chapter_start': True}
                ))
                start_pos += len(chunk_text)
                chunk_id += 1
                current_chunk = [para]
                current_size = len(para)
            else:
                current_chunk.append(para)
                current_size += len(para) + 2  # +2 for '\n\n'
            
            # 如果当前块超过大小限制，强制分割
            if current_size > self.chunk_size:
                if len(current_chunk) > 1:
                    last_para = current_chunk.pop()
                    chunk_text = '\n\n'.join(current_chunk)
                    chunks.append(TextChunk(
                        content=chunk_text,
                        chunk_id=chunk_id,
                        start_pos=start_pos,
                        end_pos=start_pos + len(chunk_text),
                        metadata={}
                    ))
                    start_pos += len(chunk_text)
                    chunk_id += 1
                    current_chunk = [last_para]
                    current_size = len(last_para)
        
        # 添加最后一个块
        if current_chunk:
            chunk_text = '\n\n'.join(current_chunk)
            chunks.append(TextChunk(
                content=chunk_text,
                chunk_id=chunk_id,
                start_pos=start_pos,
                end_pos=start_pos + len(chunk_text),
                metadata={}
            ))
        
        return chunks if len(chunks) > 1 else []
    
    def _split_by_sliding_window(self, text: str) -> List[TextChunk]:
        """使用滑动窗口分割文本"""
        chunks = []
        text_length = len(text)
        chunk_id = 0
        start_pos = 0
        
        while start_pos < text_length:
            end_pos = min(start_pos + self.chunk_size, text_length)
            
            # 如果不是最后一块，尝试在句子边界分割
            if end_pos < text_length:
                sentence_endings = ['.', '。', '!', '！', '?', '？', '\n']
                for i in range(end_pos, max(start_pos, end_pos - 200), -1):
                    if text[i] in sentence_endings:
                        end_pos = i + 1
                        break
            
            chunk_text = text[start_pos:end_pos]
            chunks.append(TextChunk(
                content=chunk_text,
                chunk_id=chunk_id,
                start_pos=start_pos,
                end_pos=end_pos,
                metadata={}
            ))
            
            start_pos = end_pos - self.chunk_overlap
            chunk_id += 1
        
        return chunks
    
    def load_directory(self, dir_path: Union[str, Path], 
                      extensions: List[str] = None,
                      max_files: int = None) -> Tuple[List[Document], List[str]]:
        """
        批量加载目录中的文档（改进版）
        Args:
            dir_path: 目录路径
            extensions: 支持的文件扩展名列表
            max_files: 最大文件数量限制
        Returns:
            (成功加载的文档列表, 失败的文件列表)
        """
        if extensions is None:
            # 只包含当前可用的格式
            extensions = [f'.{fmt}' for fmt, available in self.available_formats.items() if available]
        
        dir_path = Path(dir_path)
        if not dir_path.is_dir():
            raise ValueError(f"不是有效目录: {dir_path}")
        
        documents = []
        failed_files = []
        
        # 收集所有文件
        all_files = []
        for ext in extensions:
            all_files.extend(list(dir_path.rglob(f'*{ext}')))
        
        if max_files:
            all_files = all_files[:max_files]
        
        total_files = len(all_files)
        logger.info(f"找到 {total_files} 个文件")
        
        # 加载文件
        for i, file_path in enumerate(all_files, 1):
            try:
                logger.info(f"[{i}/{total_files}] 正在加载: {file_path.name}")
                doc = self.load_file(file_path)
                documents.append(doc)
            except Exception as e:
                logger.error(f"❌ 加载失败: {file_path.name} - {str(e)}")
                failed_files.append(str(file_path))
        
        logger.info(f"✅ 成功加载 {len(documents)} 个文件")
        if failed_files:
            logger.warning(f"⚠️  失败 {len(failed_files)} 个文件")
        
        return documents, failed_files
    
    def estimate_tokens(self, text: str) -> int:
        """估算文本的token数量"""
        chinese_chars = len(re.findall(r'[\u4e00-\u9fff]', text))
        english_words = len(re.findall(r'[a-zA-Z]+', text))
        return chinese_chars + english_words
    
    def should_split(self, text: str, max_tokens: int = 6000) -> bool:
        """判断文本是否需要分块处理"""
        estimated_tokens = self.estimate_tokens(text)
        return estimated_tokens > max_tokens
    
    def get_text_preview(self, text: str, max_length: int = 200) -> str:
        """获取文本预览"""
        if len(text) <= max_length:
            return text
        return text[:max_length] + "..."
    
    def merge_chunks(self, chunks: List[TextChunk], max_tokens: int = 6000) -> List[TextChunk]:
        """合并文本块，直到接近token限制"""
        if not chunks:
            return []
        
        merged_chunks = []
        current_chunk_parts = []
        current_tokens = 0
        chunk_id = 0
        start_pos = chunks[0].start_pos if chunks else 0
        
        for chunk in chunks:
            chunk_tokens = self.estimate_tokens(chunk.content)
            
            if current_tokens + chunk_tokens > max_tokens and current_chunk_parts:
                merged_text = '\n\n'.join([c.content for c in current_chunk_parts])
                merged_chunks.append(TextChunk(
                    content=merged_text,
                    chunk_id=chunk_id,
                    start_pos=start_pos,
                    end_pos=current_chunk_parts[-1].end_pos,
                    metadata={'merged_from': len(current_chunk_parts)}
                ))
                chunk_id += 1
                current_chunk_parts = [chunk]
                current_tokens = chunk_tokens
                start_pos = chunk.start_pos
            else:
                current_chunk_parts.append(chunk)
                current_tokens += chunk_tokens
        
        if current_chunk_parts:
            merged_text = '\n\n'.join([c.content for c in current_chunk_parts])
            merged_chunks.append(TextChunk(
                content=merged_text,
                chunk_id=chunk_id,
                start_pos=start_pos,
                end_pos=current_chunk_parts[-1].end_pos,
                metadata={'merged_from': len(current_chunk_parts)}
            ))
        
        return merged_chunks


def load_text_from_source(source: Union[str, Path, List[Union[str, Path]]]) -> List[Document]:
    """
    便捷函数：从各种来源加载文本
    """
    processor = LongTextProcessor()
    
    if isinstance(source, (str, Path)):
        source = Path(source)
        
        if source.is_file():
            return [processor.load_file(source)]
        elif source.is_dir():
            documents, failed = processor.load_directory(source)
            return documents
        else:
            raise ValueError(f"无效路径: {source}")
    elif isinstance(source, list):
        documents = []
        for path in source:
            path = Path(path)
            if path.is_file():
                try:
                    documents.append(processor.load_file(path))
                except Exception as e:
                    logger.warning(f"跳过文件 {path}: {e}")
        return documents
    else:
        raise ValueError(f"不支持的数据源类型: {type(source)}")


# 如果直接运行此文件，执行测试
if __name__ == "__main__":
    print("长文本处理器 - 改进版")
    print("=" * 60)
    
    # 显示可用格式
    processor = LongTextProcessor()
    print("\n测试完成！")