#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
PDF文件处理服务
支持从指定文件夹读取PDF文件，提取文本内容并进行分块处理
"""

import os
import re
import uuid
from typing import List, Dict, Any, Optional
from pathlib import Path
import fitz  # PyMuPDF
import pdfplumber
from utils.logger import emobot_logger

logger = emobot_logger.get_logger()


class PDFProcessor:
    """Extracts text from PDF files and splits it into overlapping chunks."""

    def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200):
        """
        Initialize the PDF processor.

        Args:
            chunk_size: Maximum chunk size in characters (must be > 0).
            chunk_overlap: Overlap between consecutive chunks in characters
                (must be >= 0 and smaller than chunk_size).

        Raises:
            ValueError: If chunk_size/chunk_overlap are inconsistent.
        """
        # Validate up front: chunk_overlap >= chunk_size would stall the
        # sliding window in split_text_into_chunks.
        if chunk_size <= 0:
            raise ValueError(f"chunk_size must be positive, got {chunk_size}")
        if chunk_overlap < 0 or chunk_overlap >= chunk_size:
            raise ValueError(
                f"chunk_overlap must be in [0, chunk_size), got {chunk_overlap}"
            )
        self.chunk_size = chunk_size
        self.chunk_overlap = chunk_overlap
        logger.info(f"PDF处理器初始化完成 - 块大小: {chunk_size}, 重叠: {chunk_overlap}")

    def extract_text_from_pdf(self, pdf_path: str) -> Dict[str, Any]:
        """
        Extract text content from a PDF file using PyMuPDF.

        Args:
            pdf_path: Path to the PDF file.

        Returns:
            Dict with the extracted text and metadata on success; on failure
            a dict carrying an "error" key instead of the text fields.
        """
        try:
            logger.info(f"开始处理PDF文件: {pdf_path}")

            page_texts: List[Dict[str, Any]] = []
            text_parts: List[str] = []

            # try/finally guarantees the document handle is released even if
            # extraction raises partway through the page loop.
            doc = fitz.open(pdf_path)
            try:
                for page_num in range(len(doc)):
                    page = doc.load_page(page_num)
                    page_text = page.get_text()
                    page_texts.append({
                        "page_number": page_num + 1,
                        "text": page_text,
                        "char_count": len(page_text)
                    })
                    text_parts.append(page_text)
            finally:
                doc.close()

            # Each page is terminated with a newline (same as the original
            # incremental concatenation, without quadratic string building).
            full_text = "".join(f"{t}\n" for t in text_parts)

            cleaned_text = self._clean_text(full_text)

            result = {
                "file_path": pdf_path,
                "file_name": os.path.basename(pdf_path),
                "total_pages": len(page_texts),
                "total_characters": len(cleaned_text),
                "full_text": cleaned_text,
                "page_texts": page_texts,
                "extraction_method": "PyMuPDF"
            }

            logger.info(f"PDF文本提取完成: {result['file_name']}, 页数: {result['total_pages']}, 字符数: {result['total_characters']}")
            return result

        except Exception as e:
            # Best-effort: report the failure in-band so folder processing
            # can continue with the remaining files.
            logger.error(f"PDF文本提取失败 {pdf_path}: {e}")
            return {
                "file_path": pdf_path,
                "file_name": os.path.basename(pdf_path),
                "error": str(e),
                "extraction_method": "PyMuPDF"
            }

    def _clean_text(self, text: str) -> str:
        """
        Normalize extracted text.

        Collapses runs of horizontal whitespace, strips characters that are
        neither word characters, whitespace, CJK ideographs, nor CJK
        punctuation, and merges consecutive blank lines. Newlines are
        preserved so split_text_into_chunks can use them as cut points.

        Args:
            text: Raw extracted text.

        Returns:
            Cleaned text.
        """
        # Collapse horizontal whitespace only. The original r'\s+' also ate
        # every '\n', which made the blank-line merge below dead code and
        # removed the newline boundaries the chunker searches for.
        text = re.sub(r'[ \t\f\v\r]+', ' ', text)
        # Drop special characters but keep CJK ideographs and punctuation.
        text = re.sub(r'[^\w\s\u4e00-\u9fff\u3000-\u303f\uff00-\uffef]', '', text)
        # Merge consecutive blank lines into a single newline.
        text = re.sub(r'\n\s*\n', '\n', text)
        return text.strip()

    def split_text_into_chunks(self, text: str, metadata: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """
        Split text into overlapping chunks.

        Chunks are at most ``chunk_size`` characters; for non-final chunks
        the cut is moved back to the nearest sentence-ending punctuation or
        newline, searching no further back than half a chunk. Consecutive
        chunks overlap by roughly ``chunk_overlap`` characters.

        Args:
            text: Text to split.
            metadata: Optional file-level metadata merged into each chunk.

        Returns:
            List of chunk dicts with position info and, when metadata is
            given, file/page attribution.
        """
        if not text or not text.strip():
            return []

        text_len = len(text)
        chunks: List[Dict[str, Any]] = []
        start = 0
        chunk_index = 0

        while start < text_len:
            end = start + self.chunk_size

            # For non-final chunks, prefer cutting at a sentence boundary or
            # newline, but never shrink the chunk below half of chunk_size.
            if end < text_len:
                for i in range(end, max(start + self.chunk_size // 2, start), -1):
                    if text[i] in '。！？\n':
                        end = i + 1
                        break

            chunk_text = text[start:end].strip()

            if chunk_text:
                chunk_metadata = {
                    "chunk_id": str(uuid.uuid4()),
                    "chunk_index": chunk_index,
                    "start_pos": start,
                    # Clamp: for the final chunk, `end` may exceed the text.
                    "end_pos": min(end, text_len),
                    "char_count": len(chunk_text),
                    "text": chunk_text
                }

                # Attach file-level metadata when available.
                if metadata:
                    chunk_metadata.update({
                        "file_path": metadata.get("file_path", ""),
                        "file_name": metadata.get("file_name", ""),
                        "total_pages": metadata.get("total_pages", 0),
                        "page_number": self._get_page_number_for_chunk(start, metadata.get("page_texts", [])),
                        "content_type": "psychology_pdf"
                    })

                chunks.append(chunk_metadata)
                chunk_index += 1

            # Stop once the text is fully consumed. (The original computed
            # `start = end - overlap` first, which could emit a duplicate
            # overlapping tail chunk after the final one.)
            if end >= text_len:
                break
            # Advance with overlap; max() guarantees forward progress even
            # when the boundary search pulled `end` back toward `start`.
            start = max(end - self.chunk_overlap, start + 1)

        logger.info(f"文本分割完成: {len(chunks)} 个块")
        return chunks

    def _get_page_number_for_chunk(self, start_pos: int, page_texts: List[Dict]) -> int:
        """
        Determine the page a chunk starts on from its character offset.

        Args:
            start_pos: Character offset of the chunk in the concatenated text.
            page_texts: Per-page info dicts with "char_count"/"page_number".

        Returns:
            1-based page number; defaults to 1 when the offset is past the
            accumulated page lengths (offsets drift slightly after cleaning).
        """
        current_pos = 0
        for page_info in page_texts:
            current_pos += page_info["char_count"]
            if start_pos < current_pos:
                return page_info["page_number"]
        return 1  # fallback: first page

    def process_pdf_folder(self, folder_path: str) -> Dict[str, Any]:
        """
        Process every PDF file in a folder.

        Args:
            folder_path: Path to the folder containing PDF files.

        Returns:
            Summary dict with per-file results, all chunks, and counts. The
            same shape (with zero counts) is returned when the folder is
            missing or contains no PDF files, so callers always get one
            consistent type. (The original returned a bare list in those
            cases despite a List annotation and a dict success value.)
        """
        empty_result: Dict[str, Any] = {
            "processed_files": [],
            "all_chunks": [],
            "total_files": 0,
            "successful_files": 0,
            "total_chunks": 0
        }

        folder = Path(folder_path)
        if not folder.exists():
            logger.error(f"文件夹不存在: {folder}")
            return empty_result

        # Case-insensitive suffix check also picks up ".PDF"; sorting makes
        # processing order deterministic across platforms.
        pdf_files = sorted(
            p for p in folder.iterdir()
            if p.is_file() and p.suffix.lower() == ".pdf"
        )
        if not pdf_files:
            logger.warning(f"文件夹中没有找到PDF文件: {folder}")
            return empty_result

        logger.info(f"找到 {len(pdf_files)} 个PDF文件")

        all_chunks: List[Dict[str, Any]] = []
        processed_files: List[Dict[str, Any]] = []

        for pdf_file in pdf_files:
            try:
                logger.info(f"处理PDF文件: {pdf_file.name}")

                # Extract text; failures are reported in-band via "error".
                extraction_result = self.extract_text_from_pdf(str(pdf_file))

                if "error" in extraction_result:
                    logger.error(f"PDF处理失败: {pdf_file.name} - {extraction_result['error']}")
                    # Record the failure instead of silently skipping it, so
                    # processed_files accounts for every attempted file.
                    processed_files.append({
                        "file_name": pdf_file.name,
                        "file_path": str(pdf_file),
                        "status": "error",
                        "error": extraction_result["error"]
                    })
                    continue

                # Split text into chunks, attaching the file metadata.
                chunks = self.split_text_into_chunks(
                    extraction_result["full_text"],
                    extraction_result
                )

                all_chunks.extend(chunks)
                processed_files.append({
                    "file_name": extraction_result["file_name"],
                    "file_path": extraction_result["file_path"],
                    "total_pages": extraction_result["total_pages"],
                    "total_characters": extraction_result["total_characters"],
                    "chunks_count": len(chunks),
                    "status": "success"
                })

                logger.info(f"PDF处理完成: {pdf_file.name} - {len(chunks)} 个文本块")

            except Exception as e:
                # Keep going: one bad file must not abort the whole folder.
                logger.error(f"处理PDF文件时出错 {pdf_file.name}: {e}")
                processed_files.append({
                    "file_name": pdf_file.name,
                    "file_path": str(pdf_file),
                    "status": "error",
                    "error": str(e)
                })

        logger.info(f"文件夹处理完成: {len(processed_files)} 个文件, {len(all_chunks)} 个文本块")

        return {
            "processed_files": processed_files,
            "all_chunks": all_chunks,
            "total_files": len(pdf_files),
            "successful_files": sum(1 for f in processed_files if f.get("status") == "success"),
            "total_chunks": len(all_chunks)
        }


def create_pdf_processor(chunk_size: int = 1000, chunk_overlap: int = 200) -> PDFProcessor:
    """
    Factory for PDFProcessor instances.

    Args:
        chunk_size: Maximum chunk size in characters.
        chunk_overlap: Overlap between consecutive chunks in characters.

    Returns:
        A PDFProcessor configured with the given chunking parameters.
    """
    processor = PDFProcessor(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
    return processor
