import asyncio
import json
import os
import sys
import tempfile
import threading
import time
import uuid
from datetime import datetime, timedelta

from fastapi import UploadFile, HTTPException
from fastapi.responses import StreamingResponse
from langchain_core.messages import SystemMessage, HumanMessage

from config.crypto_config import DOC_PROMPT_TEMPLATES, SYSTEM_MESSAGE_TEMPLATES
from utils.DocTools import RapidOCRDocLoader

# Global in-memory upload cache.
UPLOAD_CACHE = {}
# RLock (not Lock): cleanup_expired_files() historically called cleanup_file()
# while already holding this lock, and cleanup_file() acquires it again; a
# plain non-reentrant Lock deadlocks on that nested acquisition.
UPLOAD_CACHE_LOCK = threading.RLock()
# Shape: {file_id: {"path": ..., "progress": ..., "status": ..., "uploaded_bytes": ..., "total_bytes": ..., "speed": ..., "estimated_time": ..., "created_at": ...}}

# File expiry window (hours).
FILE_EXPIRY_HOURS = 24

class FileService:
    def __init__(self):
        self.temp_dir = tempfile.gettempdir()
        self.max_file_size = 50 * 1024 * 1024  # 50MB
        self.allowed_extensions = {'.docx', '.pdf', '.txt', '.md'}

    def _get_file_extension(self, filename: str) -> str:
        return os.path.splitext(filename)[-1].lower()

    def _is_valid_file_type(self, filename: str) -> bool:
        return self._get_file_extension(filename) in self.allowed_extensions

    def _is_valid_file_size(self, file_size: int) -> bool:
        return file_size <= self.max_file_size

    async def upload_file(self, file: UploadFile) -> str:
        """Validate an uploaded file, persist it to the temp dir, and register it.

        Args:
            file: Incoming FastAPI upload.

        Returns:
            The generated file id ("f_" + 12 hex chars) used by the progress,
            analysis and cleanup endpoints.

        Raises:
            HTTPException: 415 for an unsupported extension, 413 when the
                payload exceeds ``max_file_size``.
        """
        # Validate the extension before touching the payload.
        if not self._is_valid_file_type(file.filename):
            raise HTTPException(status_code=415, detail=f"文件格式不支持，支持的类型: {', '.join(self.allowed_extensions)}")
        # Read the whole payload into memory (bounded by max_file_size below).
        file_content = await file.read()
        file_size = len(file_content)
        if not self._is_valid_file_size(file_size):
            raise HTTPException(status_code=413, detail=f"文件过大，最大支持 {self.max_file_size // (1024*1024)}MB")
        # Generate an opaque file id.
        file_id = f"f_{uuid.uuid4().hex[:12]}"
        # Security fix: file.filename is client-controlled; basename() strips any
        # directory components (e.g. "../../x") so the write cannot escape the
        # temp directory.
        safe_name = os.path.basename(file.filename)
        temp_path = os.path.join(self.temp_dir, f"{file_id}_{safe_name}")
        with open(temp_path, "wb") as f:
            f.write(file_content)
        # Register the upload in the in-memory cache under the lock.
        with UPLOAD_CACHE_LOCK:
            UPLOAD_CACHE[file_id] = {
                "path": temp_path,
                "progress": 0,
                "status": "uploaded",
                "uploaded_bytes": file_size,
                "total_bytes": file_size,
                "speed": file_size,  # simplification: whole file counted as one tick
                "estimated_time": 0,
                "file_name": file.filename,
                "file_size": file_size,
                "upload_time": datetime.utcnow().isoformat(),
                "created_at": time.time()
            }
        return file_id

    def get_upload_progress(self, file_id: str):
        """Return a progress snapshot for *file_id*; raise 404 when unknown."""
        with UPLOAD_CACHE_LOCK:
            record = UPLOAD_CACHE.get(file_id)
            if record is None:
                raise HTTPException(status_code=404, detail="文件不存在或已被清理")
            # Fill each reported field from the cache record, falling back to
            # the same defaults the upload path would have written.
            snapshot = {"file_id": file_id}
            defaults = {
                "progress": 0,
                "status": "uploaded",
                "uploaded_bytes": 0,
                "total_bytes": 0,
                "speed": 0,
                "estimated_time": 0,
            }
            for field, fallback in defaults.items():
                snapshot[field] = record.get(field, fallback)
            return snapshot

    def cleanup_file(self, file_id: str):
        """Remove *file_id*'s on-disk file (best effort) and drop its cache entry.

        Bug fix: the debug/warning prints referenced ``sys.stderr`` but ``sys``
        was never imported at module scope, so any call raised NameError; the
        module-level ``import sys`` now backs them. The cache entry is popped
        under the lock, and disk I/O happens outside the critical section.
        """
        with UPLOAD_CACHE_LOCK:
            info = UPLOAD_CACHE.pop(file_id, None)
        if info is None:
            return
        file_path = info.get("path")
        try:
            if file_path and os.path.exists(file_path):
                os.remove(file_path)
                print(f"[DEBUG] 文件已清理: {file_path}", file=sys.stderr)
        except OSError as e:
            # Best effort: a failed disk removal must not resurrect the cache entry.
            print(f"[WARNING] 文件清理失败: {e}", file=sys.stderr)
        print(f"[DEBUG] 缓存已清理: {file_id}", file=sys.stderr)

    def cleanup_expired_files(self):
        """Delete files older than FILE_EXPIRY_HOURS and purge their cache entries.

        Bug fixes:
        - ``cleanup_file`` was previously invoked while UPLOAD_CACHE_LOCK was
          still held; since it re-acquires the same lock, a non-reentrant
          Lock deadlocked. Expired ids are now collected under the lock and
          cleaned up only after it is released.
        - The summary print used ``sys.stderr`` without ``sys`` being
          imported (NameError); backed by the module-level ``import sys``.
        """
        now = time.time()
        expiry_seconds = FILE_EXPIRY_HOURS * 3600  # hours -> seconds
        # Snapshot the expired ids under the lock ...
        with UPLOAD_CACHE_LOCK:
            expired_files = [
                file_id
                for file_id, info in UPLOAD_CACHE.items()
                if now - info.get("created_at", 0) > expiry_seconds
            ]
        # ... then do the cleanup (which re-acquires the lock) lock-free here.
        for file_id in expired_files:
            self.cleanup_file(file_id)
        if expired_files:
            print(f"[DEBUG] 清理了 {len(expired_files)} 个过期文件: {expired_files}", file=sys.stderr)

    async def extract_chapters(self, file_id: str):
        """Stream chapter extraction for *file_id* as server-sent events.

        Resolves the cached file path (HTTP 404 when the id is unknown or the
        file was cleaned up), then returns a StreamingResponse whose async
        generator loads the document, extracts markdown-style titles, builds a
        flat chapter list plus a nested tree, and yields JSON events of type
        "chapters_progress", "chapters_complete" or "error", each framed as an
        SSE "data:" line followed by a blank line.
        """
        import sys
        with UPLOAD_CACHE_LOCK:
            info = UPLOAD_CACHE.get(file_id)
            if not info:
                raise HTTPException(status_code=404, detail="文件不存在或已被清理")
            file_path = info["path"]
        
        async def generate_chapters_stream():
            try:
                # 1. Read the document content
                print(f"[DEBUG] 开始读取文档内容: {file_path}", file=sys.stderr)
                progress_data = {
                    "type": "chapters_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "reading_content",
                        "progress": 10,
                        "message": "正在读取文档内容...",
                        "step_progress": 10
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 10
                        UPLOAD_CACHE[file_id]["status"] = "extracting_chapters"
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 2. Parse the document (text extraction)
                try:
                    print(f"[DEBUG] RapidOCRDocLoader 开始 load: {file_path}", file=sys.stderr)
                    loader = RapidOCRDocLoader(file_path=file_path)
                    loader.load()
                    print(f"[DEBUG] RapidOCRDocLoader load 完成: {file_path}", file=sys.stderr)
                except Exception as e:
                    print(f"[ERROR] RapidOCRDocLoader load 异常: {e}", file=sys.stderr)
                    yield f"data: {json.dumps({'type': 'error', 'message': f'文档解析失败: {str(e)}'})}\n\n"
                    return
                
                progress_data = {
                    "type": "chapters_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "extracting_text",
                        "progress": 30,
                        "message": "文档内容提取完成...",
                        "step_progress": 30
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 30
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 3. Extract the chapter title information
                try:
                    print(f"[DEBUG] RapidOCRDocLoader getDocMDTitle 开始", file=sys.stderr)
                    titles = loader.getDocMDTitle()
                    print(f"[DEBUG] RapidOCRDocLoader getDocMDTitle 完成: {titles}", file=sys.stderr)
                except Exception as e:
                    print(f"[ERROR] getDocMDTitle 异常: {e}", file=sys.stderr)
                    yield f"data: {json.dumps({'type': 'error', 'message': f'章节信息提取失败: {str(e)}'})}\n\n"
                    return
                
                progress_data = {
                    "type": "chapters_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "extracting_chapters",
                        "progress": 50,
                        "message": "章节信息提取完成...",
                        "step_progress": 50
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 50
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 4. Process the chapter information
                try:
                    print(f"[DEBUG] 开始处理章节信息", file=sys.stderr)
                    
                    # Build the tree structure
                    def build_tree_structure(chapters):
                        """Convert the flat chapter list into a nested tree via a level stack."""
                        tree = []
                        stack = []
                        
                        for chapter in chapters:
                            current_level = chapter['level']
                            current_node = {
                                "id": chapter['index'],
                                "title": chapter['clean_title'],  # use the cleaned title
                                "level": current_level,
                                "children": []
                            }
                            
                            # Pop until the top of the stack is this node's parent
                            while stack and stack[-1]['level'] >= current_level:
                                stack.pop()
                            
                            if stack:
                                # Attach to the parent's children
                                stack[-1]['children'].append(current_node)
                            else:
                                # Root-level node
                                tree.append(current_node)
                            
                            stack.append(current_node)
                        
                        return tree
                    
                    # Build the flat chapter list
                    flat_chapters = []
                    for idx, title in enumerate(titles):
                        # Strip the '#' markers from the title while keeping the chapter number
                        clean_title = title.strip('#').strip()
                        
                        chapter_info = {
                            "index": idx + 1,
                            "title": title,  # original title as extracted
                            "clean_title": clean_title,  # cleaned title ('#' removed, chapter number kept)
                            "level": title.count('#') if title.startswith('#') else 1,
                            "content": title.strip('#').strip()  # legacy content field kept for compatibility
                        }
                        flat_chapters.append(chapter_info)
                    
                    # Convert to the tree structure
                    tree_structure = build_tree_structure(flat_chapters)
                    
                    print(f"[DEBUG] 章节信息处理完成，树形结构: {tree_structure}", file=sys.stderr)
                except Exception as e:
                    print(f"[ERROR] 章节信息处理异常: {e}", file=sys.stderr)
                    yield f"data: {json.dumps({'type': 'error', 'message': f'章节信息处理失败: {str(e)}'})}\n\n"
                    return
                
                progress_data = {
                    "type": "chapters_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "processing_chapters",
                        "progress": 80,
                        "message": "章节信息处理完成...",
                        "step_progress": 80
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 80
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 5. Extraction complete
                print(f"[DEBUG] 章节提取完成，准备推送最终结果", file=sys.stderr)
                chapters_result_data = {
                    "file_name": info['file_name'],
                    "total_chapters": len(flat_chapters),
                    "flat_chapters": flat_chapters,  # flat structure kept for other consumers
                    "tree_structure": tree_structure,  # nested tree structure
                    "extraction_time": datetime.utcnow().isoformat(),
                }
                complete_data = {
                    "type": "chapters_complete",
                    "data": {
                        "file_id": file_id,
                        "chapters_result": chapters_result_data
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 100
                        UPLOAD_CACHE[file_id]["status"] = "complete"
                print(f"[DEBUG] 最终结果已推送", file=sys.stderr)
                yield f"data: {json.dumps(complete_data, ensure_ascii=False)}\n\n"
            finally:
                # Do not clean the file up here; it must stay available.
                # Cleanup happens on explicit user request or on expiry.
                pass
        
        return StreamingResponse(
            generate_chapters_stream(),
            media_type="text/plain",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Content-Type": "text/event-stream"
            }
        )

    async def analyze_file(self, file_id: str, options: dict = None):
        """Stream a crypto-evaluation analysis of *file_id* as server-sent events.

        Resolves the cached file path (HTTP 404 when the id is unknown), then
        returns a StreamingResponse whose async generator parses the document,
        extracts the key paragraph for each evaluation category, runs each
        category through the LLM with the configured prompt templates, and
        yields JSON events of type "analysis_progress", "analysis_complete"
        or "error" framed as SSE "data:" lines.

        Args:
            file_id: Id previously returned by upload_file.
            options: Optional dict; its "docKeyExtraction" entry, when
                present, is a JSON string of custom extraction rules passed
                through to RapidOCRDocLoader.
        """
        import sys
        with UPLOAD_CACHE_LOCK:
            info = UPLOAD_CACHE.get(file_id)
            if not info:
                raise HTTPException(status_code=404, detail="文件不存在或已被清理")
            file_path = info["path"]
        async def generate_analysis_stream():
            try:
                # 1. Read the document content
                print(f"[DEBUG] 开始读取文档内容: {file_path}", file=sys.stderr)
                progress_data = {
                    "type": "analysis_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "reading_content",
                        "progress": 10,
                        "message": "正在读取文档内容...",
                        "step_progress": 10
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 10
                        UPLOAD_CACHE[file_id]["status"] = "analyzing"
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 2. Parse the document (text extraction)
                try:
                    print(f"[DEBUG] RapidOCRDocLoader 开始 load: {file_path}", file=sys.stderr)
                    
                    # Handle the docKeyExtraction option
                    custom_doc_key_extraction = None
                    if options and options.get("docKeyExtraction"):
                        try:
                            custom_doc_key_extraction = json.loads(options["docKeyExtraction"])
                            print(f"[DEBUG] 使用自定义 docKeyExtraction: {custom_doc_key_extraction}", file=sys.stderr)
                        except json.JSONDecodeError as e:
                            print(f"[WARNING] docKeyExtraction JSON 解析失败: {e}", file=sys.stderr)
                    
                    loader = RapidOCRDocLoader(file_path=file_path, custom_doc_key_extraction=custom_doc_key_extraction)
                    # Reset before every analysis so extracted content does not accumulate across runs
                    loader.CRYPTO_EVAL_INFORMATION = {
                        "KEY": "",
                        "AUTH": "",
                        "ACCESS_CONTROL": "",
                        "KEY_DATA_TRANSPORT_CONFIDENTIALITY_AND_INTEGRITY": "",
                        "KEY_DATA_STORE_CONFIDENTIALITY": "",
                        "KEY_DATA_STORE_INTEGRITY": "",
                        "KEY_DATA_SAFE_MARK_INTEGRITY": "",
                        "NON-REPUDIATION": "",
                    }
                    loader.load()
                    print(f"[DEBUG] RapidOCRDocLoader load 完成: {file_path}", file=sys.stderr)
                except Exception as e:
                    print(f"[ERROR] RapidOCRDocLoader load 异常: {e}", file=sys.stderr)
                    yield f"data: {json.dumps({'type': 'error', 'message': f'文档解析失败: {str(e)}'})}\n\n"
                    return
                progress_data = {
                    "type": "analysis_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "extracting_text",
                        "progress": 30,
                        "message": "文档内容提取完成...",
                        "step_progress": 30
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 30
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 3. Structured information extraction
                try:
                    print(f"[DEBUG] RapidOCRDocLoader getDockKeyParagraph 开始", file=sys.stderr)
                    crypto_eval_information = loader.getDockKeyParagraph()
                    print(f"[DEBUG] RapidOCRDocLoader getDockKeyParagraph 完成: {crypto_eval_information}", file=sys.stderr)
                except Exception as e:
                    print(f"[ERROR] getDockKeyParagraph 异常: {e}", file=sys.stderr)
                    yield f"data: {json.dumps({'type': 'error', 'message': f'结构化信息提取失败: {str(e)}'})}\n\n"
                    return
                progress_data = {
                    "type": "analysis_progress",
                    "data": {
                        "file_id": file_id,
                        "step": "analyzing_structure",
                        "progress": 50,
                        "message": "结构化信息提取完成...",
                        "step_progress": 50
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 50
                yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                await asyncio.sleep(0.5)

                # 4. LLM structured processing
                system_message = SystemMessage(content=SYSTEM_MESSAGE_TEMPLATES)
                crypto_eval_extract = {
                    "KEY":  "",
                    "AUTH": "",
                    "ACCESS_CONTROL": "",
                    "KEY_DATA_TRANSPORT_CONFIDENTIALITY_AND_INTEGRITY": "",
                    "KEY_DATA_STORE_CONFIDENTIALITY": "",
                    "KEY_DATA_STORE_INTEGRITY": "",
                    "KEY_DATA_SAFE_MARK_INTEGRITY": "",
                    "NON-REPUDIATION": "",
                }
                
                # Map each key to its Chinese section title
                key_title_map = {
                    "KEY": "信息种类及关键数据类型",
                    "AUTH": "身份鉴别",
                    "ACCESS_CONTROL": "访问控制信息完整性保护",
                    "KEY_DATA_TRANSPORT_CONFIDENTIALITY_AND_INTEGRITY": "重要数据传输机密性和完整性",
                    "KEY_DATA_STORE_CONFIDENTIALITY": "重要数据存储机密性",
                    "KEY_DATA_STORE_INTEGRITY": "重要数据存储完整性",
                    "KEY_DATA_SAFE_MARK_INTEGRITY": "重要信息资源安全标记完整性",
                    "NON-REPUDIATION": "不可否认性"
                }
                
                # Initialise the LLM service with explicit error handling
                try:
                    from services.file.llm_service import LLMService
                    llm_service = LLMService()
                    llm = llm_service.get_llm()
                    print(f"[DEBUG] LLM服务初始化成功", file=sys.stderr)
                except Exception as e:
                    print(f"[ERROR] LLM服务初始化失败: {e}", file=sys.stderr)
                    yield f"data: {json.dumps({'type': 'error', 'message': f'LLM服务初始化失败: {str(e)}'})}\n\n"
                    return
                
                for idx, key in enumerate(crypto_eval_extract):
                    prompt_template = DOC_PROMPT_TEMPLATES.get(f"{key}_INFORMATION")
                    if prompt_template and crypto_eval_information.get(key):
                        prompt_message = prompt_template.format(doc_paragraph=crypto_eval_information[key])
                        query_messages = [system_message, HumanMessage(content=prompt_message)]
                        try:
                            print(f"[DEBUG] LLM 处理 {key} 开始", file=sys.stderr)
                            llm_response = llm(query_messages)
                            crypto_eval_extract[key] = llm_response.content
                            print(f"[DEBUG] LLM 处理 {key} 完成", file=sys.stderr)
                        except Exception as e:
                            print(f"[ERROR] LLM 处理 {key} 异常: {e}", file=sys.stderr)
                            # Do not abort the stream: keep processing the remaining sections
                            crypto_eval_extract[key] = f"处理失败: {str(e)}"
                    # Push a progress event after each key
                    progress = 60 + int(30 * (idx + 1) / len(crypto_eval_extract))
                    # Show the Chinese section title in the progress message
                    chinese_title = key_title_map.get(key, key)
                    progress_data = {
                        "type": "analysis_progress",
                        "data": {
                            "file_id": file_id,
                            "step": "generating_summary",
                            "progress": progress,
                            "message": f"正在处理 {chinese_title} ...",
                            "step_progress": progress
                        }
                    }
                    with UPLOAD_CACHE_LOCK:
                        if file_id in UPLOAD_CACHE:
                            UPLOAD_CACHE[file_id]["progress"] = progress
                    yield f"data: {json.dumps(progress_data, ensure_ascii=False)}\n\n"
                    await asyncio.sleep(0.2)

                # 5. Analysis complete
                print(f"[DEBUG] 分析完成，准备推送最终结果", file=sys.stderr)
                sections = []
                for key, title in key_title_map.items():
                    sections.append({
                        "title": title,
                        "content": crypto_eval_extract.get(key, ""),
                        "raw_content": crypto_eval_information.get(key, "")  # original paragraph for each structured section
                    })
                # Fetch the raw document text
                try:
                    raw_content_list = loader.getDocMDContent()
                    if isinstance(raw_content_list, list):
                        raw_content = "\n".join(raw_content_list)
                    else:
                        raw_content = str(raw_content_list)
                except Exception as e:
                    print(f"[ERROR] 获取文档原文内容失败: {e}", file=sys.stderr)
                    raw_content = ""
                analysis_result_data = {
                    "summary": f"这是对文件 {info['file_name']} 的分析摘要",
                    "structure": {
                        "sections": sections
                    },
                    "analysis_time": datetime.utcnow().isoformat(),
                }
                complete_data = {
                    "type": "analysis_complete",
                    "data": {
                        "file_id": file_id,
                        "analysis_result": analysis_result_data
                    }
                }
                with UPLOAD_CACHE_LOCK:
                    if file_id in UPLOAD_CACHE:
                        UPLOAD_CACHE[file_id]["progress"] = 100
                        UPLOAD_CACHE[file_id]["status"] = "complete"
                print(f"[DEBUG] 最终结果已推送", file=sys.stderr)
                yield f"data: {json.dumps(complete_data, ensure_ascii=False)}\n\n"
            finally:
                # Do not clean the file up here; it must stay available.
                # Cleanup happens on explicit user request or on expiry.
                pass
        return StreamingResponse(
            generate_analysis_stream(),
            media_type="text/plain",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
                "Content-Type": "text/event-stream"
            }
        )