import sys
import os
# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from fastapi import FastAPI, HTTPException, BackgroundTasks, Query
import json
import time
import asyncio
from concurrent.futures import ThreadPoolExecutor
import aiohttp
from asyncio import Queue
from contextlib import asynccontextmanager # 导入 asynccontextmanager
from fastapi.staticfiles import StaticFiles
from datetime import datetime, timedelta
from fastapi.responses import FileResponse

from common.train_dict import load_excel_dict  # 导入字典处理函数
from common.person_dict import process_persons_list  # 导入人员字典处理模块
from common.workflow_client import WorkflowClientManager
from common.performance_monitor import PerformanceMonitor
from common.request_processor import RequestProcessor
from common.models import BaseLetterRequest, LetterResponse

from laixin.config import current_config  # 导入配置
from laixin.laixin_logger import log_api_request, log_workflow_output, log_api_response, log_error, log_info # 导入日志模块
from laixin.laixin_process_fyrs import process_fyrs_section  # 导入反映人处理模块
from laixin.laixin_process_bfyrs import process_bfyrs_section  # 导入被反映人处理模块

# Workflow call timeout, in seconds.
WORKFLOW_TIMEOUT = 300

# Thread pool for offloading blocking work (file logging, etc.) from the event loop.
thread_pool = ThreadPoolExecutor(max_workers=15)

# Shared aiohttp session pool; created in the lifespan handler at startup.
session_pool = None

# Performance-monitoring singleton shared by all endpoints.
performance_monitor = PerformanceMonitor()

# Bounded request queue and semaphore limiting concurrent workflow calls.
REQUEST_QUEUE = Queue(maxsize=100)
REQUEST_SEMAPHORE = asyncio.Semaphore(10)  # concurrency capped at 10

# In-memory store of live call records, keyed by letter number (bh).
CALL_RECORDS = {}

# Globals for the workflow client and request processor, initialised at startup
# in the lifespan handler.
workflow_client_global = None
request_processor_global = None

# Preload every dictionary sheet into a module-level cache so per-request
# lookups avoid re-reading the Excel files.
DICT_CACHE = {
    "问题性质": load_excel_dict("file/训练模型字典.xlsx", "最后一级问题性质"),
    "统一关键字": load_excel_dict("file/训练模型字典.xlsx", "统一关键字"),
    "自定义关键字": load_excel_dict("file/训练模型字典.xlsx", "自定义关键字"),
    "信访性质": load_excel_dict("file/训练模型字典.xlsx", "信访性质"),
    "干部管理权限": load_excel_dict("file/反映人被反映人字典.xlsx", "干部管理权限"),
    "职级": load_excel_dict("file/反映人被反映人字典.xlsx", "职级"),
    "政治面貌": load_excel_dict("file/反映人被反映人字典.xlsx", "政治面貌")
}

async def async_log_error(bh: str, message: str) -> None:
    """Asynchronously record an error for the given letter number.

    Also flags the live call record for *bh* as failed so the monitoring
    page reflects the error immediately, before the file log is written.

    Args:
        bh: Letter/case identifier.
        message: Human-readable error description.
    """
    # Update the in-memory record first: it is cheap and must not wait on I/O.
    if bh in CALL_RECORDS:
        CALL_RECORDS[bh]["status"] = "失败"
        CALL_RECORDS[bh]["errorInfo"] = {"message": message}
    # log_error does blocking file I/O, so push it onto the thread pool.
    # get_running_loop() replaces the deprecated get_event_loop() inside
    # a coroutine (deprecated since Python 3.10).
    await asyncio.get_running_loop().run_in_executor(
        thread_pool,
        log_error,
        bh,
        message
    )

async def async_log_info(bh: str, message: str) -> None:
    """Asynchronously record an informational log entry for *bh*.

    log_info does blocking file I/O, so it runs on the shared thread pool.
    get_running_loop() replaces the deprecated get_event_loop() inside a
    coroutine (deprecated since Python 3.10).
    """
    await asyncio.get_running_loop().run_in_executor(
        thread_pool,
        log_info,
        bh,
        message
    )

def get_dict_name_cached(dict_type: str, value: str) -> str:
    """Translate a dictionary value into its display name via DICT_CACHE.

    Supports semicolon-separated multi-value input, in which case the
    translated names are joined with commas. Values with no cache entry
    (and unknown dict_type altogether) pass through unchanged.
    """
    mapping = DICT_CACHE.get(dict_type)
    if mapping is None:
        # Unknown dictionary type: return the input untouched.
        return value

    if ";" in value:
        # Multiple values: translate each trimmed part, falling back to the
        # trimmed original when the cache has no entry for it.
        names = [
            mapping.get(part.strip(), part.strip())
            for part in value.split(";")
        ]
        return ",".join(names)

    # Single value: look up the trimmed form, fall back to the raw input.
    return mapping.get(value.strip(), value)

def _parse_output_sections(lines):
    """Split workflow output lines into named sections.

    A non-empty line ending with ':' starts a new section; subsequent
    non-empty lines become its content. Returns {section_name: content}.
    """
    sections = {}
    current_section = None
    current_content = []
    for line in lines:
        line = line.strip()
        if not line:
            continue
        if line.endswith(':'):
            # Flush the previous section before starting a new one.
            if current_section and current_content:
                sections[current_section] = '\n'.join(current_content).strip()
            current_section = line[:-1].strip()
            current_content = []
        elif current_section:
            current_content.append(line)
    # Flush the trailing section.
    if current_section and current_content:
        sections[current_section] = '\n'.join(current_content).strip()
    return sections


def _translate_list_section(content, dict_type):
    """Translate a ';'-separated section through the dictionary cache.

    Items with no dictionary translation (the lookup returns the item
    itself) are dropped. Returns [] for empty or literal "null" content.
    """
    if not content or content.lower() == "null":
        return []
    translated = []
    for item in content.split(";"):
        item = item.strip()
        if not item:
            continue
        # One lookup per item (previously each item was looked up twice:
        # once for the filter and once for the mapped value).
        name = get_dict_name_cached(dict_type, item)
        if name != item:
            translated.append(name)
    return translated


def _null_to_empty(content):
    """Map the literal string 'null' (any case) to '', otherwise pass through."""
    return "" if content.lower() == "null" else content


async def process_workflow_output(output_text: str, bh: str) -> dict:
    """Parse raw workflow output text into the structured letter-result dict.

    Args:
        output_text: The full text emitted by the workflow.
        bh: Letter/case identifier, echoed back in the result and used for
            error logging.

    Returns:
        Result dict with keys: bh, xflb, wtxx (wtzy/fl/tygjz/zdygjz/bs),
        fyrs, bfyrs.

    Raises:
        HTTPException: 500 on any parsing failure (after logging it).
    """
    try:
        result = {
            "bh": bh,
            "xflb": "",
            "wtxx": {
                "wtzy": "",
                "fl": [],
                "tygjz": [],
                "zdygjz": [],
                "bs": {
                    "yy": "",
                    "sggb": "",
                    "sfcf": ""
                }
            },
            "fyrs": [],
            "bfyrs": []
        }

        # Split once and reuse; previously the text was re-split three times.
        lines = output_text.split('\n')
        sections = _parse_output_sections(lines)

        # Scalar sections.
        if "xflb" in sections:
            result["xflb"] = get_dict_name_cached("信访性质", sections["xflb"])
        if "wtzy" in sections:
            result["wtxx"]["wtzy"] = sections["wtzy"]

        # List sections: split, translate via the cache, drop untranslatable items.
        if "fl" in sections:
            result["wtxx"]["fl"] = _translate_list_section(sections["fl"], "问题性质")
        if "tygjz" in sections:
            result["wtxx"]["tygjz"] = _translate_list_section(sections["tygjz"], "统一关键字")
        if "zdygjz" in sections:
            result["wtxx"]["zdygjz"] = _translate_list_section(sections["zdygjz"], "自定义关键字")

        # "bs" sub-fields: a literal "null" means empty.
        if "yy" in sections:
            result["wtxx"]["bs"]["yy"] = _null_to_empty(sections["yy"])
        if "sggb" in sections:
            result["wtxx"]["bs"]["sggb"] = _null_to_empty(sections["sggb"])
        if "sfcf" in sections:
            result["wtxx"]["bs"]["sfcf"] = _null_to_empty(sections["sfcf"])

        # Dedicated modules parse reporter / reported-person sections.
        # Each gets its own copy of the lines in case it mutates the list.
        result["fyrs"] = process_fyrs_section(list(lines))
        result["bfyrs"] = process_bfyrs_section(list(lines))

        # Deduplicate reported persons on (name, position, unit/address),
        # keeping first occurrence order.
        if result["bfyrs"]:
            seen = set()
            unique_bfyrs = []
            for bfyr in result["bfyrs"]:
                bfyr_key = f"{bfyr.get('mc', '')}_{bfyr.get('zw', '')}_{bfyr.get('dwhdz', '')}"
                if bfyr_key not in seen:
                    seen.add(bfyr_key)
                    unique_bfyrs.append(bfyr)
            result["bfyrs"] = unique_bfyrs

        # Apply person-dictionary mappings (ranks, political status, ...).
        if result["fyrs"]:
            result["fyrs"] = process_persons_list(result["fyrs"], is_reporter=True)
        if result["bfyrs"]:
            result["bfyrs"] = process_persons_list(result["bfyrs"], is_reporter=False)

        return result
    except Exception as e:
        await async_log_error(bh, f"处理过程中发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

async def cleanup_call_records():
    """Background task: hourly, prune CALL_RECORDS to the last three days.

    Each pass is wrapped so that a single failure (e.g. a malformed
    startTime) does not permanently kill the cleanup loop — previously any
    ValueError from strptime would terminate the task forever.
    """
    while True:
        await asyncio.sleep(3600)  # run once per hour
        try:
            threshold_date = datetime.now() - timedelta(days=3)
            keys_to_delete = []
            for bh, record in CALL_RECORDS.items():
                record_time_str = record.get("startTime")
                if not record_time_str:
                    continue
                try:
                    record_time = datetime.strptime(record_time_str, "%Y-%m-%d %H:%M:%S")
                except ValueError:
                    # Malformed timestamp: skip this record rather than crash.
                    continue
                if record_time < threshold_date:
                    keys_to_delete.append(bh)

            # Delete after iteration so the dict is not mutated while iterated.
            for bh in keys_to_delete:
                del CALL_RECORDS[bh]
            await async_log_info("SYSTEM", f"已清理 {len(keys_to_delete)} 条旧的调用记录。")
        except asyncio.CancelledError:
            # Propagate cancellation so shutdown can stop this task cleanly.
            raise
        except Exception as e:
            # Log and keep the loop alive for the next hourly pass.
            await async_log_error("SYSTEM", f"清理调用记录失败: {str(e)}")

@asynccontextmanager
async def lifespan_event_handler(app: FastAPI):
    """FastAPI lifespan: build shared resources at startup, tear down at shutdown.

    Startup creates the shared aiohttp session, the workflow client and the
    request processor, then launches the queue-processing and record-cleanup
    background tasks. Shutdown cancels those tasks and closes the session.
    """
    global session_pool, workflow_client_global, request_processor_global

    # One shared HTTP session for all outbound workflow calls.
    session_pool = aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(
            limit=5,            # cap on concurrent connections
            ttl_dns_cache=300,  # DNS cache TTL, seconds
            use_dns_cache=True
        )
    )

    workflow_client_global = WorkflowClientManager(
        api_key=current_config['api_key'],
        base_url=current_config['base_url'],
        session_pool=session_pool
    )

    request_processor_global = RequestProcessor(
        workflow_client=workflow_client_global,
        performance_monitor=performance_monitor,
        thread_pool=thread_pool,
        request_queue=REQUEST_QUEUE,
        request_semaphore=REQUEST_SEMAPHORE,
        error_callback=async_log_error
    )

    # Keep strong references to the background tasks: the event loop holds
    # only weak references, so un-referenced tasks may be garbage-collected
    # mid-flight. Previously the create_task results were discarded.
    background_tasks = [
        asyncio.create_task(request_processor_global.process_request_queue()),
        asyncio.create_task(cleanup_call_records()),
    ]

    yield

    # Shutdown: stop the background tasks before closing the shared session.
    for task in background_tasks:
        task.cancel()
    # Swallow CancelledError / task failures during teardown.
    await asyncio.gather(*background_tasks, return_exceptions=True)

    if session_pool:
        await session_pool.close()

app = FastAPI(title="来信信访件信息查询API", lifespan=lifespan_event_handler)

# Mount the static assets directory (located one level above this file).
static_files_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "static")
app.mount("/static", StaticFiles(directory=static_files_dir), name="static")

@app.get("/", include_in_schema=False)
async def get_laixin_monitor_page():
    """Serve the monitoring dashboard HTML page at the site root."""
    return FileResponse(os.path.join(static_files_dir, "laixin_index.html"))

@app.post("/api/getLetters", response_model=LetterResponse)
async def get_letters(request: BaseLetterRequest, background_tasks: BackgroundTasks):
    """Process an incoming letter: queue it for the workflow, parse the
    workflow output and return the structured result.

    The call is tracked in CALL_RECORDS throughout its lifetime so the
    monitoring page can show live status.

    Raises:
        HTTPException: 500 with a descriptive message on any failure.
    """
    try:
        start_time = time.time()

        # Register the call in the in-memory store immediately, status "处理中".
        CALL_RECORDS[request.bh] = {
            "bh": request.bh,
            "startTime": datetime.fromtimestamp(start_time).strftime("%Y-%m-%d %H:%M:%S"),
            "status": "处理中",
            "processTime": 0,
            "requestInfo": request.dict(),  # request stored as a plain dict
            "responseInfo": None,
            "errorInfo": None
        }

        # File logging is blocking; run it on the thread pool.
        # get_running_loop() replaces the deprecated get_event_loop().
        await asyncio.get_running_loop().run_in_executor(
            thread_pool,
            log_api_request,
            request.bh,
            request.caseIntruction,
            start_time
        )

        inputs = {
            "caseIntruction": request.caseIntruction,
            "bh": request.bh
        }

        # Hand the request to the shared queue; the RequestProcessor started
        # in the lifespan handler resolves the future with the workflow output.
        future = asyncio.Future()
        await REQUEST_QUEUE.put((inputs, request.bh, WORKFLOW_TIMEOUT, future))

        try:
            final_output = await future

            result = await process_workflow_output(final_output, request.bh)

            processing_time = time.time() - start_time

            # Mark the call as successful in the live record store.
            if request.bh in CALL_RECORDS:
                CALL_RECORDS[request.bh]["status"] = "成功"
                CALL_RECORDS[request.bh]["responseInfo"] = result
                CALL_RECORDS[request.bh]["processTime"] = processing_time  # seconds

            await asyncio.get_running_loop().run_in_executor(
                thread_pool,
                log_api_response,
                request.bh,
                result,
                processing_time
            )

            # Track end-to-end latency (seconds) for the performance endpoint.
            end_to_end_time = time.time() - start_time
            await performance_monitor.add_end_to_end_time(end_to_end_time)

            return {
                "bh": request.bh,
                "success": "true",
                "message": "",
                "data": result
            }

        except Exception as e:
            error_msg = f"处理工作流输出失败: {str(e)}"
            # async_log_error also flags the CALL_RECORDS entry as failed,
            # so no separate record update is needed here.
            await async_log_error(request.bh, error_msg)
            await performance_monitor.increment_failed()
            raise HTTPException(status_code=500, detail=error_msg)

    except HTTPException:
        # Re-raise as-is. Without this clause the generic handler below
        # caught the inner HTTPException, re-wrapped its message, logged the
        # error a second time and incremented the failure counter twice.
        raise
    except Exception as e:
        error_msg = f"API处理错误: {str(e)}"
        await async_log_error(request.bh, error_msg)
        await performance_monitor.increment_failed()
        raise HTTPException(status_code=500, detail=error_msg)

# Performance-monitoring endpoint
@app.get("/api/performance")
async def get_performance_metrics():
    """Return aggregate processing metrics for the monitoring dashboard.

    Raises:
        HTTPException: 500 if any monitor query fails.
    """
    try:
        avg_time = await performance_monitor.get_average_time()
        max_queue_size = await performance_monitor.get_max_queue_size()
        avg_end_to_end_time = await performance_monitor.get_average_end_to_end_time()

        # Count successes and failures in a single pass over CALL_RECORDS
        # (previously two full scans) so the figures stay consistent with
        # processed_count.
        successful_count = 0
        failed_count = 0
        for record in CALL_RECORDS.values():
            record_status = record.get("status")
            if record_status == "成功":
                successful_count += 1
            elif record_status == "失败":
                failed_count += 1

        return {
            "average_processing_time": avg_time,
            "max_queue_size": max_queue_size,
            "current_queue_size": REQUEST_QUEUE.qsize(),
            "processed_count": len(CALL_RECORDS),
            "successful_count": successful_count,
            "failed_count": failed_count,
            "average_end_to_end_time": avg_end_to_end_time,
            # _threads is a private ThreadPoolExecutor attribute; guarded in
            # case the implementation changes.
            "active_connections": len(thread_pool._threads) if hasattr(thread_pool, '_threads') else 0
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

# Fetch call logs from memory, with optional filtering
@app.get("/api/calls")
async def get_call_logs(date: str = Query(None, description="查询日期，格式YYYY-MM-DD"), bh: str = Query(None, description="信访件编号"), status: str = Query(None, description="查询状态，成功或失败")):
    """Return in-memory call records from the last three days.

    Optional filters: *date* (startTime prefix, YYYY-MM-DD), *bh*
    (case-insensitive substring of the letter number) and *status*
    (exact match). Results are sorted newest-first.

    Raises:
        HTTPException: 500 on unexpected failure.
    """
    try:
        # Only records started within the last three days are returned.
        threshold_date = datetime.now() - timedelta(days=3)

        filtered_calls = []
        for call_record in CALL_RECORDS.values():
            record_time_str = call_record.get("startTime")
            if not record_time_str:
                continue
            try:
                record_time = datetime.strptime(record_time_str, "%Y-%m-%d %H:%M:%S")
            except ValueError:
                # One malformed timestamp previously 500'd the whole request;
                # skip the record instead.
                continue
            if record_time < threshold_date:
                continue

            # Guard-clause filters (replaces the match_* flag booleans).
            if date and not record_time_str.startswith(date):
                continue
            record_bh = call_record.get("bh")
            if bh and record_bh and bh.lower() not in record_bh.lower():
                continue
            if status and call_record.get("status") != status:
                continue

            filtered_calls.append(call_record)

        # Newest calls first.
        filtered_calls.sort(key=lambda x: x.get("startTime", ""), reverse=True)

        return {"success": True, "data": filtered_calls}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取调用日志失败: {str(e)}")

if __name__ == "__main__":
    import uvicorn
    # Run the API using host/port from the active configuration.
    uvicorn.run(app, host=current_config['host'], port=current_config['port'])