import sys
import os
# 添加项目根目录到系统路径
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from fastapi import FastAPI, HTTPException, BackgroundTasks, Query
from contextlib import asynccontextmanager
import json
import time
import re
import asyncio
from concurrent.futures import ThreadPoolExecutor
import aiohttp
from asyncio import Queue
from fastapi.staticfiles import StaticFiles
from datetime import datetime
from fastapi.responses import FileResponse

from common.train_dict import load_excel_dict  # 导入字典处理函数
from common.person_dict import process_persons_list  # 导入人员字典处理模块
from common.workflow_client import WorkflowClientManager
from common.performance_monitor import PerformanceMonitor
from common.request_processor import RequestProcessor
from common.models import NetLetterRequest, LetterResponse

from net.config import current_config  # 导入配置
from net.net_logger import log_api_request, log_workflow_output, log_api_response, log_error, log_info # 导入日志模块

# Workflow call timeout (seconds).
WORKFLOW_TIMEOUT = 300

# Thread pool used to offload blocking work (logging, person-dict processing).
thread_pool = ThreadPoolExecutor(max_workers=15)

# Shared aiohttp client session; created in lifespan() at startup, closed at shutdown.
session_pool = None

# Global performance-monitoring instance.
performance_monitor = PerformanceMonitor()

# Request queue and semaphore bounding concurrent workflow calls.
REQUEST_QUEUE = Queue(maxsize=100)
REQUEST_SEMAPHORE = asyncio.Semaphore(10)  # concurrency capped at 10

# In-memory store of live call records, keyed by letter number (bh).
CALL_RECORDS = {}

# Global workflow client and request processor; both initialized in lifespan().
workflow_client_global = None
request_processor_global = None

# Preload every dictionary sheet into a global cache at import time so that
# lookups in get_dict_name_cached() never touch the Excel files again.
DICT_CACHE = {
    "问题性质": load_excel_dict("file/训练模型字典.xlsx", "最后一级问题性质"),
    "统一关键字": load_excel_dict("file/训练模型字典.xlsx", "统一关键字"),
    "自定义关键字": load_excel_dict("file/训练模型字典.xlsx", "自定义关键字"),
    "信访性质": load_excel_dict("file/训练模型字典.xlsx", "信访性质"),
    "干部管理权限": load_excel_dict("file/反映人被反映人字典.xlsx", "干部管理权限"),
    "职级": load_excel_dict("file/反映人被反映人字典.xlsx", "职级"),
    "政治面貌": load_excel_dict("file/反映人被反映人字典.xlsx", "政治面貌")
}

@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan hook.

    On startup: create the shared aiohttp session, the workflow client and
    the request processor, and launch the queue-consumer task.
    On shutdown: cancel the consumer task and close the HTTP session.
    """
    # --- startup ---
    global session_pool
    session_pool = aiohttp.ClientSession(
        connector=aiohttp.TCPConnector(
            limit=5,  # cap on concurrent TCP connections
            ttl_dns_cache=300,  # DNS cache lifetime (seconds)
            use_dns_cache=True
        )
    )

    global workflow_client_global, request_processor_global
    workflow_client_global = WorkflowClientManager(
        api_key=current_config['api_key'],
        base_url=current_config['base_url'],
        session_pool=session_pool
    )

    request_processor_global = RequestProcessor(
        workflow_client=workflow_client_global,
        performance_monitor=performance_monitor,
        thread_pool=thread_pool,
        request_queue=REQUEST_QUEUE,
        request_semaphore=REQUEST_SEMAPHORE,
        error_callback=async_log_error
    )

    # BUGFIX: keep a strong reference to the background task. The event loop
    # only holds weak references to tasks, so a fire-and-forget
    # asyncio.create_task(...) may be garbage-collected mid-flight.
    queue_task = asyncio.create_task(request_processor_global.process_request_queue())

    yield

    # --- shutdown ---
    queue_task.cancel()  # stop the queue consumer before closing the session
    try:
        await queue_task
    except asyncio.CancelledError:
        pass
    if session_pool:
        await session_pool.close()

app = FastAPI(title="网络信访件信息查询API", lifespan=lifespan)

# Mount the project-level "static" directory (sibling of this package) at /static.
static_files_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "static")
app.mount("/static", StaticFiles(directory=static_files_dir), name="static")

@app.get("/", include_in_schema=False)
async def get_net_monitor_page():
    """Serve the monitoring dashboard page at the site root."""
    page_path = os.path.join(static_files_dir, "net_index.html")
    return FileResponse(page_path)

async def async_log_error(bh: str, message: str) -> None:
    """Record an error without blocking the event loop.

    Mirrors the failure into the in-memory CALL_RECORDS entry (if one
    exists for this letter) and writes the error log in the thread pool.

    Args:
        bh: letter number identifying the call record.
        message: human-readable error description.
    """
    if bh in CALL_RECORDS:
        CALL_RECORDS[bh]["status"] = "失败"
        CALL_RECORDS[bh]["errorInfo"] = {"message": message}
    # get_running_loop() is the supported call inside a coroutine;
    # get_event_loop() here has been deprecated since Python 3.10.
    await asyncio.get_running_loop().run_in_executor(
        thread_pool,
        log_error,
        bh,
        message
    )

async def async_log_info(bh: str, message: str) -> None:
    """Write an info log entry in the thread pool, off the event loop.

    Args:
        bh: letter number the message relates to.
        message: log text.
    """
    # get_running_loop() is the supported call inside a coroutine;
    # get_event_loop() here has been deprecated since Python 3.10.
    await asyncio.get_running_loop().run_in_executor(
        thread_pool,
        log_info,
        bh,
        message
    )

def get_dict_name_cached(dict_type: str, value: str) -> str:
    """Translate a coded dictionary value into its display name.

    Looks the value up in the preloaded DICT_CACHE. A ';'-separated value
    is translated piecewise and re-joined with commas. Values (or pieces)
    without a mapping pass through unchanged, as does any value whose
    dict_type is not cached.
    """
    mapping = DICT_CACHE.get(dict_type)
    if mapping is None:
        return value
    if ";" in value:
        # Multiple coded values: translate each stripped piece, falling
        # back to the piece itself when no mapping exists.
        return ",".join(
            mapping.get(piece.strip(), piece.strip())
            for piece in value.split(";")
        )
    return mapping.get(value.strip(), value)

def _parse_sections(output_text: str) -> dict:
    """Split the workflow's plain-text output into named sections.

    A non-empty line ending in ':' opens a new section named by the text
    before the colon; subsequent non-empty lines belong to that section.
    Sections with no content lines are dropped.
    """
    sections = {}
    current_section = None
    current_content = []
    for line in output_text.split('\n'):
        line = line.strip()
        if not line:
            continue
        if line.endswith(':'):
            if current_section and current_content:
                sections[current_section] = '\n'.join(current_content).strip()
            current_section = line[:-1].strip()
            current_content = []
        elif current_section:
            current_content.append(line)
    if current_section and current_content:
        sections[current_section] = '\n'.join(current_content).strip()
    return sections


def _translate_items(dict_type: str, content: str) -> list:
    """Split a ';'-separated string and translate each item via DICT_CACHE.

    Items with no dictionary mapping (translation equals the raw item)
    are dropped, matching the original filtering behavior.
    """
    raw_items = [item.strip() for item in content.split(";") if item.strip()]
    return [
        get_dict_name_cached(dict_type, item)
        for item in raw_items
        if get_dict_name_cached(dict_type, item) != item
    ]


def _convert_bfyr_dicts(bfyrs: list) -> list:
    """Translate coded fields on each reported-person dict, in place.

    'zj' (rank) and 'gbglqx' (cadre management authority) are mapped via
    DICT_CACHE; an empty or "null" gbglqx is normalized to "".
    """
    for bfyr in bfyrs:
        if "zj" in bfyr and bfyr["zj"]:
            bfyr["zj"] = get_dict_name_cached("职级", bfyr["zj"])
        if "gbglqx" in bfyr:
            if bfyr["gbglqx"] and bfyr["gbglqx"] != "null":
                bfyr["gbglqx"] = get_dict_name_cached("干部管理权限", bfyr["gbglqx"])
            else:
                bfyr["gbglqx"] = ""
    return bfyrs


async def process_workflow_output(output_text: str, bh: str) -> dict:
    """Parse the workflow's text output into the structured API result.

    Args:
        output_text: raw text emitted by the workflow (named sections,
            see _parse_sections).
        bh: letter number, echoed into the result and used for logging.

    Returns:
        Result dict with letter type, problem info, keyword lists and the
        reported-persons list (enriched via process_persons_list).

    Raises:
        HTTPException(500): on any unexpected processing error.
    """
    try:
        result = {
            "bh": bh,
            "xflb": "",
            "wtxx": {
                "wtzy": "",
                "fl": [],
                "tygjz": [],
                "zdygjz": [],
                "bs": {
                    "yy": "",
                    "sggb": "",
                    "sfcf": ""
                }
            },
            "bfyrs": []
        }

        sections = _parse_sections(output_text)

        # Simple scalar sections.
        if "xflb" in sections:
            result["xflb"] = get_dict_name_cached("信访性质", sections["xflb"])
        if "wtzy" in sections:
            result["wtxx"]["wtzy"] = sections["wtzy"]

        # ';'-separated list sections, translated through their dictionaries.
        if sections.get("fl"):
            result["wtxx"]["fl"] = _translate_items("问题性质", sections["fl"])
        if sections.get("tygjz"):
            result["wtxx"]["tygjz"] = _translate_items("统一关键字", sections["tygjz"])
        if sections.get("zdygjz"):
            result["wtxx"]["zdygjz"] = _translate_items("自定义关键字", sections["zdygjz"])

        # "bs" sub-fields; a literal "null" (any case) becomes empty.
        if "yy" in sections:
            yy_content = sections["yy"]
            result["wtxx"]["bs"]["yy"] = "" if yy_content.lower() == "null" else yy_content
        if "sggb" in sections:
            sggb_content = sections["sggb"]
            result["wtxx"]["bs"]["sggb"] = "" if sggb_content.lower() == "null" else sggb_content
        if "sfcf" in sections:
            result["wtxx"]["bs"]["sfcf"] = sections["sfcf"]

        # Reported persons: a JSON array, possibly with minor format damage.
        if "bfyrs" in sections:
            try:
                bfyr_data = sections["bfyrs"].replace('\n', '').replace('\r', '')
                bfyr_data = re.sub(r'\s+', ' ', bfyr_data)
                try:
                    bfyrs = json.loads(bfyr_data)
                except json.JSONDecodeError:
                    # Repair trailing commas before '}' / ']' and retry once.
                    bfyr_data = re.sub(r',\s*}', '}', bfyr_data)
                    bfyr_data = re.sub(r',\s*]', ']', bfyr_data)
                    bfyrs = json.loads(bfyr_data)
                result["bfyrs"] = _convert_bfyr_dicts(bfyrs)
            except Exception as e:
                await async_log_error(bh, f"解析bfyrs数据失败: {str(e)}")
                result["bfyrs"] = []

        # Enrich person records in the thread pool (blocking project code).
        if result["bfyrs"]:
            result["bfyrs"] = await asyncio.get_running_loop().run_in_executor(
                thread_pool,
                process_persons_list,
                result["bfyrs"],
                False
            )

        return result
    except Exception as e:
        await async_log_error(bh, f"处理过程中发生错误: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/api/getNetLetters", response_model=LetterResponse)
async def get_letters(request: NetLetterRequest, background_tasks: BackgroundTasks):
    """Process a network petition letter through the workflow engine.

    Registers the call in CALL_RECORDS, queues the workflow request,
    waits for the result, parses it and returns the structured response.

    Args:
        request: letter payload (bh, caseIntruction, optional person lists).
        background_tasks: FastAPI-injected; currently unused, kept for
            interface compatibility.

    Raises:
        HTTPException(500): when queueing, workflow execution or output
            parsing fails.
    """
    try:
        start_time = time.time()

        # Register the call in the in-memory monitor before any work starts.
        CALL_RECORDS[request.bh] = {
            "bh": request.bh,
            "startTime": datetime.fromtimestamp(start_time).strftime("%Y-%m-%d %H:%M:%S"),
            "status": "处理中",
            "processTime": 0,
            "requestInfo": request.dict(),  # snapshot of the request payload
            "responseInfo": None,
            "errorInfo": None
        }

        # Write the request log in the thread pool, off the event loop.
        await asyncio.get_running_loop().run_in_executor(
            thread_pool,
            log_api_request,
            request.bh,
            request.caseIntruction,
            start_time
        )

        # Build workflow inputs; optional list fields are JSON-serialized
        # and only included when non-empty.
        inputs = {
            "caseIntruction": request.caseIntruction,
            "bh": request.bh
        }
        if request.bfyrs and len(request.bfyrs) > 0:
            inputs["bfyrs"] = json.dumps(request.bfyrs, ensure_ascii=False)
        if request.cfj and len(request.cfj) > 0:
            inputs["cfj"] = json.dumps(request.cfj, ensure_ascii=False)
        if request.fyrs and len(request.fyrs) > 0:
            inputs["fyrs"] = json.dumps(request.fyrs, ensure_ascii=False)

        # Hand the request to the global processor (initialized in lifespan)
        # and wait for its result via a per-request future.
        future = asyncio.Future()
        await REQUEST_QUEUE.put((inputs, request.bh, WORKFLOW_TIMEOUT, future))

        try:
            final_output = await future
            result = await process_workflow_output(final_output, request.bh)
            processing_time = time.time() - start_time

            # Record the success outcome in the in-memory monitor.
            if request.bh in CALL_RECORDS:
                CALL_RECORDS[request.bh]["status"] = "成功"
                CALL_RECORDS[request.bh]["responseInfo"] = result
                CALL_RECORDS[request.bh]["processTime"] = processing_time  # seconds

            await asyncio.get_running_loop().run_in_executor(
                thread_pool,
                log_api_response,
                request.bh,
                result,
                processing_time
            )

            # Record end-to-end latency (seconds) for the performance page.
            end_to_end_time = time.time() - start_time
            await performance_monitor.add_end_to_end_time(end_to_end_time)

            return {
                "bh": request.bh,
                "success": "true",
                "message": "",
                "data": result
            }

        except Exception as e:
            error_msg = f"处理工作流输出失败: {str(e)}"
            if request.bh in CALL_RECORDS:
                CALL_RECORDS[request.bh]["status"] = "失败"
                CALL_RECORDS[request.bh]["errorInfo"] = {"message": error_msg}
            await async_log_error(request.bh, error_msg)
            await performance_monitor.increment_failed()
            raise HTTPException(status_code=500, detail=error_msg)

    except HTTPException:
        # BUGFIX: HTTPException is a subclass of Exception, so the inner
        # handler's 500 used to be caught again below, re-wrapped as
        # "API处理错误: ...", logged a second time and counted as a second
        # failure. Re-raise it untouched instead.
        raise
    except Exception as e:
        error_msg = f"API处理错误: {str(e)}"
        if request.bh in CALL_RECORDS:
            CALL_RECORDS[request.bh]["status"] = "失败"
            CALL_RECORDS[request.bh]["errorInfo"] = {"message": error_msg}
        await async_log_error(request.bh, error_msg)
        await performance_monitor.increment_failed()
        raise HTTPException(status_code=500, detail=error_msg)

# Serve call logs from the in-memory store, with optional filtering.
@app.get("/api/calls")
async def get_call_logs(date: str = Query(None, description="查询日期，格式YYYY-MM-DD"), bh: str = Query(None, description="信访件编号"), status: str = Query(None, description="查询状态，成功或失败")):
    """Return API call records, newest first, optionally filtered.

    Filters: `date` matches the day prefix of startTime, `bh` is a
    case-insensitive substring match on the letter number, and `status`
    is an exact match.
    """
    try:
        def _matches(record: dict) -> bool:
            # startTime is "YYYY-MM-DD HH:MM:SS", so a prefix test
            # selects a single day.
            if date and record["startTime"] and not record["startTime"].startswith(date):
                return False
            if bh and record["bh"] and bh.lower() not in record["bh"].lower():
                return False
            if status and record["status"] != status:
                return False
            return True

        filtered_calls = [record for record in CALL_RECORDS.values() if _matches(record)]
        # Newest calls first.
        filtered_calls.sort(key=lambda record: record.get("startTime", ""), reverse=True)

        return {"success": True, "data": filtered_calls}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"获取调用日志失败: {str(e)}")

# Performance-monitoring endpoint.
@app.get("/api/performance")
async def get_performance_metrics():
    """Expose aggregate processing metrics for the monitoring page.

    Success/failure counts are recomputed from CALL_RECORDS so they stay
    consistent with processed_count.
    """
    try:
        records = list(CALL_RECORDS.values())
        successful_count = 0
        failed_count = 0
        for record in records:
            record_status = record.get("status")
            if record_status == "成功":
                successful_count += 1
            elif record_status == "失败":
                failed_count += 1

        return {
            "average_processing_time": await performance_monitor.get_average_time(),
            "max_queue_size": await performance_monitor.get_max_queue_size(),
            "current_queue_size": REQUEST_QUEUE.qsize(),
            "processed_count": len(records),
            "successful_count": successful_count,
            "failed_count": failed_count,
            "average_end_to_end_time": await performance_monitor.get_average_end_to_end_time(),
            # _threads is a private ThreadPoolExecutor attribute; guarded
            # with hasattr in case the implementation changes.
            "active_connections": len(thread_pool._threads) if hasattr(thread_pool, '_threads') else 0
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

if __name__ == "__main__":
    # Run the service directly with uvicorn, using host/port from config.
    import uvicorn
    uvicorn.run(app, host=current_config['host'], port=current_config['port']) 