import os
import logging
import json
import time
import pandas as pd
from datetime import datetime
from pathlib import Path
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from typing import List, Dict, Any, Generator
import traceback

from dotenv import load_dotenv
load_dotenv()

# --------------------------
# Log folder layout
# log/
#   ├─ api_KGQA.log            # global main log
#   ├─ 20251022-101530/        # one request's log folder (named by timestamp)
#   │  ├─ look1_input_drawing.json      # raw input drawing data (incl. draw_type)
#   │  ├─ start_frame.json              # start frame sent to the client
#   │  ├─ end_frame.json                # end frame sent to the client
#   │  ├─ look2_debug_终检.csv           # converted final-inspection DataFrame
#   │  ├─ look3_debug_工序检.csv         # process-inspection debug CSV
#   │  ├─ look4_debug_工步检.csv         # step-inspection debug CSV
#   │  ├─ look5_decomposition_result.json  # forward-decomposition result
#   │  ├─ one_doing.log                 # SmartProcessDesigner run log
#   │  ├─ steps/                        # per-operation intermediate frames
#   │  └─ error_info.json               # error info (only on failure)
#   └─ 20251022-102015/        # another request's log folder
# --------------------------

# 1. Root log directory; every request folder lives underneath it.
LOG_ROOT = Path("log")  # main log directory
LOG_ROOT.mkdir(exist_ok=True)  # make sure the root exists

# 2. Global logging: writes to log/api_KGQA.log and echoes to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler(LOG_ROOT / "api_KGQA.log", encoding="utf-8"),
        logging.StreamHandler()  # also print to stdout for interactive runs
    ]
)
logger = logging.getLogger("工艺路线分析")

# FastAPI application (configuration kept as-is; root_path matches the reverse proxy prefix).
app = FastAPI(
    title="工艺路线分析与优化系统",
    description="根据工艺路线名称推荐优化工序",
    root_path="/ai_recommend"
)

# Neo4j connection settings, read from the environment (populated by load_dotenv above).
# NOTE(review): os.getenv returns None when a variable is missing, so NEO4J_AUTH may
# contain None values — verify the .env file is present before connecting.
NEO4J_URI = os.getenv("NEO4J_URI")
NEO4J_USER = os.getenv("NEO4J_USER")
NEO4J_PASSWORD = os.getenv("NEO4J_PASSWORD")
NEO4J_AUTH = (NEO4J_USER, NEO4J_PASSWORD)

from 正向分解2 import SmartProcessDesigner

class StreamingProcessRouteRequest(BaseModel):
    """Request body for the /streaming_process_route endpoint."""
    # Product name (e.g. 打靶透镜); kept as "draw_type" to stay consistent with the frontend API.
    draw_type: str
    # Raw drawing data supplied by the user.
    user_drawing_info: List[Dict[str, Any]]


def debug_save_process_inspections(standard_result: Dict[str, Any], request_log_dir: Path) -> None:
    """Dump process-level and step-level inspection records to debug CSVs.

    Writes ``look3_debug_工序检.csv`` (process inspections) and
    ``look4_debug_工步检.csv`` (step inspections) into the request's log
    directory. Any failure is logged and swallowed so that debug output
    can never break the main processing pipeline.

    Args:
        standard_result: ``SmartProcessDesigner.new_graph['标准工序工步']`` —
            a mapping of process id -> process-info dict.
        request_log_dir: Directory holding this request's log files.
    """
    # Resolve the shared named logger locally so the helper is self-contained.
    log = logging.getLogger("工艺路线分析")
    try:
        process_rows: List[Dict[str, Any]] = []
        step_rows: List[Dict[str, Any]] = []

        for process_id, process_info in standard_result.items():
            process_props = process_info.get("工序节点", {}).get("properties", {})
            process_name = process_props.get("工序名称", f"工序{process_id}")

            # Process-level inspections (non-dict entries are skipped defensively).
            for inspection in process_info.get("工序检列表", []):
                if isinstance(inspection, dict):
                    props = inspection.get("properties", {})
                    process_rows.append({
                        "工序号": process_id,
                        "工序名称": process_name,
                        "分类名称": props.get("分类名称", ""),
                        "检验项目名称": props.get("检验项目名称", ""),
                        "检测面": props.get("检测面", ""),
                        "设计值技术指标": props.get("设计值技术指标", ""),
                        "计量单位": props.get("计量单位", "")
                    })

            # Step-level inspections, one row per (step, inspection) pair.
            for step_info in process_info.get("工步列表", []):
                node_props = step_info.get("工步节点", {}).get("properties", {})
                step_id = node_props.get("工步号", "")
                step_name = node_props.get("工步名称", f"工步{step_id}")

                for step_inspection in step_info.get("工步检列表", []):
                    if isinstance(step_inspection, dict):
                        # Distinct name on purpose: must not shadow the
                        # step-node properties captured above.
                        insp_props = step_inspection.get("properties", {})
                        step_rows.append({
                            "工序号": process_id,
                            "工序名称": process_name,
                            "工步号": step_id,
                            "工步名称": step_name,
                            "记录项检验项名称": insp_props.get("记录项检验项名称", ""),
                            "分类名称": insp_props.get("分类名称", ""),
                            "检测面": insp_props.get("检测面", ""),
                            "设计值技术指标": insp_props.get("设计值技术指标", ""),
                            "计量单位": insp_props.get("计量单位", "")
                        })

        # utf-8-sig so Excel opens the Chinese headers correctly.
        if process_rows:
            process_file = request_log_dir / "look3_debug_工序检.csv"
            pd.DataFrame(process_rows).to_csv(process_file, index=False, encoding="utf-8-sig")
            log.info(f"已保存工序检调试文件: {process_file}，共{len(process_rows)}条记录")
        else:
            log.warning("未找到工序检数据")

        if step_rows:
            step_file = request_log_dir / "look4_debug_工步检.csv"
            pd.DataFrame(step_rows).to_csv(step_file, index=False, encoding="utf-8-sig")
            log.info(f"已保存工步检调试文件: {step_file}，共{len(step_rows)}条记录")
        else:
            log.warning("未找到工步检数据")

    except Exception as e:
        # exc_info=True captures the traceback in the log file, replacing the
        # old redundant in-function `import traceback` + print_exc().
        log.error(f"保存工序检工步检调试文件失败: {str(e)}", exc_info=True)


@app.post("/streaming_process_route")
async def stream_process_route(request: StreamingProcessRouteRequest) -> StreamingResponse:
    """Stream process-route recommendation results as NDJSON frames.

    Each request gets its own timestamped folder under ``log/`` capturing the
    input drawing, start/end frames, per-operation frames and (on failure)
    the error information, so every run can be replayed for debugging.
    """

    def generate() -> Generator[str, None, None]:
        # Timestamp doubles as the request's log-folder name.
        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
        # Request id = timestamp + millisecond remainder, to tell apart
        # near-concurrent requests in the main log.
        request_id = f"{timestamp}-{int(time.time() * 1000) % 1000}"
        logger.info(f"开始处理新请求，请求ID: {request_id}")

        # Per-request log folder, e.g. log/20251022-101530
        request_log_dir = LOG_ROOT / timestamp
        try:
            # parents=True keeps this robust even if LOG_ROOT was removed at runtime.
            request_log_dir.mkdir(parents=True, exist_ok=True)
            logger.info(f"请求ID: {request_id} - 已创建请求日志文件夹: {request_log_dir}")
        except Exception as e:
            logger.error(f"请求ID: {request_id} - 创建请求日志文件夹失败: {str(e)}")
            # A failed folder creation must not abort the stream; warn and continue.
            yield json.dumps({
                "type": "warning",
                "msg": f"创建请求日志文件夹失败: {str(e)}",
                "timestamp": time.time()
            }, ensure_ascii=False) + "\n"

        try:
            # --------------------------
            # Persist the raw input drawing
            # --------------------------
            input_data = {
                "draw_type": request.draw_type,
                "user_drawing_info": request.user_drawing_info
            }
            input_file = request_log_dir / "look1_input_drawing.json"
            with open(input_file, "w", encoding="utf-8") as f:
                json.dump(input_data, f, ensure_ascii=False, indent=2)
            logger.info(f"请求ID: {request_id} - 已保存输入图纸数据(包含draw_type)至: {input_file}")

            # --------------------------
            # Send the start frame (and save it)
            # --------------------------
            start_response = {
                "type": "start",
                "message": "开始处理工艺路线，请等待工序结果逐个返回...",
                "timestamp": time.time()
            }
            start_frame_file = request_log_dir / "start_frame.json"
            with open(start_frame_file, "w", encoding="utf-8") as f:
                json.dump(start_response, f, ensure_ascii=False, indent=2)
            yield json.dumps(start_response, ensure_ascii=False) + "\n"
            time.sleep(0.1)  # brief pause so the client renders the start frame first
            logger.info(f"请求ID: {request_id} - 已发送开始响应")

            # --------------------------
            # Convert the drawing data (save intermediate result)
            # --------------------------
            # Both helpers imported once here; previously convert_operation_format
            # was re-imported on every loop iteration below.
            from utils.fun1 import convert_drawing_format_to_dataframe, convert_operation_format

            # Convert straight to a DataFrame, keeping all metadata.
            df_data = convert_drawing_format_to_dataframe(request.user_drawing_info)

            converted_file = request_log_dir / "look2_debug_终检.csv"
            df_data.to_csv(converted_file, index=False, encoding="utf-8-sig")
            logger.info(f"请求ID: {request_id} - 数据转换完成，共{len(df_data)}条检测记录，已保存中间结果")

            # --------------------------
            # Forward decomposition (save core result)
            # --------------------------
            process_log_file = request_log_dir / "one_doing.log"
            planer = SmartProcessDesigner(NEO4J_URI, NEO4J_AUTH, process_log_file)
            logger.info("初始化SmartProcessDesigner完成，已连接到Neo4j数据库")
            planer.read_final_inspection(df_data)
            planer.read_product_name(product_name=request.draw_type)
            planer.run()
            standard_result = planer.new_graph['标准工序工步']

            # Dump process/step inspection debug CSVs (best-effort).
            debug_save_process_inspections(standard_result, request_log_dir)

            # Persist the decomposition result.
            decomposition_file = request_log_dir / "look5_decomposition_result.json"
            with open(decomposition_file, "w", encoding="utf-8") as f:
                json.dump(standard_result, f, ensure_ascii=False, indent=2)
            logger.info(f"请求ID: {request_id} - 正向分解完成，共生成{len(standard_result)}个工序")

            # --------------------------
            # Stream the operations, saving each frame
            # --------------------------
            # Keys are numeric strings; sort numerically, not lexicographically.
            sorted_keys = sorted(standard_result.keys(), key=lambda k: int(k))
            total_operations = len(sorted_keys)
            steps_dir = request_log_dir / "steps"  # log/<timestamp>/steps
            steps_dir.mkdir(exist_ok=True)

            for op_index, key in enumerate(sorted_keys, 1):
                op_start = time.time()  # fix: measure real per-operation time
                current_operation = standard_result[key]
                converted_op = convert_operation_format(current_operation)
                operation_response = {
                    "type": "result",
                    "operation_name": current_operation["工序节点"]["properties"].get("工序名称", f"工序{op_index}"),
                    "operation_index": op_index,
                    "total_operations": total_operations,
                    # Previously `time.time() - time.time()`, which was always ~0.
                    "processing_time": round(time.time() - op_start, 4),
                    "result": {"optimization": {"converted_one_operation": converted_op}, "thinking_process": "知识图谱查询获取"},
                    "code": 0,
                    "msg": "success",
                    "timestamp": time.time()
                }
                # Frame naming pattern: step_1_初成形.json
                step_file = steps_dir / f"step_{op_index}_{operation_response['operation_name']}.json"
                with open(step_file, "w", encoding="utf-8") as f:
                    json.dump(operation_response, f, ensure_ascii=False, indent=2)
                yield json.dumps(operation_response, ensure_ascii=False) + "\n"
                logger.info(f"请求ID: {request_id} - 已发送第{op_index}/{total_operations}个工序")
                time.sleep(1)  # pacing between frames, kept from original behavior

            # --------------------------
            # Send the end frame (and save it)
            # --------------------------
            end_response = {
                "type": "end",
                "message": "所有工序处理完成",
                "total_operations": total_operations,
                "code": 0,
                "msg": "success",
                "timestamp": time.time()
            }
            end_frame_file = request_log_dir / "end_frame.json"
            with open(end_frame_file, "w", encoding="utf-8") as f:
                json.dump(end_response, f, ensure_ascii=False, indent=2)
            yield json.dumps(end_response, ensure_ascii=False) + "\n"
            logger.info(f"请求ID: {request_id} - 所有工序处理完成")

        except Exception as e:
            error_msg = f"处理失败: {str(e)}"
            logger.exception(f"请求ID: {request_id} - {error_msg}")

            # Unified error frame for both the file and the stream.
            error_data = {
                "type": "error",
                "code": 500,
                "msg": "fail",
                "detail": error_msg,
                "timestamp": time.time()
            }

            # Guarded: if the log folder was never created (tolerated above),
            # this write would raise and the client would never get the error frame.
            try:
                error_file = request_log_dir / "error_info.json"
                with open(error_file, "w", encoding="utf-8") as f:
                    json.dump(error_data, f, ensure_ascii=False, indent=2)
            except OSError:
                logger.warning(f"请求ID: {request_id} - 错误信息文件写入失败")

            yield json.dumps(error_data, ensure_ascii=False) + "\n"

    # Streaming headers: disable caching and (nginx) proxy buffering.
    headers = {
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "X-Accel-Buffering": "no"
    }

    return StreamingResponse(
        generate(),
        media_type="application/x-ndjson",
        headers=headers
    )

if __name__ == "__main__":
    import uvicorn
    # Direct (development) entry point: serve the app on all interfaces, port 7502.
    logger.info("工艺路线分析与优化系统启动，监听端口: 7502")
    uvicorn.run(app, host="0.0.0.0", port=7502)