# from build.lib.traci import junction
import asyncio
import json
import os
from typing import Optional

from click import prompt
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.requests import Request
from fastapi.responses import StreamingResponse
from pydantic import BaseModel

from api_server.utils.run_command import run_command
from app import SimulationManager, main
from openai_client import get_response

app = FastAPI()

origins = [
    "http://127.0.0.1:80", "http://127.0.0.1:9080", "http://127.0.0.1", "http://192.168.0.103:83", "192.168.1.13:81",
]

app.add_middleware(
    CORSMiddleware,
    # allow_origins=origins,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# os.chdir(r"E:/wq/2025/交通AI/sumo-llm/api-server")

class Item(BaseModel):
    name: str
    price: float

prompt = f"""作为交通信号配时专家，请根据数据简要分析交通状况，注意是没有行人和公交的，路口描述：
该路口是成都市的"倪家桥路与领事馆路交叉口"。有4个相位，分别是：
1.南北方向直行与右转（南北方向各有5条车道（4直行+1右转））；
2.南北方向左转（南北方向各有1条左转车道）；
3.东向西通行（东方向2条车道）；
4.西向东通行（西方向2条车道）"""

@app.on_event("startup")
async def startup_event():
    global manager,st
    # 项目启动时执行的命令行命令
    print("Starting up...")

    # await run_command(r"cd")
    # # 在后台运行 Streamlit
    # asyncio.create_task(run_command("streamlit run app.py"))

    current_dir = os.getcwd()
    print(current_dir)
    config_file = os.path.join(current_dir, "osm.sumocfg")
    junctions_file = os.path.join(current_dir, "J54_data.json")
    manager = SimulationManager()
    manager.start_simulation(config_file, junctions_file)

    # st = main()
    # if st.session_state.sim_manager.start_simulation(config_file, junctions_file):
    #     st.session_state.simulation_running = True
    #     st.success("仿真启动成功！")
    # else:
    #     st.error("仿真启动失败！")
    print("Startup complete.")

# @app.on_event("shutdown")
# async def shutdown_event():
#     print("Shutting down...")
#     # 在这里可以添加关闭 Streamlit 服务器的代码（如果需要）
#     print("Shutdown complete.")

async def call_with_messages(messages,request):
    def generate_event(content):
        """统一生成 SSE 事件数据结构"""
        return {
            "id": "chatcmpl-4",
            "choices": [{
                "delta": {
                    "content": content,
                    "function_call": "",
                    "role": "",
                    "tool_calls": ""
                },
                "finish_reason": "",
                "index": 0,
                "logprobs": ""
            }]
        }

    completion = await get_response(messages)
    # 记录当前内容类型（推理/常规）
    current_content_type = None
    for chunk in completion:
        # 原始响应数据流处理
        chunk_data = json.loads(chunk.model_dump_json())
        delta = chunk_data["choices"][0].get("delta", {})

        # 处理常规内容
        if content := delta.get("content", ""):
            if current_content_type != "content":
                n = str("\n\n")
                yield f'data: {json.dumps({"data": [generate_event(n)]})}\n\n'
            current_content_type = "content"
            yield f'data: {json.dumps({"data": [chunk_data]})}\n\n'

        # 处理推理内容（Markdown 引用格式）
        if reasoning_content := delta.get("reasoning_content", ""):
            # 精确处理每行添加引用符号
            if current_content_type is None:
                reasoning_content = ">  " + reasoning_content
            formatted_reasoning = reasoning_content.replace('\n','\n> ')

            event_data = json.dumps({"data": [generate_event(formatted_reasoning)]})
            current_content_type = "reasoning"
            yield f'data: {event_data}\n\n'

        # 处理连接中断
        if await request.is_disconnected():
            event_data = json.dumps({"data": [generate_event("**_本次回答已被终止_**")]})
            yield f'data: {event_data}\n\n'
            break

@app.get("/")
async def root():

    print(manager._get_signalized_junctions())
    print(manager.get_junction_state('671587313'))
    print(type(manager.get_junction_state('671587313')))
    return {"message": "Hello World"}

@app.post("/chat")
async def chat(request: Request):
    data = await request.json()
    user_input = data.get("message", "")
    model = data.get("model", None)
    conversation_id = data.get("conversation_id", None)
    history_message = data.get("history", [])

    system_message = [{'role': 'system', 'content': prompt}]
    # 删除每个字典中的 'id' 键
    history_message = [{k: v for k, v in msg.items() if k != 'id'} for msg in history_message]

    junction_state_data = []
    # junction_id_list = manager._get_signalized_junctions()
    # for data_id in junction_id_list:
    counts, phase_info = manager.get_junction_state('J54')
    # counts, phase_info = st.session_state.sim_manager.get_junction_state('J54')
    print(counts, phase_info)
    junction_state_data.append(['倪家桥路与领事馆路交叉口',counts, phase_info])
    system_message.append({'role': 'system', 'content': str(junction_state_data)})
    messages = system_message + history_message + [{'role': 'user', 'content': user_input}]
    print(messages)
    return StreamingResponse(call_with_messages(messages,request), media_type="text/event-stream")


@app.get(
    "/conversations",
    response_model='',
    operation_id="conversations",
    summary="获取会话列表"
)
async def select_conversations():
    return []

@app.get(
    "/conversations/{conversation_id}",
    response_model='',
    operation_id="conversationsMessagesList",
    summary="获取会话的详细对话记录"
)
async def select_conversation_messages():
    return []

@app.get("/items/{item_id}")
def read_item(item_id: int, q: str = None):
    return {"item_id": item_id, "q": q}

@app.post("/items/")
def create_item(item: Item):
    return {"item_name": item.name, "item_price": item.price}

def start():
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)

if __name__ == "__main__":
    start()


