from fastapi import FastAPI, HTTPException
from fastapi.responses import StreamingResponse
import json
import asyncio
from typing import AsyncGenerator, Optional
from datetime import datetime
import logging
import uuid

# Import custom response models
from model.custom_stream_models import (
    DetailedStreamResponse, 
    FrontendStreamResponse, 
    SimpleStreamResponse,
    HierarchicalStreamResponse
)
from model.http_models import ChatRequest
from llm_apis.ds_ggb_management import GGBManagement

app = FastAPI()

# Configure logging: INFO and above goes to a dedicated service log file.
# NOTE(review): basicConfig at import time affects the root logger globally.
logging.basicConfig(
    filename="custom_conversation_service.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)

# Single shared GGB pipeline manager; every request handler streams from it
# via ggb_management.process_problem(...).
ggb_management = GGBManagement()

async def process_message_stream_detailed(
    conversation_id: int,
    user_message: str,
    message_order: int
) -> AsyncGenerator[str, None]:
    """
    Stream SSE events in the detailed response format.

    Wraps each known pipeline step with step_start/step_end events, forwards
    every chunk as a "message" event, and finishes with a "complete" event.
    Pipeline errors (or unexpected exceptions) produce a single "error" event.

    Args:
        conversation_id: Conversation this message belongs to (not used in the
            event payloads here; kept for interface symmetry with callers).
        user_message: Problem text forwarded to the GGB pipeline.
        message_order: Ordinal of this message, echoed back in every event.

    Yields:
        SSE-framed strings of the form "data: <json>\\n\\n".
    """
    try:
        # Known pipeline steps: display name plus cumulative progress at completion.
        step_mapping = {
            "element_extraction": {"name": "元素提取", "progress": 0.25},
            "ggb_generation": {"name": "GGB命令生成", "progress": 0.5},
            "html_generation": {"name": "HTML生成", "progress": 0.75},
            "html_optimization": {"name": "HTML优化", "progress": 1.0}
        }

        current_step = None

        async for chunk in ggb_management.process_problem(user_message):
            if chunk["type"] == "error":
                # Forward pipeline errors verbatim and stop streaming.
                response = DetailedStreamResponse(
                    type="error",
                    content=chunk["content"],
                    timestamp=datetime.now(),
                    message_order=message_order
                )
                yield f"data: {response.model_dump_json()}\n\n"
                return

            # Detect a step transition.
            if chunk.get("step") != current_step:
                # Close the previous step. Guard on membership in step_mapping:
                # a truthy but unknown step name would otherwise raise KeyError
                # (the original only checked `if current_step:`).
                if current_step in step_mapping:
                    end_response = DetailedStreamResponse(
                        type="step_end",
                        step=current_step,
                        content=f"{step_mapping[current_step]['name']}完成",
                        progress=step_mapping[current_step]['progress'],
                        timestamp=datetime.now(),
                        message_order=message_order
                    )
                    yield f"data: {end_response.model_dump_json()}\n\n"

                current_step = chunk.get("step")
                if current_step in step_mapping:
                    # Announce the new step; its starting progress is 0.25 below
                    # its completion value (the four steps are evenly weighted).
                    start_response = DetailedStreamResponse(
                        type="step_start",
                        step=current_step,
                        content=f"开始{step_mapping[current_step]['name']}",
                        progress=step_mapping[current_step]['progress'] - 0.25,
                        timestamp=datetime.now(),
                        message_order=message_order
                    )
                    yield f"data: {start_response.model_dump_json()}\n\n"

            # Forward the chunk content itself.
            response = DetailedStreamResponse(
                type="message",
                step=chunk.get("step"),
                content=chunk["content"],
                progress=step_mapping.get(chunk.get("step", ""), {}).get("progress", 0),
                timestamp=datetime.now(),
                message_order=message_order,
                metadata={"chunk_type": chunk["type"]}
            )
            yield f"data: {response.model_dump_json()}\n\n"

        # Close the final step: step_end was previously emitted only on a
        # transition, so the last step was never paired with an end event.
        if current_step in step_mapping:
            end_response = DetailedStreamResponse(
                type="step_end",
                step=current_step,
                content=f"{step_mapping[current_step]['name']}完成",
                progress=step_mapping[current_step]['progress'],
                timestamp=datetime.now(),
                message_order=message_order
            )
            yield f"data: {end_response.model_dump_json()}\n\n"

        # Overall completion signal.
        completion_response = DetailedStreamResponse(
            type="complete",
            content="所有处理步骤已完成",
            progress=1.0,
            timestamp=datetime.now(),
            message_order=message_order
        )
        yield f"data: {completion_response.model_dump_json()}\n\n"

    except Exception as e:
        logging.error(f"Error in process_message_stream_detailed: {str(e)}")
        error_response = DetailedStreamResponse(
            type="error",
            content=f"处理过程中发生错误: {str(e)}",
            timestamp=datetime.now(),
            message_order=message_order
        )
        yield f"data: {error_response.model_dump_json()}\n\n"

async def process_message_stream_frontend(
    conversation_id: int,
    user_message: str,
    message_order: int
) -> AsyncGenerator[str, None]:
    """
    Stream SSE events in the frontend-friendly response format.

    Each event carries a shared message id, a coarse step progress (1-4),
    and a type of "text", "commands", or "html" depending on the chunk.
    The stream ends with a "complete" event, or an "error" event on failure.

    Args:
        conversation_id: Conversation this message belongs to (not used in the
            event payloads here; kept for interface symmetry with callers).
        user_message: Problem text forwarded to the GGB pipeline.
        message_order: Ordinal of this message (not embedded in this format).

    Yields:
        SSE-framed strings of the form "data: <json>\\n\\n".
    """
    # Bind the message id before the try block so the except handler can
    # always reference it (previously it was bound inside the try, risking
    # an UnboundLocalError while reporting an early failure).
    message_id = str(uuid.uuid4())

    # Loop-invariant step ordering, hoisted out of the chunk loop
    # (it was rebuilt on every iteration).
    step_mapping = {
        "element_extraction": 1,
        "ggb_generation": 2,
        "html_generation": 3,
        "html_optimization": 4
    }

    try:
        step_progress = 0

        async for chunk in ggb_management.process_problem(user_message):
            if chunk["type"] == "error":
                # Forward pipeline errors verbatim and stop streaming.
                response = FrontendStreamResponse(
                    id=message_id,
                    type="error",
                    payload=chunk["content"],
                    created_at=datetime.now()
                )
                yield f"data: {response.model_dump_json()}\n\n"
                return

            # Progress is monotonically non-decreasing across chunks.
            current_progress = step_mapping.get(chunk.get("step"), 0)
            step_progress = max(step_progress, current_progress)

            # Map chunk type to the frontend response type.
            response_type = "text"
            if chunk["type"] == "ggb_commands":
                response_type = "commands"
            elif chunk["type"] in ["html_code", "optimized_html"]:
                response_type = "html"

            response = FrontendStreamResponse(
                id=message_id,
                type=response_type,
                payload=chunk["content"],
                step_name=chunk.get("step"),
                step_progress=step_progress,
                # optimized_html is the last pipeline output, so mark it final.
                is_final=(chunk["type"] == "optimized_html"),
                created_at=datetime.now()
            )
            yield f"data: {response.model_dump_json()}\n\n"

        # Completion signal.
        completion_response = FrontendStreamResponse(
            id=message_id,
            type="complete",
            payload="处理完成",
            step_progress=4,
            is_final=True,
            created_at=datetime.now()
        )
        yield f"data: {completion_response.model_dump_json()}\n\n"

    except Exception as e:
        logging.error(f"Error in process_message_stream_frontend: {str(e)}")
        error_response = FrontendStreamResponse(
            id=message_id,
            type="error",
            payload=f"处理错误: {str(e)}",
            created_at=datetime.now()
        )
        yield f"data: {error_response.model_dump_json()}\n\n"

async def process_message_stream_simple(
    conversation_id: int,
    user_message: str,
    message_order: int
) -> AsyncGenerator[str, None]:
    """
    Stream SSE events in the simplified response format.

    Forwards every pipeline chunk as a "processing" event carrying the chunk
    text plus its step/type, then emits a final "done" event. Pipeline errors
    and unexpected exceptions each produce a single "error" event.

    Args:
        conversation_id: Conversation this message belongs to (unused here).
        user_message: Problem text forwarded to the GGB pipeline.
        message_order: Ordinal of this message (unused in this format).

    Yields:
        SSE-framed strings of the form "data: <json>\\n\\n".
    """
    def _frame(resp: SimpleStreamResponse) -> str:
        # Wrap a response model in SSE "data:" framing.
        return f"data: {resp.model_dump_json()}\n\n"

    try:
        async for piece in ggb_management.process_problem(user_message):
            if piece["type"] == "error":
                yield _frame(SimpleStreamResponse(
                    status="error",
                    message=piece["content"]
                ))
                return

            yield _frame(SimpleStreamResponse(
                status="processing",
                message=piece["content"],
                data={"step": piece.get("step"), "type": piece["type"]}
            ))

        yield _frame(SimpleStreamResponse(
            status="done",
            message="处理完成"
        ))

    except Exception as exc:
        logging.error(f"Error in process_message_stream_simple: {str(exc)}")
        yield _frame(SimpleStreamResponse(
            status="error",
            message=f"处理错误: {str(exc)}"
        ))

# API端点
@app.post("/chat/detailed")
async def chat_detailed(request: ChatRequest):
    """使用详细格式的聊天接口"""
    conversation_id = request.conversation_id or 1
    return StreamingResponse(
        process_message_stream_detailed(
            conversation_id=conversation_id,
            user_message=request.message,
            message_order=request.message_order or 1
        ),
        media_type="text/event-stream"
    )

@app.post("/chat/frontend")
async def chat_frontend(request: ChatRequest):
    """Chat endpoint streaming SSE events in the frontend-friendly format."""
    event_stream = process_message_stream_frontend(
        conversation_id=request.conversation_id or 1,
        user_message=request.message,
        message_order=request.message_order or 1,
    )
    return StreamingResponse(event_stream, media_type="text/event-stream")

@app.post("/chat/simple")
async def chat_simple(request: ChatRequest):
    """Chat endpoint streaming SSE events in the simplified format."""
    event_stream = process_message_stream_simple(
        conversation_id=request.conversation_id or 1,
        user_message=request.message,
        message_order=request.message_order or 1,
    )
    return StreamingResponse(event_stream, media_type="text/event-stream")

if __name__ == "__main__":
    # Run the service standalone on all interfaces, port 8002.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8002)