import asyncio
import base64
import json
import logging
import os
import tempfile
import time
import uuid
from datetime import datetime
from typing import AsyncGenerator, Optional

from fastapi import FastAPI, HTTPException, BackgroundTasks
from fastapi.responses import StreamingResponse

# 导入流式响应模型
from model.stream_response_models import (
    StreamResponse, 
    create_response_head,
    create_reasoning_response,
    create_result_response,
    create_html_code_response,
    create_complete_response,
    create_error_response
)
from model.http_models import (
    Conversation, Message, ConversationCreate, MessageCreate,
    ConversationResponse, ChatRequest, ConversationUpdate
)
from llm_apis.ds_ggb_management import GGBManagement
from llm_apis.tex_pic_gen.api import classify_math_problem

# FastAPI application instance serving the chat/streaming endpoints below.
app = FastAPI()

# Configure logging: write INFO-and-above records to a local log file.
logging.basicConfig(
    filename="conversation_service.log",
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)

# Shared GGB pipeline manager used by all problem-processing branches.
ggb_management = GGBManagement()

# Branch handlers: one async generator per recognized problem type.
async def process_2d_geometry(problem: str):
    """Handle a 2D plane-geometry problem via the existing GGB pipeline.

    Relays every chunk produced by ``ggb_management.process_problem``.
    """
    stream = ggb_management.process_problem(problem)
    async for piece in stream:
        yield piece

async def process_3d_geometry(problem: str, image_path: Optional[str] = None):
    """Handle a 3D solid-geometry problem via the GGB 3D pipeline.

    Args:
        problem: Extracted problem text.
        image_path: Optional filesystem path to an image of the problem
            figure; forwarded as-is to the 3D pipeline.

    Yields:
        Chunk dicts produced by ``ggb_management.process_threed_problem``.
    """
    # Fix: the default of None was annotated as plain ``str``; Optional[str]
    # is the correct type (Optional is already imported at module level).
    async for chunk in ggb_management.process_threed_problem(problem, image_path):
        yield chunk

async def process_function(problem: str):
    """Handle a function-type math problem via the GGB pipeline.

    Relays every chunk produced by ``ggb_management.process_function_problem``.
    """
    async for item in ggb_management.process_function_problem(problem):
        yield item

async def process_knowledge(knowledge_point: str):
    """Handle a knowledge-point request via the GGB pipeline.

    Relays every chunk produced by ``ggb_management.process_knowledge_problem``.
    """
    source = ggb_management.process_knowledge_problem(knowledge_point)
    async for item in source:
        yield item

async def process_other(problem: str):
    """Placeholder branch for problem types without a dedicated pipeline.

    Emits a fixed sequence of chunks announcing that handling for this
    category is still under development, shaped like the chunks from the
    real branches so the streaming pipeline consumes it unchanged.
    """
    placeholder_sequence = (
        ("reasoning", "正在处理其他类型数学问题...\n", "other_start"),
        ("reasoning", "其他类型问题处理功能正在开发中，敬请期待！\n", "other_placeholder"),
        ("element", "其他类型问题分析 - 待实现", "other_element"),
        ("complete", "其他类型问题处理完成（占位实现）", "other_complete"),
    )
    for chunk_type, text, step_name in placeholder_sequence:
        yield {"type": chunk_type, "content": text, "step": step_name}

async def process_message_stream(
    conversation_id: str,
    message_id: str,
    frontend_message_id: str,
    user_message: str,
    message_order: int,
    image_base64: Optional[str] = None
) -> AsyncGenerator[str, None]:
    """
    Stream one chat turn as Server-Sent Events (SSE).

    Pipeline:
      1. Emit the response head.
      2. Classify the problem type (text plus optional image).
      3. Route to the matching branch generator (2D/3D geometry, function,
         knowledge point, or the placeholder branch).
      4. Relay the branch's chunks, streaming reasoning and collecting the
         final GGB commands / HTML.
      5. Emit the final result, the HTML code, and a completion marker.

    Args:
        conversation_id: Id of the conversation this turn belongs to.
        message_id: Server-generated id for the assistant message.
        frontend_message_id: Client-side correlation id.
        user_message: Raw problem text typed by the user.
        message_order: Ordinal of the message within the conversation.
        image_base64: Optional base64-encoded image of the problem.

    Yields:
        SSE frames: strings of the form ``"data: <json>\\n\\n"``.
    """
    start_time = time.time()
    total_tokens = 0
    final_result = ""
    final_html = ""
    # Temp file holding the decoded image, if any. Defined at function scope
    # and removed only in the finally block below so that it (a) is still on
    # disk when the 3D branch reads it, and (b) never leaks when
    # classification or streaming raises. (Previously it was deleted right
    # after classification, before the 3D branch could use it.)
    temp_image_path: Optional[str] = None

    try:
        # 1. Send the response head.
        response_head = create_response_head(
            conversation_id=conversation_id,
            message_id=message_id,
            frontend_message_id=frontend_message_id,
            message_order=message_order
        )
        yield f"data: {response_head.model_dump_json()}\n\n"

        # 2. Intent classification.
        reasoning_response = create_reasoning_response(
            content='正在识别题目类型...\n', 
            conversation_id=conversation_id, 
            message_id=message_id, 
            frontend_message_id=frontend_message_id, 
            message_order=message_order, 
            thinking_elapsed_secs=0, 
            step='intent_classification'
        )
        yield f"data: {reasoning_response.model_dump_json()}\n\n"

        try:
            logging.info(f"开始意图识别，用户消息: {user_message[:100]}...")
            logging.info(f"是否包含图片: {image_base64 is not None}")

            # Decode the base64 image into a temporary PNG file, if provided.
            if image_base64:
                logging.info("开始处理base64图片...")
                try:
                    image_data = base64.b64decode(image_base64)
                    with tempfile.NamedTemporaryFile(delete=False, suffix='.png') as temp_file:
                        temp_file.write(image_data)
                        temp_image_path = temp_file.name
                    logging.info(f"图片已保存到临时文件: {temp_image_path}")
                except Exception as img_error:
                    # Best effort: fall back to text-only classification.
                    logging.error(f"图片处理失败: {img_error}")

            logging.info(f"调用classify_math_problem，图片路径: {temp_image_path}, 文字内容: {user_message[:50]}...")

            classification_result = classify_math_problem(
                image_path=temp_image_path,
                question_text=user_message
            )

            logging.info(f"意图识别结果: {classification_result}")
            logging.info(f"检查分类结果成功状态: {classification_result.get('success', False)}")

            if not classification_result["success"]:
                error_msg = classification_result.get('error', '未知错误')
                logging.error(f"意图识别失败: {error_msg}")
                error_response = create_error_response(
                    error_message=f"意图识别失败: {error_msg}",
                    conversation_id=conversation_id,
                    message_id=message_id,
                    frontend_message_id=frontend_message_id,
                    message_order=message_order
                )
                yield f"data: {error_response.model_dump_json()}\n\n"
                return

            logging.info(f"分类结果结构: {classification_result}")
            problem_type = classification_result["result"]["类型"]
            extracted_problem = classification_result["result"]["题目"]
            logging.info(f"识别的问题类型: {problem_type}, 提取的题目: {extracted_problem[:50]}...")

            # Report the classification outcome to the client.
            intent_result_response = create_reasoning_response(
                content=f'识别结果：题目类型为【{problem_type}】\n提取的题目内容：{extracted_problem}\n\n开始处理...\n', 
                conversation_id=conversation_id, 
                message_id=message_id, 
                frontend_message_id=frontend_message_id, 
                message_order=message_order, 
                thinking_elapsed_secs=time.time()-start_time, 
                step='intent_result'
            )
            yield f"data: {intent_result_response.model_dump_json()}\n\n"

        except Exception as e:
            logging.error(f"意图识别过程出错: {str(e)}")
            error_response = create_error_response(
                error_message=f"意图识别过程出错: {str(e)}",
                conversation_id=conversation_id,
                message_id=message_id,
                frontend_message_id=frontend_message_id,
                message_order=message_order
            )
            yield f"data: {error_response.model_dump_json()}\n\n"
            return

        # 3. Route to the branch matching the classified problem type.
        logging.info(f"开始路由到处理分支: {problem_type}")
        if problem_type == "2D平面几何":
            logging.info("路由到2D平面几何处理分支")
            processing_generator = process_2d_geometry(extracted_problem)
        elif problem_type == "3D立体几何":
            logging.info("路由到3D立体几何处理分支")
            # temp_image_path is still valid here; it is removed only in the
            # finally block after streaming completes.
            processing_generator = process_3d_geometry(extracted_problem, temp_image_path)
        elif problem_type == "函数":
            logging.info("路由到函数处理分支")
            processing_generator = process_function(extracted_problem)
        elif problem_type == "知识点":
            logging.info("路由到知识点处理分支")
            processing_generator = process_knowledge(extracted_problem)
        else:  # fallback: every other category
            logging.info(f"路由到其他类型处理分支，类型: {problem_type}")
            processing_generator = process_other(extracted_problem)

        # 4. Relay the selected branch's chunks.
        logging.info(f"开始流式处理分支数据...")
        chunk_count = 0
        async for chunk in processing_generator:
            chunk_count += 1
            elapsed_time = time.time() - start_time

            logging.info(f"收到第{chunk_count}个chunk，类型: {chunk.get('type')}, 步骤: {chunk.get('step')}")
            logging.info(f"Chunk内容长度: {len(str(chunk.get('content', '')))}")

            if chunk["type"] == "error":
                logging.error(f"处理分支返回错误: {chunk['content']}")
                error_response = create_error_response(
                    error_message=chunk["content"],
                    conversation_id=conversation_id,
                    message_id=message_id,
                    frontend_message_id=frontend_message_id,
                    message_order=message_order
                )
                yield f"data: {error_response.model_dump_json()}\n\n"
                return

            # Stream the full reasoning trace, untruncated.
            if chunk["type"] == "reasoning":
                logging.info(f"输出reasoning内容: {chunk['content'][:100]}...")
                response = create_reasoning_response(
                    content=chunk["content"],
                    conversation_id=conversation_id,
                    message_id=message_id,
                    frontend_message_id=frontend_message_id,
                    message_order=message_order,
                    thinking_elapsed_secs=elapsed_time,
                    step=chunk.get("step")
                )
                yield f"data: {response.model_dump_json()}\n\n"

            # Collect data for the final result payload.
            elif chunk["type"] in ["element", "ggb_commands"]:
                logging.info(f"收到{chunk['type']}数据: {chunk['content'][:200]}...")
                if chunk["type"] == "element":
                    logging.info(f"元素提取完成，内容: {chunk['content']}")
                elif chunk["type"] == "ggb_commands":
                    final_result = chunk["content"]
                    logging.info(f"GGB命令生成完成，内容长度: {len(final_result)}")
                # Rough token estimate: ~4 characters per token.
                total_tokens += len(chunk["content"]) // 4

            # Collect the generated HTML code.
            elif chunk["type"] == "html_code":
                final_html = chunk["content"]
                logging.info(f"HTML生成完成，内容长度: {len(final_html)}")

            # Other chunk types are only logged.
            elif chunk["type"] in ["complete"]:
                logging.info(f"收到{chunk['type']}类型数据")

        logging.info(f"流式处理完成，总共处理了{chunk_count}个chunk")

        # 5. Send the final result (GGB commands), if any.
        if final_result:
            result_response = create_result_response(
                content=final_result,
                conversation_id=conversation_id,
                message_id=message_id,
                frontend_message_id=frontend_message_id,
                message_order=message_order,
                accumulated_token_usage=total_tokens,
                step="final_result"
            )
            yield f"data: {result_response.model_dump_json()}\n\n"

        # 6. Send the generated HTML code, if any.
        if final_html:
            html_response = create_html_code_response(
                content=final_html,
                conversation_id=conversation_id,
                message_id=message_id,
                frontend_message_id=frontend_message_id,
                message_order=message_order,
                step="final_html"
            )
            yield f"data: {html_response.model_dump_json()}\n\n"

        # 7. Send the completion marker.
        final_elapsed_time = time.time() - start_time
        completion_response = create_complete_response(
            conversation_id=conversation_id,
            message_id=message_id,
            frontend_message_id=frontend_message_id,
            message_order=message_order,
            accumulated_token_usage=total_tokens,
            thinking_elapsed_secs=final_elapsed_time
        )
        yield f"data: {completion_response.model_dump_json()}\n\n"

    except Exception as e:
        logging.error(f"Error in process_message_stream: {str(e)}")
        error_response = create_error_response(
            error_message=f"处理过程中发生错误: {str(e)}",
            conversation_id=conversation_id,
            message_id=message_id,
            frontend_message_id=frontend_message_id,
            message_order=message_order
        )
        yield f"data: {error_response.model_dump_json()}\n\n"
    finally:
        # Remove the temp image file regardless of how the stream ended.
        if temp_image_path and os.path.exists(temp_image_path):
            try:
                os.unlink(temp_image_path)
                logging.info(f"已清理临时文件: {temp_image_path}")
            except OSError as cleanup_error:
                logging.error(f"清理临时文件失败: {cleanup_error}")

@app.post("/api/v2/chat")
async def chat_v2(request: ChatRequest):
    """Unified chat endpoint streaming responses in the simplified SSE format."""
    # Derive identifiers for this turn, falling back to defaults when absent.
    conversation_id = str(request.conversation_id or 1)
    message_id = str(uuid.uuid4())
    frontend_message_id = str(uuid.uuid4())
    message_order = request.message_order or 1

    # Headers for a long-lived SSE connection with permissive CORS.
    sse_headers = {
        "Cache-Control": "no-cache",
        "Connection": "keep-alive",
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, PUT, DELETE, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization"
    }

    event_stream = process_message_stream(
        conversation_id=conversation_id,
        message_id=message_id,
        frontend_message_id=frontend_message_id,
        user_message=request.message,
        message_order=message_order,
        image_base64=request.image_base64
    )
    return StreamingResponse(
        event_stream,
        media_type="text/event-stream",
        headers=sse_headers
    )

@app.post("/chat")
async def chat_legacy(request: ChatRequest):
    """Legacy chat endpoint kept for backward compatibility; delegates to v2."""
    response = await chat_v2(request)
    return response

@app.get("/health")
async def health_check():
    """Liveness probe: report service status plus the current timestamp."""
    now_iso = datetime.now().isoformat()
    return {"status": "healthy", "timestamp": now_iso}

if __name__ == "__main__":
    # Run the service directly with uvicorn (standalone/dev entry point).
    import uvicorn
    uvicorn.run(
        app, 
        host="0.0.0.0", 
        port=8001,
        timeout_keep_alive=0,  # remove the keep-alive timeout
        timeout_graceful_shutdown=0  # remove the graceful-shutdown timeout
    )
