from fastapi import APIRouter, Depends, HTTPException, Body
from fastapi.responses import StreamingResponse
from sqlalchemy.orm import Session
from ..database import get_db
from ..auth.jwt_handler import get_current_user
from ..models.user import User
from pydantic import BaseModel
from typing import Any, Dict
import os
from openai import OpenAI

# Router for the chat endpoints; the URL prefix is applied by the
# application when this router is included.
router = APIRouter(
    tags=['chat'],
)

# Generic request model able to accept any JSON payload from the frontend.
class ChatRequest(BaseModel):
    # The frontend does not pin down a request schema, so allow arbitrary
    # extra fields instead of declaring them explicitly.
    # NOTE(review): this model is not referenced by the /chat route below,
    # which reads the raw body as Dict[str, Any] — presumably kept for
    # future validation; confirm it is still needed.
    class Config:
        extra = 'allow'

@router.post('/chat')
async def chat(
    request: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Proxy a chat-completion request to a Zhipu (OpenAI-compatible) API
    and stream the generated tokens back as plain text.

    The request body mirrors the original Next.js frontend payload:
    ``messages``, ``apiKey``, ``systemPrompt``, ``model``, ``temperature``,
    ``max_tokens``, ``top_p`` and ``baseURL`` (the mixed camel/snake casing
    is kept as-is for frontend compatibility).

    Returns:
        StreamingResponse: ``text/plain`` stream of generated token chunks.
            Upstream errors during streaming are appended to the stream as a
            trailing ``[error]: ...`` segment so the frontend can display them.

    Raises:
        HTTPException: 400 when no API key is supplied (neither in the body
            nor via the ``ZHIPU_API_KEY`` environment variable); 500 on any
            other unexpected failure before streaming starts.
    """
    try:
        # Extract parameters; key names intentionally match the frontend.
        messages = request.get('messages', [])
        api_key = request.get('apiKey')
        system_prompt = request.get('systemPrompt')
        model = request.get('model', 'glm-4v-flash')
        temperature = request.get('temperature', 0.7)
        max_tokens = request.get('max_tokens', 2000)
        top_p = request.get('top_p', 0.7)
        base_url = request.get('baseURL', 'https://open.bigmodel.cn/api/paas/v4')

        # Debug dump of the incoming request (the API key itself is never
        # printed).
        print("=== Chat API Input Parameters ===")
        print(f"User: {current_user.email}")
        print(f"Model: {model}")
        print(f"Temperature: {temperature}")
        print(f"Max Tokens: {max_tokens}")
        print(f"Top P: {top_p}")
        print(f"Base URL: {base_url}")
        print(f"System Prompt: {system_prompt}")
        print(f"Messages Count: {len(messages)}")
        for i, msg in enumerate(messages):
            # Multimodal messages (e.g. for glm-4v) may carry non-string
            # content such as a list of parts, and a present-but-None value
            # bypasses .get()'s default — stringify before slicing so the
            # debug log can never crash the request.
            raw_content = msg.get('content') or ''
            text = raw_content if isinstance(raw_content, str) else str(raw_content)
            suffix = '...' if len(text) > 100 else ''
            print(f"Message {i+1} - Role: {msg.get('role')}, Content: {text[:100]}{suffix}")
        print("==================================")

        # Prefer the key from the request body, falling back to the server
        # environment.
        # NOTE(review): accepting an API key from the request body lets any
        # authenticated user route traffic through an arbitrary key —
        # confirm this is intended.
        final_api_key = api_key or os.getenv('ZHIPU_API_KEY')
        if not final_api_key:
            raise HTTPException(status_code=400, detail='未提供 API Key')

        # Assemble the upstream message list: optional system prompt first,
        # then every incoming message with non-empty content.
        ai_messages = []
        if system_prompt:
            ai_messages.append({"role": "system", "content": system_prompt})
        for msg in messages:
            if msg.get('content'):
                ai_messages.append(msg)

        # Initialize the OpenAI client (pointed at the Zhipu endpoint).
        client = OpenAI(api_key=final_api_key, base_url=base_url)

        # Generator that yields tokens as they arrive from the upstream API.
        # NOTE(review): the OpenAI SDK stream is a synchronous iterator, so
        # iterating it here blocks the event loop between chunks — consider
        # offloading to a thread if this becomes a throughput problem.
        async def token_stream():
            try:
                completion = client.chat.completions.create(
                    model=model,
                    messages=ai_messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                    top_p=top_p,
                    stream=True,
                )
                for chunk in completion:
                    content = None
                    if chunk.choices:
                        # In OpenAI Python SDK v1, delta is an object, not a
                        # dict, hence getattr instead of subscripting.
                        delta = chunk.choices[0].delta
                        content = getattr(delta, 'content', None)
                    if content:
                        yield content
            except Exception as e:
                # Surface the error as the final stream segment so the
                # frontend can display it (headers are already sent).
                yield f"\n[error]: {str(e)}"

        return StreamingResponse(token_stream(), media_type='text/plain; charset=utf-8')
    except HTTPException:
        # Re-raise deliberate HTTP errors (e.g. the 400 above) untouched.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e) or '处理请求时发生错误')