from fastapi import APIRouter, Depends, HTTPException, Body
from fastapi.responses import StreamingResponse
from sqlalchemy.orm import Session
from ..database import get_db
from ..auth.jwt_handler import get_current_user
from ..models.user import User
from pydantic import BaseModel
from typing import Any, Dict
import os
from openai import OpenAI
import datetime

# Router for the chat endpoints; no prefix, so paths mount exactly as declared.
router = APIRouter(
    tags=['chat'],
)

# Generic request model that accepts any JSON payload from the frontend.
# NOTE(review): currently unused — the /chat endpoint below takes a raw
# Dict[str, Any] via Body(...); confirm whether this model is still needed.
class ChatRequest(BaseModel):
    # The frontend does not commit to a fixed request schema, so allow
    # arbitrary extra fields on the model.
    class Config:
        extra = 'allow'

def _build_backend_messages(messages, system_prompt):
    """Convert frontend chat messages into the parts-array content format
    required by the locally deployed model.

    A system prompt, when given, is prepended as a system message. Messages
    with falsy content are skipped. String content is wrapped as a single
    ``{"type": "text"}`` part; list content is assumed to already be in
    parts form and is forwarded unchanged.
    """
    ai_messages = []
    if system_prompt:
        ai_messages.append({
            "role": "system",
            "content": [{"type": "text", "text": system_prompt}]
        })
    for msg in messages:
        content = msg.get('content')
        if not content:
            continue
        if isinstance(content, str):
            parts = [{"type": "text", "text": content}]
        else:
            parts = content
        ai_messages.append({"role": msg.get('role'), "content": parts})
    return ai_messages


@router.post('/chat')
async def chat(
    request: Dict[str, Any] = Body(...),
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Proxy a chat-completion request to an OpenAI-compatible backend.

    The JSON body mirrors the former Next.js implementation's payload:
    ``messages``, ``apiKey``, ``systemPrompt``, sampling parameters, and an
    optional model-configuration block (``endpoint_url``, ``stream``,
    ``presence_penalty``, ...).

    Returns:
        StreamingResponse: a plain-text token stream. Upstream errors are
        appended to the stream as ``[error]: ...`` so the frontend can
        surface them.

    Raises:
        HTTPException: 400 when no API key is available (neither the request
            nor the environment provides one); 500 for unexpected failures
            before streaming starts.
    """
    try:
        # Extract parameters, keeping parity with the frontend payload.
        messages = request.get('messages', [])
        api_key = request.get('apiKey')
        system_prompt = request.get('systemPrompt')
        model = request.get('model', 'glm-4v-flash')
        temperature = request.get('temperature', 0.7)
        max_tokens = request.get('max_tokens', 2000)
        top_p = request.get('top_p', 0.7)
        base_url = request.get('baseURL', 'https://open.bigmodel.cn/api/paas/v4')

        # Optional per-model configuration parameters.
        presence_penalty = request.get('presence_penalty')
        frequency_penalty = request.get('frequency_penalty')
        n = request.get('n')
        stop = request.get('stop')
        logit_bias = request.get('logit_bias')
        stream = request.get('stream', True)
        endpoint_url = request.get('endpoint_url')
        file_path = request.get('file_path')
        model_api_key = request.get('api_key')

        # Fallback credentials for the locally deployed model.
        LOCAL_MODEL_API_KEY = os.getenv('LOCAL_MODEL_API_KEY', '')
        LOCAL_MODEL_BASE_URL = os.getenv('LOCAL_MODEL_BASE_URL', '')

        # Prefer the caller-supplied key; fall back to the local-model key.
        final_api_key = api_key or LOCAL_MODEL_API_KEY
        if not final_api_key:
            raise HTTPException(status_code=400, detail='未提供 API Key')

        # Convert messages into the parts-array format the backend expects.
        ai_messages = _build_backend_messages(messages, system_prompt)

        # The model config's endpoint_url wins over the environment URL.
        # NOTE(review): `base_url` from the request body is only logged and
        # never used to build the client — confirm this is intentional.
        final_base_url = endpoint_url or LOCAL_MODEL_BASE_URL
        client = OpenAI(api_key=final_api_key, base_url=final_base_url)

        # --- diagnostic logging of the incoming request -------------------
        print("=== Chat API Input Parameters ===")
        print(f"User: {current_user.email}")
        print(f"Model: {model}")
        print(f"Temperature: {temperature}")
        print(f"Max Tokens: {max_tokens}")
        print(f"Top P: {top_p}")
        print(f"Base URL: {base_url}")
        print(f"Final Base URL: {final_base_url}")
        print(f"System Prompt: {system_prompt}")
        print(f"Messages Count: {len(messages)}")
        print(f"ai_messages: {len(ai_messages)}")
        print("=== 消息格式 ===")
        for i, msg in enumerate(ai_messages):
            print(f"Message {i+1}: {msg}")
        print("================")

        print("=== Model Configuration Parameters ===")
        print(f"Presence Penalty: {presence_penalty}")
        print(f"Frequency Penalty: {frequency_penalty}")
        print(f"N: {n}")
        print(f"Stop: {stop}")
        print(f"Logit Bias: {logit_bias}")
        print(f"Stream: {stream}")
        print(f"Endpoint URL: {endpoint_url}")
        print(f"File Path: {file_path}")
        # Security fix: never log the raw API key; show only a short prefix.
        masked_key = f"{model_api_key[:4]}****" if model_api_key else model_api_key
        print(f"Model API Key: {masked_key}")
        print("=====================================")

        for i, msg in enumerate(messages):
            # Bug fix: content may legitimately be a list (parts format) or
            # None; str() prevents the slice/len from crashing on non-strings.
            preview = str(msg.get('content', ''))
            suffix = '...' if len(preview) > 100 else ''
            print(f"Message {i+1} - Role: {msg.get('role')}, Content: {preview[:100]}{suffix}")
        print("==================================")

        # Record the request start time for duration logging.
        start_time = datetime.datetime.now()
        print(f"=== 开始调用大模型API - {start_time.strftime('%Y-%m-%d %H:%M:%S')} ===")

        async def token_stream():
            """Yield response text incrementally; errors become a final chunk."""
            try:
                # NOTE(review): the OpenAI SDK call below is synchronous and
                # blocks the event loop while chunks arrive; consider moving
                # it to a threadpool if this endpoint sees concurrent load.
                completion_params = {
                    'model': model or 'Qwen3-32B',  # default local model
                    'messages': ai_messages,
                    'temperature': temperature,
                    'max_tokens': max_tokens,
                    'top_p': top_p,
                    'stream': stream,
                }
                # Forward only the optional parameters the caller actually set.
                optional = {
                    'presence_penalty': presence_penalty,
                    'frequency_penalty': frequency_penalty,
                    'n': n,
                    'stop': stop,
                    'logit_bias': logit_bias,
                }
                completion_params.update(
                    {k: v for k, v in optional.items() if v is not None}
                )

                completion = client.chat.completions.create(**completion_params)
                print("=== 开始接收大模型响应 ===")
                accumulated_response = ""
                chunk_count = 0

                if stream:
                    for chunk in completion:
                        chunk_count += 1
                        content = None
                        if chunk.choices:
                            # In OpenAI SDK v1, delta is an object, not a dict.
                            content = getattr(chunk.choices[0].delta, 'content', None)

                        # Log every chunk for debugging.
                        print(f"Chunk {chunk_count}: {chunk}")

                        if content:
                            accumulated_response += content
                            print(f"Content: {content}")
                            yield content
                else:
                    # Bug fix: with stream=False the SDK returns a single
                    # ChatCompletion object, not an iterable of chunks —
                    # iterating it (as the old code did) would fail.
                    chunk_count = 1
                    text = completion.choices[0].message.content or ""
                    accumulated_response = text
                    if text:
                        yield text

                # Log completion time and total duration.
                end_time = datetime.datetime.now()
                duration = (end_time - start_time).total_seconds()

                print("=== 大模型响应完成 ===")
                print(f"请求开始时间: {start_time.strftime('%Y-%m-%d %H:%M:%S')}")
                print(f"请求结束时间: {end_time.strftime('%Y-%m-%d %H:%M:%S')}")
                print(f"总耗时: {duration:.2f}秒")
                print(f"总chunk数量: {chunk_count}")
                print(f"完整响应内容: {accumulated_response}")
                print("=========================")
            except Exception as e:
                # Log the failure with a timestamp for correlation.
                error_time = datetime.datetime.now()
                print(f"=== 大模型API调用出错 - {error_time.strftime('%Y-%m-%d %H:%M:%S')} ===")
                print(f"错误信息: {str(e)}")
                print(f"错误类型: {type(e).__name__}")
                print("=========================")

                # Surface the error as the final stream segment so the
                # frontend can display it instead of a silent truncation.
                yield f"\n[error]: {str(e)}"

        return StreamingResponse(token_stream(), media_type='text/plain; charset=utf-8')
    except HTTPException:
        # Re-raise intentional HTTP errors (e.g. the missing-key 400) as-is.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e) or '处理请求时发生错误')