import os
from http import HTTPStatus
import dashscope
from dashscope import Generation

from fastapi import FastAPI, WebSocket, WebSocketDisconnect
import aiohttp
import asyncio
import json

from common.logger import logger


# 若使用新加坡地域的模型，请释放下列注释
# dashscope.base_http_api_url = "https://dashscope-intl.aliyuncs.com/api/v1"

# DASHSCOPE_API_KEY = os.environ["DASHSCOPE_API_KEY"]  # read from env; never commit a real key
# DASHSCOPE_QWEN_STREAM_API_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"  # 请根据官方最新文档确认

# async def dashscope_stream_api(websocket: WebSocket):
#     await websocket.accept()
#     print("Client connected")
#     # 1. 准备工作：配置API Key
#     # 建议通过环境变量配置API Key，避免硬编码。
#     try:
#         dashscope.api_key = os.environ["DASHSCOPE_API_KEY"]  # redacted: never commit a real key
#     except KeyError:
#         logger.error("请设置环境变量 DASHSCOPE_API_KEY")
#         raise ValueError("请设置环境变量 DASHSCOPE_API_KEY")
#
#     data = await websocket.receive_text()
#     input_data = json.loads(data)
#     prompt = input_data.get("prompt", "")
#     if not prompt:
#         await websocket.send_text(json.dumps({"error": "No prompt provided"}))
#         return
#
#     # 2. 发起流式请求
#     messages = [
#         {"role": "system", "content": "You are a helpful assistant."},
#         {"role": "user", "content": prompt},
#     ]
#
#     try:
#         responses = Generation.call(
#             model="qwen-plus",
#             messages=messages,
#             result_format="message",
#             stream=True,
#             # 关键：设置为True以获取增量输出，性能更佳。
#             incremental_output=True,
#         )
#
#         # 3. 处理流式响应
#         content_parts = []
#         print("AI: ", end="", flush=True)
#
#         for resp in responses:
#             if resp.status_code == HTTPStatus.OK:
#                 content = resp.output.choices[0].message.content
#                 request_id = resp.request_id
#                 finish_reason =  resp.output.choices[0].finish_reason
#                 print(content, end="", flush=True)
#                 content_parts.append(content)
#
#                 await websocket.send_text(json.dumps({"request_id": request_id, "finish_reason": finish_reason,"text": content}))
#
#                 # 检查是否是最后一个包
#                 if resp.output.choices[0].finish_reason == "stop":
#                     usage = resp.usage
#                     print("\n--- 请求用量 ---")
#                     print(f"输入 Tokens: {usage.input_tokens}")
#                     print(f"输出 Tokens: {usage.output_tokens}")
#                     print(f"总计 Tokens: {usage.total_tokens}")
#             else:
#                 # 处理错误情况
#                 logger.error(
#                     f"\n请求失败: request_id={resp.request_id}, code={resp.code}, message={resp.message}"
#                 )
#                 break
#
#         full_response = "".join(content_parts)
#         # print(f"\n--- 完整回复 ---\n{full_response}")
#
#     except Exception as e:
#         logger.error(f"发生未知错误: {e}")

async def dashscope_stream_api(websocket: WebSocket):
    """Serve a WebSocket client with streaming qwen-plus completions.

    Accepts the connection, then loops: each text frame from the client must
    be a JSON object with a ``"prompt"`` key. The prompt is forwarded to the
    DashScope ``qwen-plus`` model in streaming mode and every incremental
    chunk is relayed back as a JSON object with ``request_id``,
    ``finish_reason`` and ``text`` fields. A failure while handling one
    request is reported and the loop continues; the coroutine returns when
    the client disconnects.

    Args:
        websocket: The FastAPI WebSocket connection to serve.
    """
    await websocket.accept()
    logger.info("WebSocket client connected")

    try:
        # SECURITY FIX: read the API key from the environment instead of
        # hard-coding a secret in source control.
        api_key = os.getenv("DASHSCOPE_API_KEY")
        if not api_key:
            logger.error("DASHSCOPE_API_KEY environment variable is not set")
            await websocket.send_text(
                json.dumps({"error": "Server misconfigured: missing API key"}))
            return
        dashscope.api_key = api_key

        # Keep serving requests on this connection until the client leaves.
        while True:
            try:
                data = await websocket.receive_text()
                logger.info("Received prompt: %s", data)

                input_data = json.loads(data)
                prompt = input_data.get("prompt", "")

                if not prompt:
                    await websocket.send_text(json.dumps({"error": "No prompt provided"}))
                    continue

                messages = [
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": prompt},
                ]

                # NOTE(review): Generation.call and iterating its stream are
                # blocking, which stalls the event loop while streaming.
                # Consider offloading to a worker thread (asyncio.to_thread)
                # or an async DashScope client — confirm SDK support.
                responses = Generation.call(
                    model="qwen-plus",
                    messages=messages,
                    result_format="message",
                    stream=True,
                    # incremental_output=True yields only the new delta per
                    # packet instead of the full accumulated text.
                    incremental_output=True,
                )

                content_parts = []
                for resp in responses:
                    if resp.status_code == HTTPStatus.OK:
                        choice = resp.output.choices[0]
                        content = choice.message.content
                        content_parts.append(content)
                        await websocket.send_text(json.dumps({
                            "request_id": resp.request_id,
                            "finish_reason": choice.finish_reason,
                            "text": content,
                        }))

                        # The final packet ("stop") carries token accounting.
                        if choice.finish_reason == "stop":
                            usage = resp.usage
                            logger.info(
                                "请求完成 - 输入: %s, 输出: %s",
                                usage.input_tokens, usage.output_tokens)
                    else:
                        logger.error(
                            f"请求失败: request_id={resp.request_id}, code={resp.code}, message={resp.message}")
                        break

            except WebSocketDisconnect:
                # BUG FIX: the previous broad `except Exception` swallowed
                # WebSocketDisconnect, so a client hang-up spun this loop
                # forever on a dead socket. Re-raise so the outer handler
                # can end the session cleanly.
                raise
            except json.JSONDecodeError:
                # Tell the client its payload was malformed instead of
                # failing silently on the server side.
                await websocket.send_text(json.dumps({"error": "Invalid JSON payload"}))
            except Exception as e:
                logger.error(f"处理请求时发生错误: {e}")
                # Keep listening for the next request instead of disconnecting.

    except WebSocketDisconnect:
        logger.info("WebSocket client disconnected")
    except Exception as e:
        logger.error(f"WebSocket连接异常: {e}")
    finally:
        logger.info("WebSocket连接关闭")
