import json
import os

from django.http import StreamingHttpResponse
from django.shortcuts import render
from openai import OpenAI
from rest_framework.response import Response
from rest_framework.views import APIView


# Create your views here.


class QaAPIView(APIView):
    """
    Plain (non-streaming) single-turn Q&A endpoint.

    POST body:
        question (str): required, the user's question.
        userid   (any): optional, logged only.

    Returns a DRF Response:
        {"code": 1, "message": "SUCCESS", "data": {...}} on success,
        {"code": 0, "message": ...} when input is missing or the upstream
        model call fails.
    """

    def post(self, request):
        question = request.data.get("question")
        userid = request.data.get("userid")
        print(f"用户ID:{userid}")

        # Guard clause: reject empty/missing question early.
        if not question:
            return Response({
                "code": 0,
                "message": "参数不能为空"

            })

        client = OpenAI(
            # If the env var is not configured, replace with api_key="sk-xxx"
            # (a DashScope/Bailian API key).
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
        )

        try:
            completion = client.chat.completions.create(
                # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
                model="qwen-plus",
                messages=[
                    {"role": "system", "content": "你是一个智能助手"},
                    {"role": "user", "content": question},
                ],
                # Qwen3 controls the "thinking" phase via enable_thinking
                # (open-source default True, commercial default False).
                # For open-source Qwen3 without streaming, uncomment the next
                # line or the call will error:
                # extra_body={"enable_thinking": False},
            )
        except Exception as e:
            # Network/auth/upstream failures previously escaped as an
            # unhandled 500; surface them in the API's own error envelope.
            return Response({
                "code": 0,
                "message": f"模型调用失败: {e}"
            })

        result = completion.choices[0].message.content
        print(result)

        return Response({
            "code": 1,
            "message": "SUCCESS",
            "data": {
                "question": question,
                "answer": result
            }
        })


class StreamQaAPiView(APIView):
    """
    Streaming single-turn Q&A endpoint (Tongyi Qianwen / Qwen).

    Pushes the model's incremental output to the client in real time over
    the SSE (text/event-stream) protocol. Each event's ``data:`` payload is
    a JSON object: {"type": "message"|"complete"|"error",
    "content": str, "finished": bool}.
    """
    permission_classes = []  # NOTE: set real permissions in production

    def post(self, request):
        # User input; defaults to a self-introduction prompt when absent.
        user_input = request.data.get('question', '请介绍一下你自己')

        # Validate input. Wrap the payload in a list: StreamingHttpResponse
        # iterates its argument, so a bare str would be streamed one
        # character at a time.
        if not user_input:
            return StreamingHttpResponse(
                [self.event_stream(json.dumps({'error': '请提供提问内容'}))],
                content_type='text/event-stream'
            )

        # OpenAI-compatible client pointed at DashScope.
        client = OpenAI(
            api_key=os.getenv("DASHSCOPE_API_KEY"),
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
        )

        messages = [
            {"role": "system", "content": "你是一个助手"},
            {"role": "user", "content": user_input}
        ]

        def stream():
            """Generator yielding SSE-formatted chunks from the model."""
            try:
                response = client.chat.completions.create(
                    model="qwen-plus",
                    messages=messages,
                    stream=True,
                    temperature=0.7,
                    max_tokens=1024
                )

                for chunk in response:
                    # Some chunks (e.g. usage/keep-alive frames) carry no
                    # choices; skip them instead of raising IndexError.
                    if not chunk.choices:
                        continue
                    content = chunk.choices[0].delta.content
                    if content:
                        yield self.event_stream(json.dumps({
                            'type': 'message',
                            'content': content,
                            'finished': False
                        }))

                # Explicit end-of-stream marker for the client.
                yield self.event_stream(json.dumps({
                    'type': 'complete',
                    'content': '',
                    'finished': True
                }))

            except Exception as e:
                # Report failures in-band; the HTTP response has already
                # started, so an exception cannot change the status code.
                yield self.event_stream(json.dumps({
                    'type': 'error',
                    'content': f'发生错误: {str(e)}',
                    'finished': True
                }))

        return StreamingHttpResponse(
            stream(),
            content_type='text/event-stream'
        )

    @staticmethod
    def event_stream(data):
        """Wrap *data* in the SSE wire format (``data: ...\\n\\n``)."""
        return f'data: {data}\n\n'