import base64
import json
import os
import re
import time
from typing import Iterator

import httpx
import pandas as pd
import requests
from asgiref.sync import sync_to_async
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponseBadRequest, JsonResponse, HttpResponseNotAllowed, FileResponse, Http404, \
    StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt
from openai import OpenAI
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

from psychology_questions.models import (
    Questionnaire, Dimension, Question,
    UserQuestionnaire, UserDimensionScore, UserAnswer, UserQuestionnaireReport
)
from .models import Conversation, Message
from .serializers import ConversationList

# NOTE(security): the DashScope API key was hard-coded here.  Prefer the
# DASHSCOPE_API_KEY environment variable; the old literal is kept only as a
# fallback so existing deployments continue to work.  Rotate this key.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-064abae05a144789b9d1e5aebae9f567"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",  # DashScope OpenAI-compatible endpoint
)

# Host of the auxiliary services (LLM backend on :8088, questionnaire UI on
# :8000, report generator — see save_report).
all_url = "192.168.1.101"
# all_url = "172.20.0.89"

def chat_llm(prompt: str, system: str) -> str:
    """
    Run a single, non-streaming chat completion and return it as a JSON string.

    The caller (``question1``) does ``json.loads(...)`` on the result and reads
    ``choices[0].message.content``, which requires a complete ``ChatCompletion``
    object.  The previous implementation passed ``stream=True``, which makes
    the SDK return a ``Stream`` that has no ``model_dump_json()`` method and
    would crash here; streaming is therefore disabled (``chat_llm_stream``
    covers the streaming case).

    :param prompt: the user message.
    :param system: the system prompt.
    :return: the full completion serialized as a JSON string.
    """
    completion = client.chat.completions.create(
        # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
        model="qwen3-32b",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ],
        stream=False,  # a complete response object is required (see docstring)
        # Qwen3 exposes a "thinking" trace via enable_thinking (open-source
        # builds default to True); it must be disabled for non-streaming calls.
        extra_body={"enable_thinking": False},
    )
    payload = completion.model_dump_json()
    print(payload)
    return payload

def chat_llm_stream(prompt: str, system: str) -> Iterator[str]:
    """
    Stream a chat completion, yielding content deltas as they arrive.

    The previous signature was annotated ``-> json`` (the module object),
    which is meaningless for a generator; it is corrected to
    ``Iterator[str]``.

    :param prompt: the user message.
    :param system: the system prompt.
    :yield: successive chunks of the assistant's reply text.
    """
    completion = client.chat.completions.create(
        # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
        model="qwen3-32b",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ],
        stream=True,  # enable streaming output
        # Qwen3 exposes a "thinking" trace via enable_thinking (open-source
        # builds default to True, commercial builds to False); disabled here.
        extra_body={"enable_thinking": False},
    )

    for chunk in completion:
        delta = chunk.choices[0].delta.content
        # The final chunk carries a None delta; skip it.
        if delta is not None:
            yield delta


def json_extract(json_datas: str):
    """
    Extract and parse the first ```json ... ``` fenced block from an LLM reply.

    Generalized: when no fenced block is present, the whole string is tried as
    bare JSON (models sometimes omit the fence).  Returns the parsed object,
    or ``None`` when nothing parseable is found (previously the function fell
    off the end and returned ``None`` implicitly).

    :param json_datas: raw model output that may contain a JSON code block.
    :return: the decoded JSON value, or None on failure.
    """
    # Assumes at most one fenced json block matters; DOTALL lets the payload
    # span multiple lines.
    match = re.search(r"```json\n(.*?)\n```", json_datas, re.DOTALL)
    candidate = match.group(1) if match else json_datas
    try:
        return json.loads(candidate)
    except json.JSONDecodeError as e:
        if match:
            print("JSON解析错误:", e)
        else:
            print("未找到匹配的JSON代码块")
        return None


def save_report(all_score: int, score_all: list, dimensions, question) -> None:
    """
    POST the scoring results to the report service and save the returned
    Word document under ./static/reports/.

    :param all_score: total questionnaire score.
    :param score_all: per-question scores, in question order.
    :param dimensions: list of {"name", "score"} dicts, one per dimension.
    :param question: questionnaire definition; only its "eval" entry is sent.
    """
    # NOTE(review): other calls to this host use an explicit port (e.g. :8088);
    # confirm the report service really listens on port 80.
    url = f"http://{all_url}/get_doc"
    payload = {
        "data": {
            "all_score": all_score,
            "score_all": score_all,
            "dimensions": dimensions,
            "eval": question["eval"],
        }
    }
    print(payload)
    # A timeout keeps the calling view from hanging forever if the report
    # service is unreachable (the original call had none).
    response = requests.post(url, json=payload, timeout=60)
    # On success, persist the returned Word file.
    if response.status_code == 200:
        # Make sure the target directory exists before writing.
        os.makedirs("./static/reports", exist_ok=True)
        with open("./static/reports/downloaded_report.docx", "wb") as f:
            f.write(response.content)
        print("Word 文档已保存为 downloaded_report.docx")
    else:
        print("请求失败：", response.status_code)


def encode_url(img_byte: bytes) -> str:
    """
    Convert raw image bytes into the base64 data-URL format the API expects.

    :param img_byte: raw image file content.
    :return: a ``data:image/jpeg;base64,...`` URL string.
    """
    # Base64-encode the bytes, then wrap them in a data URL.  The media type
    # is assumed to be JPEG; adjust (e.g. to png) based on the actual upload.
    b64_payload = base64.b64encode(img_byte).decode('utf-8')
    return f"data:image/jpeg;base64,{b64_payload}"


@login_required(login_url="/login")
async def chat(request):
    """
    Async (Channels) chat endpoint served as Server-Sent Events.

    GET renders the chat page.  POST expects a JSON body with:
      * ``messages``: list of {"role", "content"} dicts (the new turn(s));
      * ``conversation_id`` (optional): resume an existing conversation.

    For an existing conversation the stored history is prepended before the
    request is proxied to the backend LLM service on ``all_url``; for a new
    conversation one is created, titled after the first message.  Backend SSE
    chunks are re-emitted to the client, and the assistant's full reply is
    persisted when the stream completes.
    """
    if request.method != "POST":
        return render(request, "index.html")

    try:
        data = json.loads(request.body)
        messages = data.get("messages", [])
    except (ValueError, KeyError):
        return HttpResponseBadRequest("Invalid json")
    if not messages:
        # A new conversation's title comes from messages[0]; reject empty
        # input up front instead of crashing with an IndexError below.
        return HttpResponseBadRequest("Invalid json")

    # ▶️ 1.1 Find or create the conversation.
    conv_id = data.get("conversation_id")
    print("conv_id:", conv_id)
    flag = bool(conv_id)
    # BUGFIX: formatted_history was only assigned on the existing-conversation
    # branch but is read unconditionally below, so every brand-new
    # conversation raised a NameError.  Default to an empty history.
    formatted_history = []
    if conv_id:
        conversation = await database_sync_to_async(get_object_or_404)(
            Conversation, pk=conv_id, user=request.user
        )
        # Load the stored history (oldest first) and prepend it so the
        # backend receives the full context.
        history_messages = await database_sync_to_async(list)(
            conversation.messages.all().order_by('created_at')
        )
        formatted_history = [
            {"role": msg.role, "content": msg.content}
            for msg in history_messages
        ]
        messages = formatted_history + messages
    else:
        conversation = await database_sync_to_async(Conversation.objects.create)(
            user=request.user, title=messages[0]["content"]
        )
    print("formatted_history:", formatted_history)

    # ▶️ 1.2 Persist only the newly submitted messages — the history entries
    # are already stored in the database.
    message_objects = [
        Message(
            conversation=conversation,
            role=msg.get("role", "user"),
            content=msg["content"],
        )
        for msg in messages
        if msg not in formatted_history
    ]
    print("message_objects:", message_objects)
    if message_objects:
        await database_sync_to_async(Message.objects.bulk_create)(message_objects)

    # The first turn of a conversation goes to /process/, follow-ups to /chat/.
    if not flag:
        url = f"http://{all_url}:8088/process/"
    else:
        url = f"http://{all_url}:8088/chat/"

    async def generate_stream_response():
        """Proxy the backend SSE stream, then persist the assistant reply."""
        full_response = ""
        response_type = None
        print("进入流式输出")
        try:
            # Named http_client to avoid shadowing the module-level OpenAI
            # ``client``.
            async with httpx.AsyncClient(timeout=None) as http_client:
                async with http_client.stream("POST", url, json={"messages": messages}) as resp:
                    print("访问url")
                    async for line in resp.aiter_lines():
                        print(line.strip())
                        if not line.strip():
                            continue
                        try:
                            # Strip the SSE "data:" prefix and parse the chunk.
                            chunk_data = json.loads(line[len("data:"):].strip())
                        except json.JSONDecodeError as e:
                            print(e)
                            continue

                        if 'content' in chunk_data:
                            content_chunk = chunk_data['content']
                            full_response += content_chunk
                            yield f"data: {json.dumps({'content': content_chunk, 'conversation_id': conversation.id})}\n\n"
                        if 'done' in chunk_data:
                            # ▶️ 1.7 Final payload that closes the stream.
                            payload = {
                                "conversation_id": conversation.id,
                                "type_int": response_type,
                                "done": True,
                            }
                            # Type 0 means the backend wants a questionnaire
                            # pushed to the user.
                            if response_type == 0:
                                payload["questionnaire_list"] = [
                                    {
                                        "title_page": "情绪障碍筛查量表（SCARED）",
                                        "src_page": f"http://{all_url}:8000/questions/",  # replace with the real address
                                        "id_page": "anxiety_sas"
                                    },
                                ]
                            yield f"data: {json.dumps(payload)}\n\n"
                            break
                        if 'type' in chunk_data:
                            response_type = chunk_data['type']

                # ▶️ 1.4 Persist the assistant's full reply.
                await database_sync_to_async(Message.objects.create)(
                    conversation=conversation,
                    role="assistant",
                    content=full_response
                )

                # ▶️ 1.5 Update the conversation preview (truncated to the
                # CharField limit).
                conversation.last_message = full_response[:255]
                await database_sync_to_async(conversation.save)()

        except Exception as e:
            print(f"Error in generate_stream_response: {e}")
            yield f"data: {json.dumps({'error': str(e)})}\n\n"

    response = StreamingHttpResponse(
        generate_stream_response(),
        content_type='text/event-stream'
    )
    response['Cache-Control'] = 'no-cache'
    # Disable nginx proxy buffering so chunks reach the client immediately.
    response['X-Accel-Buffering'] = 'no'
    return response

@login_required(login_url="/login")
def history_list(request):
    """
    Return every conversation of the current user, newest update first.

    Response shape: ``{"conversations": [...serialized conversations...]}``.
    The previous version decoded ``request.body`` into an unused local on
    POST; that dead read has been removed.
    """
    qs = (
        Conversation.objects
        .filter(user=request.user)
        .order_by("-updated_at")
    )
    data = ConversationList(qs, many=True).data
    payload = {
        "conversations": data,
    }
    # NOTE(review): 201 (Created) is kept for client compatibility, but this
    # endpoint creates nothing — 200 would be the correct status.
    return JsonResponse(payload, status=201)

@api_view(['DELETE'])
@login_required(login_url="/login")
@permission_classes([IsAuthenticated])
def history_delete(request, pk):
    """
    Delete one of the current user's conversations.

    Responds 204 on success, 404 when the conversation does not exist or
    belongs to another user.
    """
    conversation = get_object_or_404(Conversation, pk=pk, user=request.user)
    conversation.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)

@api_view(['GET'])
@permission_classes([IsAuthenticated])
@login_required(login_url="/login")
def history_conversations(request, conversation_id):
    """
    Return a single conversation (with its messages) owned by the current
    user; 404 when it does not exist or belongs to someone else.
    """
    # prefetch_related avoids a per-message query during serialization.
    lookup = Conversation.objects.prefetch_related('messages')
    try:
        conversation = lookup.get(id=conversation_id, user=request.user)
    except Conversation.DoesNotExist:
        return Response({'error': 'Conversation not found'}, status=404)

    payload = ConversationList(conversation).data
    print(payload)
    return Response(payload)

@csrf_exempt
@login_required(login_url="/login")
def get_ghq_questions(request):
    """
    Look up a questionnaire's question list by title.

    POST body: ``{"name": "<questionnaire title>"}``.  Returns the matching
    questionnaire's questions, 404 when no title matches (previously a POST
    with an unknown title fell through to a misleading 405), and 405 for
    non-POST methods.
    """
    # Guard clause: reject anything but POST up front.
    if request.method != "POST":
        return HttpResponseNotAllowed(['POST'])

    data = json.loads(request.body.decode('utf-8'))
    question_name = data.get("name", "")
    # question_data.json lives under <BASE_DIR>/static/question/.
    file_path = os.path.join(settings.BASE_DIR, "static", "question", "question_data.json")
    with open(file_path, "r", encoding='utf-8') as f:
        questionnaires = json.load(f)
    for questionnaire in questionnaires:
        if question_name == questionnaire["title"]:
            # safe=False: the questions payload is a list, not a dict.
            return JsonResponse(questionnaire["questions"], safe=False)
    return JsonResponse({"error": "questionnaire not found"}, status=404)

@login_required(login_url="/login")
def question_completed(request):
    """Render the static "questionnaire completed" page."""
    return render(request, 'completed.html')


def _score_questionnaire(question: dict, answers: list):
    """
    Score one questionnaire definition against the submitted answers.

    Answer values 0/1/2 map to the questionnaire's three score values; any
    other value is ignored for the totals and counts as 0 in dimension sums
    (matching the original if/elif behaviour).

    :param question: questionnaire entry from question_score.json.
    :param answers: list of {"answer": int} dicts, in question order.
    :return: (all_score, score_all, dimensions) — total, per-question scores,
             and a list of {"name", "score"} per dimension.
    """
    first, second, third = question["score"][0], question["score"][1], question["score"][2]
    score_map = {0: first, 1: second, 2: third}

    # Per-question scores; unknown answer values are skipped, as before.
    score_all = [score_map[a["answer"]] for a in answers if a["answer"] in score_map]
    all_score = sum(score_all)

    dimensions = []
    for dimension in question["dimensions"]:
        # "list" holds 1-based question numbers belonging to this dimension;
        # unknown answer values contribute 0.
        dim_score = sum(
            score_map.get(answers[idx - 1]["answer"], 0)
            for idx in dimension["list"]
        )
        dimensions.append({"name": dimension["name"], "score": dim_score})

    return all_score, score_all, dimensions


@login_required(login_url="/login")
def question1(request):
    """
    Score a submitted questionnaire, generate a Word report, and return an
    LLM-written analysis of the result.

    POST body: ``{"name": <questionnaire title>,
                  "answers": [{"answer": 0|1|2}, ...],
                  "message": <the user's original question>}``.
    Responds with the total score, per-dimension scores, and the LLM reply;
    400 for bad input, 405 for non-POST, 500 on unexpected failure.
    """
    if request.method != 'POST':
        return JsonResponse({"error": "仅支持 POST"}, status=405)

    try:
        data = json.loads(request.body.decode('utf-8'))
        title = data.get("name", "")
        answers = data.get("answers", [])
        question_first = data.get("message", None)

        # Validate BEFORE touching answers[0] — the original printed
        # answers[0] first, so an empty list raised IndexError instead of
        # returning the intended 400.
        if not answers:
            return JsonResponse({"error": "没有 answers 数据"}, status=400)
        print(answers[0]["answer"])

        file_path = os.path.join(settings.BASE_DIR, "static", "question", "question_score.json")
        with open(file_path, "r", encoding='utf-8') as f:
            question_scores = json.load(f)["questionnaires"]

        # Find the questionnaire matching the submitted title.  The original
        # left eval_data/all_score unbound when nothing matched, producing a
        # NameError (reported as 500); return a clear 400 instead.
        question = next((q for q in question_scores if q["title"] == title), None)
        if question is None:
            return JsonResponse({"error": "未知的量表名称", "details": title}, status=400)
        eval_data = question["eval"]

        all_score, score_all, dimensions = _score_questionnaire(question, answers)
        save_report(all_score, score_all, dimensions, question)

        system_prompt = """您是一个心理学专家，请根据评测维度、评分解释、相关建议以及结合用户分数、用户问题来对用户心理状况进行分析，并给用户给予心理安慰以及这些心理状况产生的原因和相关的解决办法。请多生成一些相关的、好看的图标而非图片，最终所有文字全部以Markdown格式生成输出。"""
        dimension_texts = "".join([f"    {d['name']}: {d['score']}" for d in dimensions])
        eval_texts = "".join([f"    {e['name']}：{e['text']}" for e in eval_data])
        help_texts = "".join([f"    {e['help']}" for e in eval_data])
        prompt = f"""用户问题：{question_first}

【测评维度与用户分数】
{dimension_texts}

【评分解释】
    {eval_texts}

【相关建议】
    {help_texts}
"""
        llm_reply = json.loads(chat_llm(prompt, system_prompt))["choices"][0]["message"]["content"]

        return JsonResponse({
            "score": all_score,
            "group_scores": dimensions,
            "llm_reply": llm_reply
        })

    except Exception as e:
        print(e)
        return JsonResponse({"error": "服务器处理失败", "details": str(e)}, status=500)

