import asyncio
import base64
import hashlib
import json
import os
import re
import time
from collections.abc import Iterator

import httpx
import pandas as pd
import requests
from asgiref.sync import sync_to_async
from channels.db import database_sync_to_async
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.cache import cache  # Redis cache
from django.http import HttpResponseBadRequest, JsonResponse, HttpResponseNotAllowed, FileResponse, Http404, \
    StreamingHttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from openai import OpenAI
from rest_framework import status
from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response

from .models import Conversation, Message
from .serializers import ConversationList
from .tasks import process_llm_streaming_task  # Celery task for async LLM processing

# OpenAI-compatible client for Alibaba Cloud DashScope.
# SECURITY FIX: the API key was hard-coded in source control. Prefer the
# DASHSCOPE_API_KEY environment variable; the literal fallback keeps existing
# deployments working, but the exposed key should be rotated and the fallback
# removed once the environment is configured.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY", "sk-064abae05a144789b9d1e5aebae9f567"),
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

# Host of the auxiliary report-generation service (see save_report).
# all_url = "192.168.1.101"
all_url = "127.0.0.1"
# all_url = "172.20.0.89"

def chat_llm(prompt: str, system: str) -> str:
    """
    Single-shot (non-streaming) chat completion.

    Args:
        prompt: The user's message.
        system: The system prompt.

    Returns:
        The full completion serialized as a JSON string.
    """
    completion = client.chat.completions.create(
        # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
        model="qwen3-32b",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ],
        # BUG FIX: this function calls completion.model_dump_json(), which only
        # exists on a full ChatCompletion object. With stream=True the SDK
        # returns a Stream iterator and that call raised AttributeError, so
        # streaming must be off here (chat_llm_stream covers streaming).
        stream=False,
        # Qwen3 controls the "thinking" trace via enable_thinking; it must be
        # False when streaming is disabled (open-source variants default True).
        extra_body={"enable_thinking": False},
    )
    return completion.model_dump_json()

def chat_llm_stream(prompt: str, system: str) -> Iterator[str]:
    """
    Streaming chat completion: yield the assistant's reply incrementally.

    Args:
        prompt: The user's message.
        system: The system prompt.

    Yields:
        Successive text fragments of the assistant's reply.
    """
    completion = client.chat.completions.create(
        # Model list: https://help.aliyun.com/zh/model-studio/getting-started/models
        model="qwen3-32b",
        messages=[
            {"role": "system", "content": system},
            {"role": "user", "content": prompt},
        ],
        stream=True,  # stream the response chunk by chunk
        # Qwen3 "thinking" trace disabled (open-source builds default it to True).
        extra_body={"enable_thinking": False},
    )

    for chunk in completion:
        delta = chunk.choices[0].delta.content
        if delta is not None:
            yield delta


def json_extract(json_datas: str):
    """
    Extract and parse the JSON payload from a ```json fenced code block.

    Args:
        json_datas: Raw text (typically LLM output) that may contain one
            fenced JSON block; only the first block is used.

    Returns:
        The parsed object on success, or None when no block is found or the
        block is not valid JSON.
    """
    # Generalized from the original r"```json\n(.*?)\n```": tolerate optional
    # whitespace (spaces, \r\n, or none) around the payload instead of
    # requiring exactly one "\n" on each side.
    pattern = r"```json\s*(.*?)\s*```"

    match = re.search(pattern, json_datas, re.DOTALL)
    if not match:
        print("未找到匹配的JSON代码块")
        return None

    try:
        return json.loads(match.group(1))
    except json.JSONDecodeError as e:
        print("JSON解析错误:", e)
        return None


def save_report(all_score: int, score_all: list, dimensions, question):
    """
    POST evaluation data to the report service and save the returned Word file.

    Args:
        all_score: Overall score.
        score_all: Per-dimension scores.
        dimensions: Dimension metadata (shape defined by the report service).
        question: Mapping that must contain an "eval" key.

    Side effects:
        Writes ./static/reports/downloaded_report.docx on success; prints the
        HTTP status code on failure.
    """
    url = f"http://{all_url}/get_doc"
    data1 = {
        "data": {
            "all_score": all_score,
            "score_all": score_all,
            "dimensions": dimensions,
            "eval": question["eval"]
        }
    }
    print(data1)
    # BUG FIX: requests.post without a timeout can block the worker forever if
    # the report service hangs; 60s is generous for document generation.
    response = requests.post(url, json=data1, timeout=60)
    if response.status_code == 200:
        with open("./static/reports/downloaded_report.docx", "wb") as f:
            f.write(response.content)
        print("Word 文档已保存为 downloaded_report.docx")
    else:
        print("请求失败：", response.status_code)


def encode_url(img_byte: bytes) -> str:
    """
    Convert raw image bytes into the data-URL format the API expects.

    Args:
        img_byte: Raw image bytes.

    Returns:
        A "data:image/jpeg;base64,..." data URL string.
    """
    # Base64-encode the payload, then wrap it in a data URL. The MIME type is
    # assumed to be JPEG; adjust (e.g. to png) if other formats are uploaded.
    b64_payload = base64.b64encode(img_byte).decode("utf-8")
    return f"data:image/jpeg;base64,{b64_payload}"


@login_required(login_url="/login")
async def chat(request):
    """
    Chat view: Celery async processing + Redis caching + SSE streaming output.

    POST body: {"messages": [...], "conversation_id"?: int, "force_refresh"?: bool}.
    Responds with a text/event-stream of {"content": ...} events followed by a
    final payload containing "done": true. Non-POST requests render index.html.
    """
    if request.method != "POST":
        return render(request, "index.html")

    try:
        data = json.loads(request.body)
        messages = data.get("messages", [])
    except (ValueError, KeyError):
        return HttpResponseBadRequest("Invalid json")

    # BUG FIX: an empty messages list crashed below on messages[0].
    if not messages:
        return HttpResponseBadRequest("Invalid json")

    # 1.1 Find or create the conversation.
    conv_id = data.get("conversation_id")
    if conv_id:
        conversation = await database_sync_to_async(get_object_or_404)(
            Conversation, pk=conv_id, user=request.user
        )
    else:
        conversation = await database_sync_to_async(Conversation.objects.create)(
            user=request.user, title=messages[0]["content"][:255]
        )

    # 1.2 Persist all user messages from this request in one bulk insert.
    message_objects = [
        Message(conversation=conversation, role="user", content=msg["content"])
        for msg in messages
    ]
    await database_sync_to_async(Message.objects.bulk_create)(message_objects)

    # Per-request key under which the Celery task publishes its stream chunks.
    cache_key = f"chat_task_{conversation.id}_{int(time.time())}"

    # BUG FIX: the builtin hash() is salted per process (PYTHONHASHSEED), so
    # identical requests hashed differently across workers/restarts and the
    # result cache essentially never hit. Use a stable content digest instead.
    messages_hash = hashlib.md5(
        json.dumps(messages, sort_keys=True, ensure_ascii=False).encode("utf-8")
    ).hexdigest()
    cached_result_key = f"chat_result_{messages_hash}"
    cached_result = cache.get(cached_result_key)
    use_cached = bool(cached_result) and not data.get("force_refresh", False)

    if use_cached:
        # A cached reply exists and the client did not force a refresh.
        print("Using cached result")

        async def generate_cached_response():
            """Replay the cached reply as a simulated SSE stream."""
            content = cached_result.get("content", "")
            chunk_size = 10  # characters per SSE event

            for i in range(0, len(content), chunk_size):
                chunk = content[i:i + chunk_size]
                yield f"data: {json.dumps({'content': chunk, 'conversation_id': conversation.id})}\n\n"

            # Final completion event.
            final_payload = {
                "conversation_id": conversation.id,
                "done": True,
                **cached_result.get("final_data", {})
            }
            yield f"data: {json.dumps(final_payload)}\n\n"

        stream = generate_cached_response()
    else:
        # Launch the Celery task that talks to the LLM and writes chunks to Redis.
        task = process_llm_streaming_task.delay(
            messages=messages,
            conversation_id=conversation.id,
            cache_key=cache_key,
            user_id=request.user.id
        )
        # Remember the task id so it can be looked up later.
        cache.set(f"{cache_key}_task_id", task.id, timeout=300)

        async def generate_stream_response():
            """
            Poll Redis for the chunks produced by the Celery task and relay
            them to the client as SSE events.
            """
            full_response = ""
            cache_list_key = f"{cache_key}_stream"
            processed_count = 0  # number of chunks already forwarded

            max_wait_time = 300  # give up after 5 minutes
            start_time = time.time()

            while time.time() - start_time < max_wait_time:
                task_status = cache.get(f"{cache_key}_status")

                if task_status == "error":
                    error_msg = cache.get(f"{cache_key}_error", "Unknown error")
                    yield f"data: {json.dumps({'error': error_msg})}\n\n"
                    break

                # The task appends chunks to one growing list under this key.
                stream_data = cache.get(cache_list_key, [])

                # Forward only the chunks we have not sent yet.
                for i in range(processed_count, len(stream_data)):
                    chunk = stream_data[i]

                    if "error" in chunk:
                        yield f"data: {json.dumps(chunk)}\n\n"
                        return

                    if "done" in chunk:
                        final_payload = chunk.get("final_payload", {})
                        yield f"data: {json.dumps(final_payload)}\n\n"

                        # Cache the finished reply for identical future requests.
                        cache.set(cached_result_key, {
                            "content": full_response,
                            "final_data": final_payload
                        }, timeout=3600)  # cache for 1 hour

                        # NOTE(review): returning here skips the DB persist
                        # below; presumably the Celery task saves the reply on
                        # this path — confirm against tasks.py.
                        return

                    if "content" in chunk:
                        content_chunk = chunk["content"]
                        full_response += content_chunk
                        yield f"data: {json.dumps({'content': content_chunk, 'conversation_id': conversation.id})}\n\n"

                processed_count = len(stream_data)

                # Task finished without a "done" chunk: stop polling.
                if task_status == "completed":
                    break

                # BUG FIX: the original loop polled Redis in a tight busy-wait,
                # pegging a CPU and hammering the cache with blocking calls.
                # Yield to the event loop briefly between polls.
                await asyncio.sleep(0.1)

            # Timed out waiting for the task.
            if time.time() - start_time >= max_wait_time:
                yield f"data: {json.dumps({'error': 'Request timeout'})}\n\n"

            # 1.4 Persist the assistant reply once streaming finished.
            if full_response:
                await database_sync_to_async(Message.objects.create)(
                    conversation=conversation,
                    role="assistant",
                    content=full_response
                )
                # 1.5 Update the conversation preview.
                conversation.last_message = full_response[:255]
                await database_sync_to_async(conversation.save)()

        stream = generate_stream_response()

    response = StreamingHttpResponse(stream, content_type='text/event-stream')
    response['Cache-Control'] = 'no-cache'
    # Disable nginx proxy buffering so SSE chunks reach the client immediately.
    response['X-Accel-Buffering'] = 'no'
    return response

@login_required(login_url="/login")
def history_list(request):
    """
    Return all conversations of the logged-in user, newest-updated first.

    Response: {"conversations": [...serialized conversations...]}.
    """
    # Dead code removed: the original decoded request.body for POST requests
    # into an unused local and never read it.
    qs = (
        Conversation.objects
        .filter(user=request.user)
        .order_by("-updated_at")
    )
    data = ConversationList(qs, many=True).data
    payload = {
        "conversations": data,
    }
    # BUG FIX: a read-only listing should answer 200 OK, not 201 Created.
    return JsonResponse(payload, status=200)

@api_view(['DELETE'])
@login_required(login_url="/login")
@permission_classes([IsAuthenticated])
def history_delete(request, pk):
    """Delete one of the current user's conversations; answer 204 No Content."""
    # 404s automatically when the pk does not exist or belongs to another user.
    conversation = get_object_or_404(Conversation, pk=pk, user=request.user)
    conversation.delete()
    return Response(status=status.HTTP_204_NO_CONTENT)

@api_view(['GET'])
@permission_classes([IsAuthenticated])
@login_required(login_url="/login")
def history_conversations(request, conversation_id):
    """
    Return a single conversation (messages prefetched) for the current user.

    Returns 404 JSON when the conversation does not exist or belongs to
    another user.
    """
    try:
        conversation = Conversation.objects.prefetch_related('messages').get(
            id=conversation_id, user=request.user
        )
    except Conversation.DoesNotExist:
        return Response({'error': 'Conversation not found'}, status=404)

    serializer = ConversationList(conversation)
    # Debug leftover removed: the original print() dumped the full serialized
    # conversation to stdout on every request.
    return Response(serializer.data)

@csrf_exempt
@login_required(login_url="/login")
def get_ghq_questions(request):
    """
    Look up a questionnaire by title and return its "questions" list.

    POST body: {"name": "<questionnaire title>"}. The catalogue is read from
    static/question/question_data.json under BASE_DIR.

    Returns:
        200 with the questions list on a title match,
        404 JSON when no questionnaire has that title,
        400 for a malformed body, 405 for non-POST methods.
    """
    if request.method != "POST":
        return HttpResponseNotAllowed(['POST'])

    # BUG FIX: an unparseable body used to raise and produce a 500.
    try:
        data = json.loads(request.body.decode('utf-8'))
    except (UnicodeDecodeError, ValueError):
        return HttpResponseBadRequest("Invalid json")

    question_name = data.get("name", "")
    file_path = os.path.join(settings.BASE_DIR, "static", "question", "question_data.json")
    with open(file_path, "r", encoding='utf-8') as f:
        questions = json.load(f)

    for question in questions:
        if question_name == question["title"]:
            return JsonResponse(question["questions"], safe=False)

    # BUG FIX: an unknown title on a valid POST used to fall through to a
    # misleading 405 Method Not Allowed; answer 404 instead.
    return JsonResponse({"error": "question not found"}, status=404)

@login_required(login_url="/login")
def question_completed(request):
    """Render the questionnaire-completed page."""
    return render(request, 'completed.html')


