# python manage.py collectstatic --noinput
# uvicorn Django_Langchain_RAG.asgi:application --reload
from asgiref.sync import sync_to_async
from django.shortcuts import render
import json
import asyncio
from concurrent.futures import ThreadPoolExecutor
from django.http import StreamingHttpResponse
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_http_methods
from langchain_ollama import OllamaLLM
import logging

from file.models import KnowledgeBase
from .utils.rag_utils import retrieve_text_vectors,retrieve_image_vectors,generate_with_ollama
from django.views import View

logger = logging.getLogger(__name__)

# 初始化 Ollama 模型
llm = OllamaLLM(model="qwen2.5vl:7b", temperature=0.7)

# 创建线程池执行器
executor = ThreadPoolExecutor(max_workers=1)


async def chat_stream(request):
    """Stream LLM output to the client as Server-Sent Events.

    Reads the user question from the ``message`` GET parameter and streams
    ``data: {"text": ...}`` frames as the model produces chunks, ending with
    a ``{"end": true}`` frame, or a ``{"error": ...}`` frame on failure.
    """
    user_input = request.GET.get('message', '')
    if not user_input:
        return StreamingHttpResponse("请输入问题。", content_type='text/plain')

    loop = asyncio.get_event_loop()

    async def generate_async():
        # llm.stream() is a blocking synchronous iterator. NOTE: the previous
        # code ran only the *creation* of this generator in the executor and
        # then iterated it on the event loop, blocking every other request.
        gen = llm.stream(user_input)
        sentinel = object()  # distinguishes "stream exhausted" from any chunk
        try:
            while True:
                # Fetch each chunk in the worker thread so the blocking
                # model call never stalls the event loop.
                chunk = await loop.run_in_executor(executor, next, gen, sentinel)
                if chunk is sentinel:
                    break
                yield f"data: {json.dumps({'text': chunk}, ensure_ascii=False)}\n\n"
            # 流结束时发送一个结束标志 (send an end-of-stream marker)
            yield f"data: {json.dumps({'end': True}, ensure_ascii=False)}\n\n"
        except Exception as e:
            # Lazy %-formatting; logger.exception records the traceback too.
            logger.exception("模型调用失败: %s", e)
            yield f"data: {json.dumps({'error': str(e)}, ensure_ascii=False)}\n\n"

    return StreamingHttpResponse(generate_async(), content_type='text/event-stream')


def chat_view(request):
    """Render the chat front-end page."""
    template_name = 'chat/chat.html'
    return render(request, template_name)


def rag_query_view(request):
    """Render the RAG query page, listing every knowledge base for selection."""
    context = {'knowledge_bases': KnowledgeBase.objects.all()}
    return render(request, 'chat/rag_query2.html', context)

@method_decorator(csrf_exempt, name='dispatch')
class RagQueryView(View):
    """Streaming RAG endpoint.

    Retrieves text/image context from the selected knowledge bases, builds a
    prompt, and streams an Ollama-generated answer back to the client.
    """

    async def post(self, request):
        """Handle a RAG query.

        Expects a JSON body ``{"knowledge_bases": [ids], "question": str}``.
        Streams one JSON line carrying the retrieved images, followed by the
        raw answer chunks, one per line.
        """
        data = json.loads(request.body)
        know_base_ids = data.get("knowledge_bases", [])
        question = data.get("question", "")

        # Vector-store retrieval is synchronous; run it off the event loop.
        texts = await sync_to_async(retrieve_text_vectors)(know_base_ids, question)
        logger.debug("retrieved texts: %s", texts)  # was a stray print() left in production code
        images = await sync_to_async(retrieve_image_vectors)(know_base_ids)

        # 构造提示词 (build the prompt)
        context = "\n".join(texts)
        prompt = f"""
你是一个知识助手，请根据以下内容回答问题：
上下文：
{context}
问题：
{question}
回答：
"""

        # 流式生成回答 (stream the generated answer)
        async def generate():
            yield json.dumps({"images": images}) + "\n"
            # generate_with_ollama returns a blocking sync generator; pull
            # each chunk in a worker thread so the event loop stays free.
            gen = generate_with_ollama(prompt)
            sentinel = object()
            while True:
                chunk = await sync_to_async(next)(gen, sentinel)
                if chunk is sentinel:
                    break
                yield chunk + "\n"

        return StreamingHttpResponse(generate(), content_type='text/event-stream')
