import time

from django.http import StreamingHttpResponse
from django.shortcuts import render
from langchain_community.document_loaders import TextLoader
from langchain_community.llms.tongyi import Tongyi
from langchain_core.prompts import PromptTemplate
from langchain_text_splitters import CharacterTextSplitter

from utils.Dbutils import Dbutils
from rest_framework.views import APIView
from rest_framework.response import Response
def get_request(ask):
    """Answer *ask* with retrieval-augmented generation over ./cache/haha.txt.

    Loads the cached document, splits it into ~200-char chunks, indexes them
    under the "myragkey" collection, retrieves the top-3 matches for *ask*,
    and returns the Tongyi LLM's streamed synthesis.

    :param ask: the user's question; falsy values yield an empty stream.
    :return: an iterator of response text chunks (empty when *ask* is falsy).
    """
    # Fast path: no question means no work. (Previously the document was
    # loaded and re-indexed even for empty queries.)
    if not ask:
        return iter([])

    # Load the source document and split it on newlines into small chunks.
    doc = TextLoader("./cache/haha.txt", encoding='utf-8').load()
    spliter = CharacterTextSplitter("\n", chunk_size=200, chunk_overlap=0)
    chunks = spliter.split_documents(doc)

    # NOTE(review): add_data runs on every request, so the "myragkey" index
    # likely accumulates duplicate chunks over time — consider indexing once
    # at startup instead. Verify against Dbutils semantics.
    db1 = Dbutils()
    db1.add_data(chunks, "myragkey")

    # Retrieve the 3 most relevant chunks and build the grounded prompt.
    rs = db1.query_data("myragkey", ask, 3)
    template = "用户问题是:{q},已知内容是:{ret},需要你根据已知内容和问题,整合一个结果给我"
    pt = PromptTemplate.from_template(template)
    pmpt = pt.format(q=ask, ret=rs)

    llm = Tongyi()
    # Tongyi's stream() is assumed to return a generator of text chunks.
    return llm.stream(pmpt)

# Streaming-response view function
def llm_stream(request):
    """Django view: stream the RAG answer for the ``ask`` GET parameter.

    Emits the LLM's response chunks as they arrive, as plain text.
    """
    ask = request.GET.get('ask')

    def generate():
        # Relay each chunk from the LLM stream, with a small delay to pace
        # the output (optional throttling).
        for chunk in get_request(ask):
            yield chunk
            time.sleep(0.02)

    # Declare utf-8 explicitly: the response contains Chinese text and a bare
    # 'text/plain' lets clients fall back to a wrong default encoding.
    return StreamingHttpResponse(generate(), content_type='text/plain; charset=utf-8')

# URL registration — add to the project's urls.py: path('llm_stream/', llm_stream)