from langchain_community.llms.tongyi import Tongyi
from langchain_core.prompts import PromptTemplate


def generate_sse_lc(chunks):
    """Yield LLM stream chunks encoded as Server-Sent Events (SSE) frames.

    Args:
        chunks: Iterable of text chunks (e.g. from ``llm.stream(prompt)``).

    Yields:
        bytes: One UTF-8 encoded ``data: <chunk>\\n\\n`` SSE frame per
        non-empty chunk.
    """
    for chunk in chunks:
        # Skip falsy chunks instead of terminating: the previous version
        # returned on the first empty chunk, which ended the generator and
        # silently dropped all remaining output. (A generator's return
        # value is not visible to ordinary consumers anyway.)
        if not chunk:
            continue
        yield f"data: {chunk}\n\n".encode('utf-8')


from django.http import HttpResponseBadRequest, StreamingHttpResponse
from django.views.decorators.http import require_GET


@require_GET
def sse_notifications(request):
    """Stream an LLM answer to the ``ask`` query parameter as Server-Sent Events.

    GET-only view: forwards the user's question to a Tongyi LLM through a
    prompt template and streams the model's output chunks back as an SSE
    response (``text/event-stream``).

    Args:
        request: Django ``HttpRequest``; expects an ``ask`` query parameter.

    Returns:
        StreamingHttpResponse with SSE frames, or HttpResponseBadRequest
        when ``ask`` is missing or empty.
    """
    query_text = request.GET.get('ask')  # the user's question
    if not query_text:
        # Without this guard, a missing parameter would format the literal
        # string "None" into the prompt and waste an LLM call.
        return HttpResponseBadRequest("Missing required query parameter 'ask'.")

    # Prompt template (Chinese: "help me return the answer in {res}").
    pp = "帮我返回{res}中答案"
    prompt_template = PromptTemplate.from_template(pp)
    prompt = prompt_template.format(res=query_text)

    llm = Tongyi()
    chunks = llm.stream(prompt)  # lazy iterator of output chunks

    response = StreamingHttpResponse(
        generate_sse_lc(chunks),
        content_type="text/event-stream",
    )
    # SSE responses must never be cached by intermediaries or the browser.
    response["Cache-Control"] = "no-cache"
    return response
