# Import prompt-template classes
from django.http import StreamingHttpResponse
from langchain.prompts import PromptTemplate
# Import the Tongyi (Qwen) large language model
from langchain_community.llms import Tongyi
from langchain.prompts.pipeline import PipelinePromptTemplate

# messages = [
#
# ]
#
#
# def response_sms(responses):
#     data = ''
#     for res in responses:
#         data += res
#         rrs = f"data: {res}\n\n"
#         yield rrs.encode('utf-8')
#     messages.append({'role':'AI','content':data})
#     print(messages)
#     return "No response"



class Ttool:
    """Conversation helper around the Tongyi LLM.

    Streams model replies to the client as Server-Sent Events via Django's
    ``StreamingHttpResponse`` while accumulating an in-memory conversation
    history in ``self.messages``.

    NOTE(review): the history lives on the instance, so a module-level shared
    instance carries state across requests/users — confirm that is intended.
    """

    def __init__(self):
        # Conversation history; seeded with a single system turn. Each entry
        # is a dict with 'role' and 'content' keys (see get_history()).
        self.messages = [
            {"role": "system", "content": "You are a helpful assistant."},
        ]

    def response_sms(self, responses):
        """Yield each chunk of *responses* as an SSE ``data:`` frame.

        Args:
            responses: iterable of text chunks (e.g. an LLM token stream).

        Yields:
            UTF-8 encoded ``b"data: <chunk>\\n\\n"`` frames.

        Side effect: once the stream is exhausted, the concatenated reply is
        appended to ``self.messages``. Uses the conventional 'assistant' role
        so entries are consistent with the 'system'/'user' roles used
        elsewhere in this class (the original recorded 'AI').
        """
        data = ''
        for chunk in responses:
            data += chunk
            yield f"data: {chunk}\n\n".encode('utf-8')
        self.messages.append({'role': 'assistant', 'content': data})

    def _stream_response(self, prompt):
        """Send *prompt* to Tongyi and wrap the token stream in an SSE response.

        Shared by res()/sres()/get_chatgpt(), which previously duplicated
        this boilerplate verbatim.
        """
        llm = Tongyi()
        chunks = llm.stream(prompt)
        res = StreamingHttpResponse(
            self.response_sms(chunks), content_type='text/event-stream'
        )
        # Streaming responses must not be cached by intermediaries.
        res["Cache-Control"] = "no-cache"
        return res

    def res(self, input):
        """Classify *input* as positive/negative/neutral and stream the verdict."""
        pp = """
            对{input}进行分类：
            1,好评
            2,差评
            3,中性
            结果只返回好评，差评或中评，不返回其他结果
        """
        prompt = PromptTemplate.from_template(pp).format(input=input)
        return self._stream_response(prompt)

    def sres(self, input):
        """Run the number-guessing-game prompt against *input* and stream the reply.

        Builds the final prompt from four pipelined segments (persona, skill,
        limits, history) via PipelinePromptTemplate, then appends the user's
        raw input.
        """
        # Record the user's turn first so the history segment below sees it.
        self.messages.append({"role": "user", "content": input})

        # Final layout: the four formatted segments concatenated in order.
        full_template = "{name}{skill}{limit}{history}"
        full_prompt = PromptTemplate.from_template(full_template)

        name = """
        你是一个善于{name}的游戏玩家
        """
        name_prompt = PromptTemplate.from_template(name)

        skill = """
        你的技能是{skill}
        """
        skill_prompt = PromptTemplate.from_template(skill)

        limit = """
        你的游戏限制是{limit}
        """
        limit_prompt = PromptTemplate.from_template(limit)

        history = """
        你的对话历史是{history}
        """
        history_prompt = PromptTemplate.from_template(history)

        input_prompts = [
            ("name", name_prompt),
            ("skill", skill_prompt),
            ("limit", limit_prompt),
            ("history", history_prompt),
        ]

        pipeline_prompt = PipelinePromptTemplate(
            final_prompt=full_prompt, pipeline_prompts=input_prompts
        )

        # NOTE(review): history=self.messages relies on str() of the list of
        # dicts being readable by the model — confirm this is acceptable.
        ret = pipeline_prompt.format(
            name="猜数字",
            skill="""
            1,让用户猜出你想的一个1-10之间的数字，你想好数字不能再改变，直到猜出为止。
            2,用户猜一个数字，你告诉用户猜的数字是大了、小了还是对了。
            3，根据对话历史,给用户提示
            4,用户说开始游戏,游戏才开始
            5.游戏开始后，你必须先偷偷告诉我你猜的数字，以确保你不会中途改变
            """,
            limit="""1,用户只能猜3次，3次之后如果猜不对你提示用户次数用完，并告诉用户你想的数字，然后游戏结束。
            2,只能回答与猜数字相关的问题
            3，告诉用户还有几次机会
            """,
            history=self.messages
        )
        pp = "游戏规则为{ret},用户输入为{input}"
        prompt = PromptTemplate.from_template(pp).format(ret=ret, input=input)
        return self._stream_response(prompt)

    def get_chatgpt(self, input):
        """Answer *input* as a general assistant, conditioning on the full
        conversation history, and stream the reply."""
        # Add the user's turn to the history before rendering it.
        self.messages.append({"role": "user", "content": input})

        # Plain str.format suffices here — no PromptTemplate machinery needed.
        prompt_template = """
               You are a helpful assistant. Here is the conversation history so far:
               {history}
               User: {input}
               Please respond accordingly.
           """
        prompt = prompt_template.format(history=self.get_history(), input=input)

        return self._stream_response(prompt)

    def get_history(self):
        """Render the conversation history as one ``Role: content`` line per turn."""
        return "".join(
            f"{m['role'].capitalize()}: {m['content']}\n" for m in self.messages
        )


# Module-level shared instance. NOTE(review): because Ttool keeps the
# conversation history on the instance, this singleton shares one history
# across every caller that imports `tool` — confirm that is intended.
tool = Ttool()