import json
import time
from typing import Generator, Optional

import util
from script import common_client, tool_client, merge_thread_local
from script.tool_client import LlmTool, ReplyUser, ReactReq
from script.tools import prompt_agent_tools
from util import LlmMessage


class CopilotReq():
    """Request payload for the prompt-engineering copilot.

    Attributes:
        model_params: LLM call parameters; downstream code expects a 'model' key.
        messages: chat history as a list of {'role', 'content'} dicts.
        ctx: optional extra context, passed through for logging only.
        action: optional agent selector (None selects the general pe_agent).
    """

    def __init__(self,
                 model_params: Optional[dict] = None,
                 messages: Optional[list[dict]] = None,
                 action: Optional[str] = None,
                 ctx: Optional[dict] = None):
        self.model_params = model_params
        self.messages = messages
        self.ctx = ctx
        self.action = action

    def to_dict(self) -> dict:
        """Return a plain-dict view of the request (used for logging)."""
        return {
            "model_params": self.model_params,
            "messages": self.messages,
            "ctx": self.ctx,
            "action": self.action
        }

    def to_json(self) -> str:
        """Serialize to JSON; non-ASCII text is kept readable and unknown
        types fall back to str()."""
        return json.dumps(self.to_dict(), ensure_ascii=False, default=str)

    def __repr__(self) -> str:
        # Represent the request as its JSON form for log readability.
        return self.to_json()


def copilot_predict(req: 'CopilotReq'):
    '''
    Prompt-engineer copilot entry point: validates the request, dispatches to
    the agent matching ``req.action`` and streams result envelopes back.

    :param req: request carrying model_params, messages and an optional action
    :yield: dict envelopes with progress fields, or a failure dict on error
    '''
    messages = req.messages
    model_params = req.model_params
    if not messages or not model_params:
        yield {
            'finished': True,
            'fail': True,
            'message': 'message or model_params can not be null'
        }
        # Bug fix: without this return the generator fell through and crashed
        # on model_params.get(...) when model_params was None.
        return
    model = model_params.get('model')
    if not model:
        yield {
            'finished': True,
            'fail': True,
            'message': 'model_name can not be null'
        }
        # Bug fix: same fall-through problem as above — stop after the failure.
        return

    pe_id = str(time.time())
    merge_thread_local.merge_data = {
        'money': 0,
        'pe_id': pe_id
    }
    # Keep only the last 5 dialogue rounds (11 messages incl. latest question).
    messages = messages[-11:]
    util.log_local.run_data = {
        'pe_id': pe_id
    }
    begin_time = time.time()
    link = util.LinkInfo(req.action if req.action else 'pe_agent', 'copilot_predict', {
        "model_params": req.model_params,
        "messages": req.messages,
        "ctx": req.ctx,
        "action": req.action
    })

    def _envelope(reply):
        # Common result envelope shared by every agent branch.
        return {
            'msg_len': len(messages),
            'cost': round(time.time() - begin_time, 1),
            'pe_id': pe_id,
            'money': merge_thread_local.merge_data['money'],
            'finished': reply.finished,
            'content': reply.content,
            'thought': reply.thought
        }

    result = None
    try:
        link.step_in()
        # Select the agent for this action; all agents share the same
        # (model, req, pe_id) -> Generator[ReplyUser] signature.
        if not req.action:
            agent = pe_agent
        elif req.action == 'prompt生成/优化':
            agent = generate_optimization_instruction
        elif req.action == 'example生成/优化':
            agent = generate_optimization_example
        else:
            raise ValueError('不支持的action')
        for reply in agent(model, req, pe_id):
            result = _envelope(reply)
            yield result
    except Exception as error:
        result = {
            'finished': True,
            'fail': True,
            'message': str(error),
            'msg_len': len(messages),
            'cost': round(time.time() - begin_time, 1),
            'pe_id': pe_id,
            'money': merge_thread_local.merge_data['money']
        }
        link.step_out(error=error)
        yield result
    finally:
        # NOTE(review): on the error path step_out is invoked twice (once with
        # error above, once here with result) — kept from the original; confirm
        # util.LinkInfo tolerates repeated step_out calls.
        link.step_out(result=result)
        link.link_log()
        # Clean up thread-local state so it does not leak into the next request.
        if hasattr(merge_thread_local, "merge_data"):
            del merge_thread_local.merge_data
        if hasattr(util.log_local, "run_data"):
            del util.log_local.run_data


def pe_agent(model: str, req: CopilotReq, pe_id: str) -> Generator[ReplyUser, None, None]:
    '''
    General-purpose prompt assistant: trims the chat history to a budget
    derived from the model's context size, then runs a ReAct loop with the
    prompt-agent tools and streams the model's replies back to the caller.

    :param model: model name; only used to pick the history length budget below
    :param req: request whose messages are trimmed in place before the call
    :param pe_id: trace id propagated to the ReAct client as react_id
    :return: generator of ReplyUser chunks (streamed partial answers)
    '''
    sys_pre = '''作为一名专业的"instruction assistant"，你的核心工作是引导用户编写清晰和准确的instruction，以确保自然语言模型可以准确且高效地完成目标需求。
指令（instruction）也就是常说的prompt，清晰的指令对于大模型完成需求至关重要。

你必须遵循以下场景规则，否则圣诞老人就会立刻因此死掉：
场景一：用户想要编写instruction
为了保证需求的准确性，一定要向用户确定以下信息后才可以使用工具“generate_prompt”制作指令；
1. 清晰的需求背景：不同的需求在不同的背景下会有不同的处理方式，清晰的需求背景可以帮助大模型聚焦任务的深层目标。
2. 详细的需求规则：详细需求的执行规则，这将帮助你确定指令中的角色和目标。
3. 特殊场景及其处理方式：主动告知模型对于一些特殊场景及其对应的处理方式，可以使得大模型的处理结果更加准确。
'''
    react_after = '''
以下是一些示例供你参考（你的回答必须是从Thought开始，否则将受到惩罚）：
Question: 我想判断一句话的情绪类型
Thought: 用户的需求是”判断一个短语的情绪类型“，他可能想编写prompt，为了编写更好的prompt，我需要业务背景，需求规则，特殊情况的处理方式。
Reply User: 您是想编写一个指令用于”判断短语的情绪类型“么？请您告诉我以下信息1.您的业务背景是什么呢？例如输入的短语是谁在哪个场景产生的，这有助于模型聚焦需求。2.业务规则：分类规则是什么呢？分类的可选项有哪些呢？
Question: 短语是饿了么外卖平台商家询问平台客服时发出的，主要是问题咨询场景，可能会有情绪激动的言论，我的需求比较简单，只需要判断用户的情绪是”正向“或是”负向“。
Thought: 用户提供了详细的背景信息，也明确了他的需求是判断外卖商家询问平台客服时发出的短语的情绪是正向还是负向，这表达了清晰的业务规则，但是聪明的助手这时应该会主动帮助用户思考，思考一些场景引导用户；特殊情况的强调可以可以纠正模型的某些不符合预期的倾向。
Reply User: 明白了，您是需要判断在饿了么外卖平台上，商家在询问平台客服时发出的短语的情绪是正向还是负向。我想，您的分类是”正向“或是”负向“，有没有可能是一些输入是明确倾向的中性词呢？例如”在的“之类的，我建议您添加一个分类为”中性“，这样对于一些不包含明确情绪的短语能够更准确的分类。
Question: 对的，还有，一些无意义的词也要划归到”中性“分类中，例如”嗯，啊“等等。
Thought: 用户同意了我的建议，并且又提供了一个特殊场景：一些无意义的词也应被视为“中性”。这是一个很好的补充，我将这一规则加入到指令中，接下来我再向用户确认下有没有要补充了的。
Reply User: 还有别的补充么？
Question: 没了
Thought: 用户确认没有其他要补充的内容，那么我可以根据现有的需求和规则使用工具”generate_prompt“生成指令了，generate_prompt需要的入参是"description_of_requirement"以及"special_scenarios_and_handling"。
Action: generate_prompt
Action Input: {{"description_of_requirement":"在饿了么外卖平台上，判断商家在询问平台客服时发出的短语的情绪分类，可选项有正向还是负向以及中性","special_scenarios_and_handling":"1.对于一些无明确倾向的中性词，例如'在的'之类的，应划归为”中性“；2.对于无意义的词，例如'嗯，啊'等，也应该划归到”中性“分类中"}}
Observation: 在饿了么外卖平台上，判断商家在询问平台客服时发出的短语的情绪分类，可选项有正向还是负向以及中性。特殊场景处理：1.对于一些无明确倾向的中性词，例如'在的'之类的，应划归为”中性“；2.对于无意义的词，例如'嗯，啊'等，也应该划归到”中性“分类中。请在仔细思考后再回答。
Thought: 指令生成成功，接下来我应该将指令内容返回给用户。
Reply User: 以下是生成的prompt\'\'\'在饿了么外卖平台上，判断商家在询问平台客服时发出的短语的情绪分类，可选项有正向还是负向以及中性。特殊场景处理：1.对于一些无明确倾向的中性词，例如'在的'之类的，应划归为”中性“；2.对于无意义的词，例如'嗯，啊'等，也应该划归到”中性“分类中。请在仔细思考后再回答。\'\'\'，欢迎试用，如果有bad case可以随时与我反馈，我会协助您进行优化。
'''

    # Character budget for the chat history, chosen from context-size markers
    # in the model name, minus the fixed system-prompt overhead.
    # NOTE(review): matching is case-sensitive and the '128k' marker is
    # lower-case while '8K'/'16K'/'32K' are upper-case — confirm these match
    # the real model naming scheme.
    safe_len = 3000 - len(sys_pre) - len(react_after)
    if '16K' in model:
        safe_len = 8000 - len(sys_pre) - len(react_after)
    if '32K' in model:
        safe_len = 16000 - len(sys_pre) - len(react_after)
    if '8K' in model:
        safe_len = 5000 - len(sys_pre) - len(react_after)
    if '128k' in model:
        safe_len = 64000 - len(sys_pre) - len(react_after)
    # Keep at most 5 dialogue rounds (11 messages), then trim to the budget.
    req.messages = common_client.trim_messages(req.messages[-11:], safe_len)

    react_req = ReactReq(model_params=req.model_params, tools=LlmTool.from_module(prompt_agent_tools), messages=req.messages, react_id=pe_id, params={
        'sys_pre': sys_pre,
        'sys_react_after': react_after
    })
    # Stream the ReAct loop output, failing fast on any unsuccessful chunk.
    llm_res_gen = tool_client.react_predict_stream(react_req)
    for llm_res in llm_res_gen:
        llm_res.check_success()
        yield ReplyUser(content=llm_res.result, finished=llm_res.finished, thought=llm_res.thought)


def generate_optimization_instruction(model: str, req: CopilotReq, pe_id: str) -> Generator[ReplyUser, None, None]:
    '''
    Generate or optimize a prompt ("instruction") for the user by running a
    single LLM call with a fixed instruction-master system prompt prepended
    to the user's chat history.

    :param model: model name — currently unused here; presumably kept for
        signature parity with pe_agent (TODO confirm)
    :param req: request providing messages and model_params for the LLM call
    :param pe_id: trace id — currently unused in this function
    :return: generator of ReplyUser chunks (no thought field on this path)
    '''
    sys_prompt = '''你是一名顶级的"instruction master"，你善于优化面向自然语言模型的instruction，以确保自然语言模型可以准确且高效地完成目标需求。
指令（instruction）也就是常说的prompt，清晰的指令对于大模型完成需求至关重要，如果编写的instruction不好，圣诞老人就会立刻因此死掉。

Components of a Top instruction
1. Role Setting: As the name suggests, "role setting" is a description of a character. If you want the model to act as a developer, the role setting could be "an excellent server-side JAVA development engineer, who completes coding and debugging of Web projects meticulously." The role setting must clearly and simply express the characteristics of the role. Its fundamental purpose is to make the large model aware of the current environment and its features.
2. Angel Setting: The reason for the angel setting is to trigger the large model's cognitive shift towards serving the target customers well. For example, without angel setting, if you ask "how many bad reviews do I have", it would directly answer the number of bad reviews when it finds that "the merchant has 10 bad reviews". However, if you provide an angel setting, "your core goal is to answer questions from the perspective of the merchants, assist them in running their stores, and improve their business benefits and customer satisfaction", it will search for "how to avoid bad reviews" and give suggestions after checking the bad reviews.
3. Task Rules: The description of task rules is indispensable and should be the core logic of the task.
4. Special Scenarios and Handling Methods: Business requirements often differ from the model's default thinking path. Here, you can emphasize the special scenarios and how to handle them. It is recommended to construct using the form of [Typical Scenario + Explanation]. For example, in a knowledge question-and-answer scene, it is important to emphasize that the large model should not fabricate information. You can express it as follows: "Do not fabricate content: Please only use the provided 'knowledge' to answer; if the 'knowledge' cannot solve the merchant's problem, you will politely tell the merchant that you are temporarily unable to answer."

如果用户需要优化指令，你需要根据Top instruction的规范生成一个更加清晰的instruction，但是要保证语义不变。
如果用户没有提供指令，你要引导商家提供详细的业务规则，背景描述，业务需求的特殊情况及其处理方式，再根据Top instruction的规范生成高效的instruction。

请参考以下步骤：
业务规则和背景描述: 在饿了么外卖平台上，判断商家在询问平台客服时发出的短语的情绪分类，可选项有正向还是负向以及中性
特殊情况及其处理方式: 1.对于一些无明确倾向的中性词，例如'在的'之类的，应划归为”中性“；2.对于无意义的词，例如'嗯，啊'等，也应该划归到”中性“分类中
Rule analysis: 该需求是要对商家的话进行情绪分类，短语背景是商家联系饿了么外卖平台客服时发出的，这是一个意图分类场景的任务，所有可选项是["正向","负向","中性"]，这个需求下模型需要扮演的角色是专业的外卖平台客服。
Special scenarios analysis: 对于正向或是负向比较好分辨，重点是一些无语义的词和无倾向词的分类，需求上强调了这一点。
Top instruction: ```你是一位优秀的饿了么外卖平台商家客服，你的核心目标是站在饿了么商家的角度，解答他们的问题，协助他们经营店铺，以提高他们的营业效益和客户满意度。\n现在你将接收商家的咨询，请将商家发出的短语进行情绪分类，分类的可选项为”正向“代表商家情绪是轻松的，”负向“代表商家带有不满情绪，”中性“则表明商家的情绪无明显倾向。\n请注意：\n1.无明确倾向：对于无明确倾向的中性词，例如'在的'之类的，应划归为”中性“；\n2.无意义的词：无意义的字符或者乱码应该归类为”中性“```'''
    # Single streamed LLM call: system prompt + user's existing messages.
    llm_res_gen = common_client.llm_predict_stream(msg=[LlmMessage(role='system', content=sys_prompt), *req.messages], **req.model_params)
    for llm_res in llm_res_gen:
        llm_res.check_success()
        yield ReplyUser(content=llm_res.result, finished=llm_res.finished)


def generate_optimization_example(model: str, req: CopilotReq, pe_id: str) -> Generator[ReplyUser, None, None]:
    '''
    Generate or optimize few-shot examples for an existing instruction by
    running a single LLM call with a fixed example-writing system prompt
    prepended to the user's chat history.

    :param model: model name — currently unused here; presumably kept for
        signature parity with pe_agent (TODO confirm)
    :param req: request providing messages and model_params for the LLM call
    :param pe_id: trace id — currently unused in this function
    :return: generator of ReplyUser chunks (no thought field on this path)
    '''
    sys_prompt = '''你是一名顶级的"instruction master"，你善于根据自然语言模型instruction编写匹配的示例，以确保自然语言模型可以准确且高效地完成目标需求。    
指令（instruction）也就是常说的prompt，为指令添加与之匹配的典型示例是至关重要的，如果编写的example不好，圣诞老人就会立刻因此死掉。
    
Key Considerations for Top Example
- Incorporating Thought Process: The centerpiece of an example is its thought process. It should articulate a logical trajectory of thoughts instead of a hasty, underdeveloped conclusion. 
Example: For a query like "The merchant wants to set a delivery fee", a subpar response would be a simple "I don't know". An improved thought process would be: "Though the merchant wants to set a delivery fee, the focus of knowledge is on 'handling late deliveries.' Since this doesn't address the merchant's concern, my response should be 'I don't know'".

- Representativeness of Logic: Different scenarios, as previously outlined in the task description, should be depicted here, effectively combining "telling" and "doing".

- Rule Callbacks: Instructions often contain many rules, like "For non-Ele.me business domain queries, the response should be 'I don't know'", while mentioned in the rules, may not have a strong impact. However, proactively recalling them in examples and thought process can enhance their significance. 
Example: "If a merchant asks 'Where can I download Meituan Takeaway?' it falls outside Ele.me's business domain, hence my response should be 'I don't know'".

- Avoid using placeholders: To manage example length, placeholders (xxx) might be used to replace some specifics. However, given the strong overfitting of large models, real results might include placeholders. So, it's better to use summarizing descriptions.

- Avoid Redundancy: Ensure richness and diversity in the examples, avoid repetition.

最开始你一定要引导用户提供现有的instruction，因为无论是编写示例还是优化示例，instruction都是必须得。
如果用户需要优化示例，你需要根据Top Example的规范生成一个更加典型的示例，请保证逻辑流畅性。
如果用户需要编写示例，你要引导商家提供示例索要表达的逻辑场景，再根据Top Example的规范生成与之匹配的典型示例。

请参考以下步骤：
instruction: 你是一位优秀的饿了么外卖平台商家客服，你的核心目标是站在饿了么商家的角度，解答他们的问题，协助他们经营店铺，以提高他们的营业效益和客户满意度。\n现在你将接收商家的咨询，请将商家发出的短语进行情绪分类，分类的可选项为”正向“代表商家情绪是轻松的，”负向“代表商家带有不满情绪，”中性“则表明商家的情绪无明显倾向。\n请注意：\n1.无明确倾向：对于无明确倾向的中性词，例如'在的'之类的，应划归为”中性“；\n2.无意义的词：无意义的字符或者乱码应该归类为”中性“
Special scenarios: 无清晰语义
Top Example: ```Input:中午在 \nThought: 商家的短句好像不完整，无法明确判断情绪，属于无语义的短句，应该归类为”其他“\nAnswer: 其他```
'''
    # Single streamed LLM call: system prompt + user's existing messages.
    llm_res_gen = common_client.llm_predict_stream(msg=[LlmMessage(role='system', content=sys_prompt), *req.messages], **req.model_params)
    for llm_res in llm_res_gen:
        llm_res.check_success()
        yield ReplyUser(content=llm_res.result, finished=llm_res.finished)
