# -*- coding:utf-8 -*-

# @Time    : 2023/11/28 17:08
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : generate_enterprise_wechat.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import pandas as pd
import os
from data_generate import utils
from bot.insurance_planner_gpt.agent.base_agent import LLMAgent
import asyncio
import uuid
import random

# Rewrite solution-generation prompts and regenerate answers with the LLM.
def clean_solution_data(path, target_path):
    """Rewrite the "solution" prompts of a training set and re-run them
    through the LLM, accumulating (input, origin_output, result) rows
    into a CSV at ``target_path``.

    Args:
        path: JSON file of training records; each record carries a
            ``conversations`` list of ``{"from", "value"}`` dicts.
        target_path: CSV file to extend (created if it does not exist).

    NOTE(review): results are only flushed once at the very end, so a
    failed LLM call loses all progress for this run — confirm whether
    incremental saving is wanted.
    """
    train_datas = utils.jload(path)
    # Resume from an existing result file if present, otherwise start fresh.
    if os.path.exists(target_path):
        result_datas = pd.read_csv(target_path)
    else:
        result_datas = pd.DataFrame(columns=["input", "origin_output", "result"])
    result_rows = result_datas.to_dict(orient="records")

    # Old instruction wording -> new "strategy" wording. Only prompts that
    # contain the first (marker) sentence are rewritten at all.
    marker = "1.回复的内容是接下来和用户沟通的方案，解决方案需要限定在解决方案集合中，引导操作的时候每次只给一个方案，不要一次性给出多个方案，不要无中生有；"
    rewrites = [
        (marker,
         """1.回复的策略是接下来和用户沟通的方案，解决方案需要限定在解决方案集合中，引导操作的时候每次只给一个方案的一个步骤，不要一次性给出多个方案或多个步骤，不要无中生有；"""),
        ("2.回复的内容将用于生成具体回复用户的话术，不要包含不必要的信息；",
         """2.回复的策略将用于生成具体回复用户的话术，不要包含不必要的信息，输出的结果需要简洁，回复内容只用于一次沟通，因此不要包含多条指令或多个步骤"""),
        ("3.回复的内容只用于下一轮和用户沟通的话术生成，也不要列出具体详细的步骤，也不用枚举各种用户问题或解决方案的情况，只需要给出最后解决方案的结论；",
         """3.回复的策略只是一句短语，不要列出具体详细的步骤，也不用枚举各种用户问题或解决方案的情况，只需要给出最后解决方案的结论；"""),
        ("限制条件：控制回复的字数在250字以内。", ""),
        ("根据用户问题、用户情境、对话记录和解决方案集合，请一步步思考，分析原因，接下来销售员的解决方案是：",
         "根据用户问题、用户情境、对话记录和解决方案集合，请一步步思考，接下来销售员的下一步沟通策略是："),
    ]

    # NOTE(review): hard-coded slice — only records 20..399 are processed.
    for data in train_datas[20:400]:
        conversations = data["conversations"]
        if len(conversations) != 2:
            continue
        prompt = conversations[0]["value"]  # renamed: `input` shadowed a builtin
        origin_output = conversations[1]["value"]
        if marker not in prompt:
            continue
        for old, new in rewrites:
            prompt = prompt.replace(old, new)
        print(prompt)
        chat = LLMAgent(prompt)
        result = asyncio.run(chat.achat_auto_llm(type="gpt"))
        print(result)
        result_rows.append({
            "input": prompt,
            "origin_output": origin_output,
            "result": result,
        })
    pd.DataFrame(result_rows).to_csv(target_path, index=False)

def _shorten_solution(value):
    """Trim a solution answer down to one short phrase.

    Mirrors the original filtering: long answers are cut at the first
    usable "。" segment, then at "；", list numbering "1." is stripped,
    and anything still longer than 40 chars is dropped.

    Returns the trimmed string, or None when the row should be skipped.
    """
    if len(value) > 40 and "。" in value:
        parts = value.split("。")          # split once, reuse both segments
        first, second = parts[0], parts[1]
        if len(first) > 15:
            value = first
        elif len(second) > 15:
            value = second
        else:
            return None                    # neither segment is substantial
    if "；" in value:
        value = value.split("；")[0]
    value = value.replace("1.", "")        # strip list numbering (no-op if absent)
    if len(value) > 40:
        return None
    return value


def csv2vicuna(file_path, target_path):
    """Convert a reviewed CSV into vicuna-style JSON training records.

    Each kept row becomes ``{"id", "conversations": [human, gpt]}`` where
    the human turn is the ``prompt`` column and the gpt turn is the
    reviewed ``修改后结果`` column. Rows with either field missing are
    printed and skipped. When the file path contains "解决方案" the
    answer is additionally shortened/filtered (see _shorten_solution).

    Args:
        file_path: source CSV with ``prompt`` / ``修改后结果`` columns.
        target_path: destination JSON file (written via utils.jdump).
    """
    df = pd.read_csv(file_path)
    train_datas = []
    for _, row in df.iterrows():
        if pd.isna(row['prompt']) or pd.isna(row['修改后结果']):
            print(row)                     # surface incomplete rows for review
            continue
        value = row['修改后结果']
        if "解决方案" in file_path:
            value = _shorten_solution(value)
            if value is None:
                continue
        # NOTE(review): uuid1 embeds host MAC/time; uuid4 would be safer if
        # the ids only need to be unique — confirm before changing.
        train_datas.append({
            'id': str(uuid.uuid1()),
            'conversations': [
                {'from': 'human', 'value': row['prompt']},
                {'from': 'gpt', 'value': value},
            ],
        })
    utils.jdump(train_datas, target_path)

# Merge all training data files into one shuffled JSON file.
def merge_train_data(path, target_path):
    """Merge every ``.json`` file under ``path`` into a single shuffled
    training file ``<target_path>wechat_all_train_data.json``.

    A fixed set of oversized source files is randomly downsampled to 1/8
    of its records before merging (the old comment said 1/3 but the code
    divides by 8 — the code wins).

    Each output record keeps only the ``id`` and ``conversations`` fields.
    (Bug fix: the original built this pruned dict but appended the raw
    record instead, making the pruning dead code.)
    """
    downsampled_files = {
        "human_value_alignment_instructions.json",
        "counterfactural_correction_multi_round_chat.json",
        "bot_dialogue.json",
        "cot_2023-09-02.json",
    }
    datas = []
    for file in os.listdir(path):
        if not file.endswith('.json'):
            continue
        data = utils.jload(path + file)
        if file in downsampled_files:
            # Randomly keep 1/8 of the records of the oversized files.
            data = random.sample(data, len(data) // 8)
        for record in data:
            datas.append({
                'id': record['id'],
                'conversations': record['conversations'],
            })
    # Shuffle so training does not see the source files in order.
    random.shuffle(datas)
    utils.jdump(datas, target_path + "wechat_all_train_data.json")



if __name__ == '__main__':
    # Data-set layout used by every step below.
    raw_dir = "../data_set/enterprise_wechat/raw_data/"
    train_dir = "../data_set/enterprise_wechat/train_data_files/"
    result_dir = "../data_set/enterprise_wechat/result/"

    # clean_solution_data(raw_dir + "solution_20231121_output.json", result_dir + "wechat.csv")

    # Convert the reviewed bad-case CSV into vicuna-format training records.
    csv2vicuna(raw_dir + "badcase.csv", train_dir + "badcase.json")

    # csv2vicuna(raw_dir + "解决方案.csv", train_dir + "解决方案.json")

    # Merge every per-source training file into one shuffled data set.
    merge_train_data(train_dir, result_dir)
