# -*- coding:utf-8 -*-
# @Time    : 2023/3/23 21:39
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : train_data_merge.py
# @Software: LLM_internal

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import copy
import json

from util_tool import utils
import os
import random
import uuid
import re
from common.log import logger
import pandas as pd

# Repository root: the parent of the directory containing this file.
project_dir = os.path.dirname(os.path.dirname(__file__))

# Read an all.json-style training data file into a list of records.
def read_json(path):
    try:
        data = utils.jload(path)
        new_data = []
        for instruct_dict in data:
            # 只保留data的instruction, input, output
            instruct_dict_new = {}
            instruct_dict_new["instruction"] = instruct_dict.get("instruction", "")
            instruct_dict_new["input"] = instruct_dict.get("input", "")
            instruct_dict_new["output"] = instruct_dict.get("output","")
            # if len(instruct_dict_new["instruction"] + instruct_dict_new["input"] + str(instruct_dict_new["output"])) > 1800:
            #     print("length of data is too long:")
            #     print(instruct_dict_new)
                # continue

            new_data.append(instruct_dict_new)

    except:
        # 按行读取，每行是个json
        with open(path, "r", encoding="utf-8") as f:
            new_data = []
            for line in f:
                try:
                    # 对于文本中包含bell的数据删掉整条数据，大小写不敏感
                    if "belle" in line.lower():
                        continue
                    instruct_dict = json.loads(line)
                    #如果instruction, input, output在instruct_dict中则直接添加
                    if "instruction" in instruct_dict.keys() and "input" in instruct_dict.keys() and "output" in instruct_dict.keys():
                        new_data.append(instruct_dict)
                    else:
                        input = instruct_dict["input"]
                        target = instruct_dict["target"]

                        instruct_dict_new = {}
                        instruct_dict_new["instruction"] = input
                        instruct_dict_new["output"] = target

                        new_data.append(instruct_dict_new)

                except:
                    logger.error("read_json error:")
                    logger.error(line)
                    continue

    return new_data

# Write JSON to disk using UTF-8 encoding.
def write_json(path, data):
    """Serialize *data* to *path* by delegating to ``utils.jdump``."""
    utils.jdump(data, path)


# Pool of "omniscient assistant" persona prefixes (Chinese). merge_json
# randomly prepends one of these to a record's instruction to diversify
# the system-style preamble of QA prompts.
random_instruct_list = ["你是一个全知全能的智能助手",
"你是一个无所不知的人工智能",
"你是一个具备广博知识的智能个人助理",
"你是一个资讯渊博的智能伴侣",
"你是一个多才多艺的AI助理",
"你是一个精通一切的智能机器人",
"你是一个具备卓越学识的人工智能小助手",
"你是一个博览群书的智能语音助手",
"你是一个多学科专家的智能问答机器人",
"你是一个拥有丰富经验的智能问答管家"]

# Merge two alpaca-format JSON files into one shuffled output file.
# NOTE(review): this 3-argument definition is shadowed by the later
# merge_json(json_list, path3) below, so it is unreachable at runtime
# unless renamed — confirm intent.
def merge_json(path1, path2, path3):
    """Merge *path1* and *path2* into *path3*, shuffled.

    When either input is the QA result file, roughly 1/6 of its records
    get a random assistant-persona prefix prepended to the instruction.
    """
    data1 = read_json(path1)
    data2 = read_json(path2)

    def _add_random_persona(records):
        # With probability 1/6, prefix the instruction with a persona line.
        for record in records:
            if random.randint(0, 5) == 1:
                record["instruction"] = random.choice(random_instruct_list) + "\n" + record["instruction"]

    if path2 == "QA_task_data/result_merge.json":
        _add_random_persona(data2)
    elif path1 == "QA_task_data/result_merge.json":
        _add_random_persona(data1)

    data_result = data1 + data2
    # Shuffle so records from the two sources are interleaved.
    random.shuffle(data_result)
    write_json(path3, data_result)

# Convert alpaca records to the chatllama format: instruction (+ input)
# maps to user_input, output maps to completion.
def convert_format(path1, path2):
    """Read alpaca-format data from *path1*; write chatllama format to *path2*."""
    converted = []
    for record in read_json(path1):
        user_input = record.get("instruction", "")
        # Key presence (not truthiness) decides whether input is appended,
        # so an empty input still contributes the newline separator.
        if "input" in record:
            user_input = user_input + "\n" + record.get("input", "")
        converted.append({
            "user_input": user_input,
            "completion": record.get("output", ""),
        })
    write_json(path2, converted)


# Merge every *.json file directly under a directory into one shuffled file.
def merge_path_all_files(path1, path2):
    """Concatenate all .json files in directory *path1* into *path2*, shuffled."""
    data = []
    for file in os.listdir(path1):
        # endswith is stricter than the old substring test: it no longer
        # matches names like "x.jsonl" or "x.json.bak".
        if not file.endswith(".json"):
            continue
        # os.path.join works whether or not path1 carries a trailing slash.
        data.extend(read_json(os.path.join(path1, file)))

    random.shuffle(data)
    write_json(path2, data)

# Merge the files/directories listed in json_list into one shuffled JSON file.
# NOTE(review): this redefinition shadows the earlier merge_json(path1, path2,
# path3) above — only this 2-argument version is callable at runtime.
def merge_json(json_list, path3):
    """Merge every entry of *json_list* (a file path or a directory) into *path3*."""
    data_result = []
    for json_file in json_list:
        if os.path.isdir(json_file):
            # Directory entry: pull in every .json file directly inside it.
            for file in os.listdir(json_file):
                # Stricter than the old substring test (skips .jsonl etc.).
                if not file.endswith(".json"):
                    continue
                # Safe with or without a trailing slash on json_file.
                data_result.extend(read_json(os.path.join(json_file, file)))
        else:
            data_result.extend(read_json(json_file))
    random.shuffle(data_result)
    write_json(path3, data_result)

# Convert alpaca-format records into vicuna conversation format.
def convert_vicuna_format(path1, path2):
    """Read alpaca data from *path1*; write vicuna-style conversations to *path2*.

    Each record becomes {"id": <uuid>, "conversations": [human, gpt]} where
    the human turn is instruction + "\\n" + input when input is non-empty.
    """
    new_data = []
    for record in read_json(path1):
        human_value = record.get("instruction", "")
        # Truthiness check: an empty input string adds no separator here.
        if record.get("input", ""):
            human_value = human_value + "\n" + record.get("input", "")
        new_data.append({
            # uuid4 yields a random unique id without embedding the host
            # MAC address / timestamp the way uuid1 did.
            "id": str(uuid.uuid4()),
            "conversations": [
                {"from": "human", "value": human_value},
                {"from": "gpt", "value": record.get("output", "")},
            ],
        })

    write_json(path2, new_data)

def handle_human_value_alignment_special(path1, path2):
    """Convert the human-value-alignment set to vicuna format with filtering.

    Drops records whose prompt contains the true/false-question marker
    "一、判断题(", whose prompt is shorter than 150 characters, or whose
    answer is shorter than 50 characters. Keeps at most 4000 records.
    """
    new_data = []
    for record in read_json(path1):
        if record.get("input", ""):
            human_value = record.get("instruction", "") + "\n" + record.get("input", "")
        else:
            human_value = record.get("instruction", "")
        # Skip true/false-question style prompts and very short prompts.
        if "一、判断题(" in human_value:
            continue
        if len(human_value) < 150:
            continue
        output_value = record.get("output", "")
        # Skip very short answers.
        if len(output_value) < 50:
            continue
        new_data.append({
            "id": str(uuid.uuid1()),  # unique per-record id
            "conversations": [
                {"from": "human", "value": human_value},
                {"from": "gpt", "value": output_value},
            ],
        })

    # Cap the output at the first 4000 records (the old comment said 1000;
    # the slice below is the source of truth).
    new_data = new_data[:4000]
    write_json(path2, new_data)

def find_json_files(path):
    """Yield the path of every .json file under *path*, recursively.

    ``os.walk`` already descends into subdirectories, so no manual
    recursion is needed. (The previous version re-invoked itself per
    subdirectory and discarded the resulting generators — dead code that
    also joined paths against the wrong base.)
    """
    for root, _dirs, files in os.walk(path):
        for file in files:
            if file.endswith(".json"):
                yield os.path.join(root, file)
# Convert MOSS multi-turn chat dumps into vicuna conversation format.
def convert_moss_vicuna_format(path1, path2):
    """Walk *path1* for MOSS chat JSON files; write vicuna records to *path2*.

    A whole conversation is discarded when any cleaned turn still contains
    the literal string "MOSS" after the role markers are stripped.
    """
    result = []
    for file_path in find_json_files(path1):
        chat_json = utils.jload(file_path)
        conversations = []
        keep = True
        for turn in chat_json["chat"].values():
            # Strip the human role marker and end-of-human token.
            human_text = str(turn["Human"]).replace("<|Human|>:", "").replace("<eoh>", "")
            if "MOSS" in human_text:
                keep = False
                break
            conversations.append({"from": "human", "value": human_text})
            # Strip the assistant role marker and end-of-message token.
            gpt_text = str(turn["MOSS"]).replace("<|MOSS|>:", "").replace("<eom>", "")
            if "MOSS" in gpt_text:
                keep = False
                break
            conversations.append({"from": "gpt", "value": gpt_text})
        if keep:
            result.append({"id": str(uuid.uuid1()), "conversations": conversations})
    write_json(path2, result)

# Merge all vicuna-format .json files under a directory, dropping records
# that contain empty conversation values.
def merge_vicuna_json(path1, path2):
    """Concatenate vicuna files in directory *path1* into *path2*, shuffled.

    Records where any turn has an empty / whitespace-only value are logged
    and excluded.
    """
    data = []
    for file in os.listdir(path1):
        # Stricter than the old substring test (skips .jsonl, .json.bak).
        if not file.endswith(".json"):
            continue
        # os.path.join is safe with or without a trailing slash on path1.
        json_data = utils.jload(os.path.join(path1, file))

        for record in json_data:
            need_filter = False
            for conversation in record["conversations"]:
                # Reject the whole record if any turn is empty/blank.
                if not conversation["value"] or (len(conversation["value"].strip()) < 1):
                    logger.error("file: %s, value is empty" % file)
                    logger.error("conversation: %s" % conversation)
                    need_filter = True

            if not need_filter:
                data.append(record)

    random.shuffle(data)
    write_json(path2, data)



def covert_vicuna_to_alpaca(path1, path2):
    """Flatten vicuna conversations from *path1* into alpaca records at *path2*.

    Each human turn fills ``instruction``; the following gpt turn fills
    ``output`` and flushes the record. ``input`` is always empty.
    """
    records = []
    for record in utils.jload(path1):
        pending = {}
        for turn in record["conversations"]:
            if turn["from"] == "human":
                pending["instruction"] = turn["value"]
            else:
                pending["output"] = turn["value"]
            pending["input"] = ""
            # A gpt turn completes one alpaca record.
            if turn["from"] == "gpt":
                records.append(pending)
                pending = {}
    write_json(path2, records)




def check_planner_data(path1, target_path):
    """Clean planner dialogue data and write the kept records to *target_path*.

    Keeps only 2-turn conversations; repairs corrupted variants of 缴纳
    ("pay/remit"); validates/normalizes JSON-style answers via
    ``utils.json_load_llm_result``; randomly down-samples certain record
    classes (~2/3 dropped); drops answers that stall ("稍等"+"计算") or
    mention functions/scripts.

    NOTE(review): the gpt branch reads the local ``input`` bound in the
    human branch — this assumes a human turn always precedes the gpt turn;
    a gpt-first record would raise UnboundLocalError. Confirm input data
    guarantees this ordering.
    """
    json_data = utils.jload(path1)
    new_data = []
    for json_data_dict in json_data:
        conversations = json_data_dict["conversations"]
        if len(conversations) == 2:
            for conversation in conversations:
                if conversation["from"] == "human":
                    input = conversation["value"]
                    if "介绍自己用热情活泼的" in input:
                        # Randomly drop ~2/3 of this "self-introduction" class.
                        if random.randint(0, 2) != 1:
                            break

                    # Repair three corrupted variants of 缴纳.
                    input = str(input).replace("缴纔", "缴纳")
                    input = str(input).replace("缴纪", "缴纳")
                    input = str(input).replace("缴纩", "缴纳")
                    conversation["value"] = filter_vicuna_data(input)
                else:
                    output = conversation["value"]
                    output = str(output).replace("缴纔", "缴纳")
                    output = str(output).replace("缴纪", "缴纳")
                    output = str(output).replace("缴纩", "缴纳")

                    # Drop answers that stall with "please wait ... calculating".
                    if "稍等" in output and "计算" in output:
                        print(output)
                        break
                    conversation["value"] = output
                    if "json" in input:
                        try:
                            result = utils.json_load_llm_result(output)
                            # When the parsed result is empty ({}), randomly
                            # drop ~2/3 of such records.
                            if not result:
                                if random.randint(0, 2) != 1:
                                    break

                            conversation["value"] = str(result)
                            new_data.append(json_data_dict)
                        except Exception as e:

                            logger.error(e)
                            logger.error(input)
                            logger.error(output)
                    else:
                        # Drop answers that talk about functions or scripts.
                        if ("函数" in output) or ("脚本" in output):
                            print(output)
                            break
                        new_data.append(json_data_dict)

    utils.jdump(new_data, target_path)

def filter_vicuna_data(input_str):
    """Shorten over-long planner prompts (> 2000 chars) by replacing known
    boilerplate sections with condensed equivalents.

    Strings of 2000 characters or fewer, and long strings without any of
    the three known markers, are returned unchanged.
    """
    # Short prompts pass through untouched.
    if len(input_str) <= 2000:
        return input_str

    if "作为一个最专业规划师的解决方案" in input_str:
        # Condense the section between "注意以下事项：" and "用户问题：".
        input_str = re.sub(r'(?s)注意以下事项：.*?用户问题：', '注意以下事项：1.你当前的服务范围仅是退休养老规划和保险规划，你当前没有能力也不能给出其他范围任何问题（包含咨询天气、政治、语文、历史、数学题计算等等）的解决方案，对于其他范围的问题，你必须拒绝回答。\n2.输出结果是接下来要执行的策略（下一步沟通策略，下一步执行脚本名称，下一步查询知识库内容），输出结果的格式是json，格式是:{"下一步沟通策略":"","下一步执行脚本名称":"无","下一步查询知识库内容":"无"}\n3.如果背景知识可以用于回答用户问题，可以参考背景知识来确保你的解决方案的事实性和准确性\n用户问题：', input_str)
    elif "提取对话记录中的用户信息" in input_str:
        # Condense the dialogue-record instructions, then delete the
        # worked example between "示例（任务忽略）：" and "示例结束;".
        input_str = re.sub(r'(?s)其中\'===\'后是对话记录.*?不要在json中输出', "只使用'==='之间的对话记录来做出决策。需要注意以下信息：你需要输出对话记录中所有包含需要识别信息的结果，一步一步确认，一个都不要遗漏，只要是用户有提及，即使用户说没有的，也要输出，但用户未提及的则不要输出", input_str)
        input_str = re.sub(r'(?s)示例（任务忽略）：.*?示例结束;', '', input_str)
    elif "请记住，你是犀心小助，你是一个专业的专属规划师" in input_str:
        # Condense the attention section, then drop the domain-knowledge
        # section between "专业知识：" and "会话历史：".
        input_str = re.sub(r'(?s)需要注意以下事项：.*?当前的会话阶段做出回应', '需要注意以下事项：不要忘记你的业务范围，你当前的服务范围仅是退休养老规划和保险规划，你当前没有能力也不能给出其他范围任何问题（包含咨询天气、政治、语文、历史、数学题计算等等）的解决方案，对于其他范围的问题，你必须拒绝回答。根据会话历史和当前的会话阶段做出回应。只产生犀心小助的话术，不要产生用户的对话，每次只生成一个回复，请给用户回复的空间！', input_str)
        input_str = re.sub(r'(?s)专业知识：.*?会话历史：', '会话历史：', input_str)

    return input_str

def handel_prm_data(path1, path2):
    """Convert a PRM-800K style .jsonl file (one problem/solution pair per
    line) into vicuna conversation format, keeping at most the first 6000
    records, and write the result to *path2*.
    """
    result_datas = []
    # Read the JSONL input one record per line.
    with open(path1, "r", encoding="utf-8") as f:
        for line in f:
            data = json.loads(line)
            result_datas.append({
                "id": str(uuid.uuid1()),  # unique per-record id
                "conversations": [
                    {"from": "human", "value": data.get("problem", "")},
                    {"from": "gpt", "value": data.get("solution", "")},
                ],
            })

    # Keep only the first 6000 records (the old comment said 3000; the
    # slice below is the source of truth).
    result_datas = result_datas[:6000]
    utils.jdump(result_datas, path2)

def handle_llm_data(path1, path2):
    """Run check_planner_data on selected dialogue files under *path1*,
    writing the cleaned copy to the same filename under *path2*.

    NOTE(review): the first guard restricts processing to exactly
    "badcase.json" — this looks like a debugging leftover; confirm whether
    every .json file should be processed instead.
    """
    for file in os.listdir(path1):
        if not file == "badcase.json":
            continue
        if file.endswith(".json"):
            # os.path.join is safe with or without trailing slashes.
            check_planner_data(os.path.join(path1, file), os.path.join(path2, file))

def prepare_planner_vicuna_data():
    """Full pipeline: rebuild the knowledge-base conversations, clean the
    dialogue data, then merge everything into the final vicuna training file."""
    load_knowledge_data()
    handle_llm_data("../data_set/dialogue_data/", "../data_set/insurance_planner/")
    merged_output = project_dir + "/data_set/merge_output/vicuna/insurance_planner_insurance_knowledge_vicuna_all_new.json"
    merge_vicuna_json(project_dir + "/data_set/insurance_planner/", merged_output)

def load_knowledge_data():
    """Load insurance-planner FAQ CSVs and write them to
    planner_knowledge.json as vicuna-format conversations.

    Each CSV row supplies one human/gpt pair; the column pair used depends
    on which columns the file has (答案 / 解决方案 / 知识内容).
    """
    csv_path = project_dir + "/internal_server/knowledge_base/raw_data/insurance_planner_gpt/"

    faqs = []
    if os.path.isdir(csv_path):
        for file in os.listdir(csv_path):
            if not file.endswith('.csv'):
                continue
            frame = pd.read_csv(csv_path + file)
            rows = frame.to_dict('records')
            logger.info(f'{file}加载成功, 共有{len(rows)}条数据')
            faqs.extend(rows)

    logger.info('共有{}条数据'.format(len(faqs)))
    result_datas = []
    for faq in faqs:
        # Choose the question/answer column pair this CSV actually uses.
        if "答案" in faq:
            question, answer = faq['用户问题'].strip(), faq['答案'].strip()
        elif "解决方案" in faq:
            question, answer = faq['用户问题'].strip(), faq['解决方案'].strip()
        else:
            question, answer = faq['知识点'].strip(), faq['知识内容'].strip()
        result_datas.append({
            "id": str(uuid.uuid1()),  # unique per-record id
            "conversations": [
                {"from": "human", "value": question},
                {"from": "gpt", "value": answer},
            ],
        })

    utils.jdump(result_datas, project_dir + "/data_set/insurance_planner/planner_knowledge.json")








if __name__ == "__main__":
    # Entry point: rebuild the insurance-planner vicuna training set end to
    # end. The commented-out calls in this section are earlier one-off data
    # preparation runs kept for reference.
    # merge_path_all_files("common_sense/", "common_sense/common_sense_all.json")
    # merge_path_all_files("../data_generate/entity/", "../data_generate/entity/entity_all.json")
    # merge_json("QA_task_data/lingxi.train.json", "insurance_planner/insurance_planner_insurance_knowledge.json",
    #            "insurance_planner/insurance_planner_insurance_knowledge_new.json")

    # json_list = ["../data_set/insurance_planner/", "../data_set/common_sense/", "../data_set/extract_data/",
    #              "../data_set/finance/", "../data_set/insurance/", "../data_set/reading_comprehension/"]
    # #
    # merge_json(json_list, "../data_set/merge_output/insurance_planner_insurance_knowledge_new.json")
    # convert_vicuna_format("../data_set/merge_output/insurance_planner_insurance_knowledge_new.json","../data_set/merge_output/insurance_planner_insurance_knowledge_vicuna.json")
    # convert_moss_vicuna_format("../data_set/raw_data/moss/conversations/conversation_without_plugins","../data_set/moss/moss_vicuna.json")

    # load_knowledge_data()
    prepare_planner_vicuna_data()
    #
    # merge_vicuna_json([project_dir +"/data_set/dialogue_data/llm.json", project_dir +"/data_set/dialogue_data/llm_auto.json"],project_dir +"/data_set/merge_output/vicuna/insurance_planner_insurance_knowledge_vicuna_all_new.json")

    # covert_vicuna_to_alpaca(project_dir +"/data_set/dialogue_data/llm.json", project_dir +"/data_set/dialogue_data/llm_alpaca.json")
    # check_planner_data(project_dir + "/data_set/dialogue_data/llm.json", project_dir + "/data_set/dialogue_data/llm_new.json")
    # check_planner_data(project_dir + "/data_set/dialogue_data/llm_auto.json", project_dir + "/data_set/dialogue_data/llm_auto_new.json")
    # merge_vicuna_json([project_dir +"/data_set/dialogue_data/llm_new.json", project_dir +"/data_set/dialogue_data/llm_auto_new.json"],project_dir +"/data_set/merge_output/vicuna/insurance_planner_insurance_knowledge_vicuna_all_new.json")



    # merge_json([project_dir +"/data_set/dialogue_data/llm_alpaca.json", project_dir +"/data_set/dialogue_data/llm_auto_alpaca.json"], project_dir +"/data_set/merge_output/vicuna/insurance_planner_insurance_knowledge_alpaca_all_new.json")

    # result = utils.jload("../data_set/raw_data/企微金条第二版0501-0725-replace.json")
    # result_new = []
    # for json_data_dict in result:
    #     conversations = json_data_dict["conversations"]
    #     if len(conversations) > 3:
    #         if conversations[0]["from"] == "gpt":
    #             if "\n\n" in conversations[0]["value"]:
    #                 value = conversations[0]["value"]
    #                 conversations[0]["value"] = str(value).split("\n\n")[-1]
    #             conversations.insert(0, {
    #             "value": "我通过了你的联系人验证请求，现在我们可以开始聊天了",
    #             "from": "human"
    #         })
    #
    #
    # for json_data_dict in result:
    #     conversations = json_data_dict["conversations"]
    #     if len(conversations) > 3:
    #         have_user = False
    #         for conversation in conversations:
    #             if conversation["from"] == "human":
    #                 have_user = True
    #             value = conversation["value"]
    #             conversation["value"] = re.sub(r'这是一条引用.*?---\n', '', value, flags=re.DOTALL)
    #             value = conversation["value"]
    #             conversation["value"] = re.sub(r'这是一条引用.*?\n', '', value, flags=re.DOTALL)
    #
    #         if have_user:
    #             instruct_dict_new = {}
    #             instruct_dict_new["id"] = json_data_dict["id"]
    #             instruct_dict_new["conversations"] = conversations
    #             result_new.append(instruct_dict_new)
    #
    # utils.jdump(result_new, "../data_set/raw_data/wechat_0501-0725-replace_new.json")
