# -*- coding:utf-8 -*-

# @Time    : 2023/3/23 21:39
# @Author  : zengwenjia
# @Email   : zengwenjia@lingxi.ai
# @File    : merge.py

# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import re
import json
import tqdm
import utils
import random
import os
import pandas as pd
import uuid


# Read an instruction JSON file (e.g. all.json), keeping only instruction/input/output
def read_json(path):
    """Load an instruction-tuning JSON file and keep only the
    instruction/input/output fields of each record (missing fields
    default to the empty string)."""
    raw_records = utils.jload(path)
    kept_fields = ("instruction", "input", "output")
    return [
        {field: record.get(field, "") for field in kept_fields}
        for record in tqdm.tqdm(raw_records)
    ]


# Write JSON using UTF-8 encoding
def write_json(path, data):
    """Serialize *data* to *path* as JSON via the shared ``utils.jdump`` helper.

    NOTE(review): this function is redefined later in this file; the later
    definition shadows this one at runtime.
    """
    utils.jdump(data, path)


# Merge any number of JSON files into one shuffled output file
def merge_json(file_paths, output_path):
    """Concatenate the records of every file in *file_paths*, shuffle them
    in place, and write the result to *output_path*."""
    merged = []
    for source_path in file_paths:
        merged.extend(read_json(source_path))

    # Shuffle so no single source dominates any region of the output file.
    random.shuffle(merged)
    write_json(output_path, merged)

def get_wechat_data(path):
    """Load enterprise-WeChat records from *path*, drop two-turn
    conversations whose human prompt is a true/false question, and return
    a random 30% sample of the rest."""
    records = utils.jload(path)
    kept = []
    for record in tqdm.tqdm(records):
        entry = {
            "id": record.get("id", ""),
            "conversations": record.get("conversations", ""),
        }
        convs = entry["conversations"]
        # Skip two-turn dialogues where the human prompt asks for true/false.
        if len(convs) == 2 and convs[0]["from"] == 'human':
            prompt = convs[0]["value"].replace(' ', '').strip()
            if "true或false" in prompt:
                continue
        kept.append(entry)

    random.shuffle(kept)

    # Subsample: keep 30% of the shuffled records.
    return kept[:int(len(kept) * 0.3)]


def get_dialogue_data(path):
    """Load mocked dialogue JSON files from directory *path* and convert
    them to the shared conversation format.

    Each source record's ``messages`` (role/content pairs) is mapped to a
    ``conversations`` list of ``{"from": ..., "value": ...}`` dicts, with
    the "user" role renamed to "human" and "assistant" renamed to "gpt".

    Args:
        path: Directory containing ``.json`` files readable by ``utils.jload``.

    Returns:
        list[dict]: records with ``id`` and ``conversations`` keys.
    """
    # Source role -> target conversation label.
    role_map = {"user": "human", "assistant": "gpt"}
    new_data = []
    for file in os.listdir(path):
        # endswith, not substring: the original `'.json' in file` also
        # matched names like "foo.json.bak".
        if not file.endswith('.json'):
            continue
        # os.path.join tolerates a trailing slash on *path* either way.
        data = utils.jload(os.path.join(path, file))
        for instruct_dict in tqdm.tqdm(data):
            instruct_dict_new = {
                "id": instruct_dict.get("id", ""),
                "conversations": [],
            }
            for m in instruct_dict.get("messages", ""):
                role = m["role"]
                # BUG FIX: the original appended an EMPTY dict for any role
                # other than user/assistant, which later crashes merge() on
                # conv["value"]. Unknown roles are now skipped.
                if role in role_map:
                    instruct_dict_new["conversations"].append(
                        {"from": role_map[role], "value": m["content"]}
                    )
            new_data.append(instruct_dict_new)

    return new_data


def get_llm_data(path):
    """Load mocked instruction files from directory *path*, keeping only the
    id and conversations fields of each record (missing fields default to
    the empty string)."""
    collected = []
    for filename in os.listdir(path):
        if '.json' not in filename:
            continue
        for record in tqdm.tqdm(utils.jload(path + "/" + filename)):
            collected.append({
                "id": record.get("id", ""),
                "conversations": record.get("conversations", ""),
            })

    return collected


def get_konwledge_data(path):
    """Build knowledge-base conversation pairs from FAQ CSV files.

    *path* is a list whose items are either CSV file paths or directories
    containing CSV files. Each CSV row becomes one record: the "human" turn
    is built from the question ("用户问题") or strategy ("沟通策略") column
    plus the optional context ("用户情境"), and the "gpt" turn from the
    script ("话术") or plan ("方案") column.

    Args:
        path: list of CSV file paths and/or directories of CSV files.

    Returns:
        list[dict]: records with a fresh uuid ``id`` and a two-turn
        ``conversations`` list. Rows lacking both the question and the
        strategy column are skipped.
    """
    faqs = []
    for csv_path in path:
        if os.path.isdir(csv_path):
            for file in os.listdir(csv_path):
                if not file.endswith('.csv'):
                    continue
                # BUG FIX: os.path.join works whether or not csv_path has a
                # trailing slash; the original "csv_path + file" concat
                # silently built a wrong path without one.
                df = pd.read_csv(os.path.join(csv_path, file))
                faqs.extend(df.to_dict('records'))
        else:
            df = pd.read_csv(csv_path)
            faqs.extend(df.to_dict('records'))

    data = []
    for faq in faqs:
        if "用户问题" in faq:
            page_content = "用户问题：" + str(faq['用户问题']).strip()
        elif "沟通策略" in faq:
            page_content = "沟通策略：" + str(faq['沟通策略']).strip()
        else:
            # BUG FIX: rows with neither key previously raised NameError on
            # the first occurrence (page_content/value undefined) or silently
            # reused the previous row's values. Skip them instead.
            continue

        if "用户情境" in faq and not pd.isna(faq["用户情境"]):
            page_content += '\n用户情境：' + str(faq['用户情境']).strip()
        if "话术" in faq:
            value = '\n话术：' + str(faq['话术']).strip()
        else:
            value = '\n方案：' + str(faq['方案']).strip()

        data.append({
            'id': str(uuid.uuid1()),
            'conversations': [
                {"from": "human", "value": page_content},
                {"from": "gpt", "value": value},
            ],
        })

    return data


def get_deal_data(path):
    """Build per-session dialogue records from deal-transcript CSV files.

    *path* is a list whose items are either CSV file paths or directories
    containing CSV files. Rows are grouped by ``sessionId`` (preserving row
    order within a session) and consecutive turns from the same role
    ("角色") are merged by concatenating their filtered text ("过滤文本");
    each turn keeps the label ("标签") of its first merged row.

    Args:
        path: list of CSV file paths and/or directories of CSV files.

    Returns:
        list[dict]: one record per session with ``session_id`` and
        ``conversations`` keys.
    """
    records = []
    for csv_path in path:
        if os.path.isdir(csv_path):
            for file in os.listdir(csv_path):
                if not file.endswith('.csv'):
                    continue
                # BUG FIX: os.path.join tolerates a missing trailing slash;
                # the original "csv_path + file" concat did not.
                df = pd.read_csv(os.path.join(csv_path, file))
                records.extend(df.to_dict('records'))
        else:
            df = pd.read_csv(csv_path)
            records.extend(df.to_dict('records'))

    # Group rows by session id, preserving row order within each session.
    all_sessions = {}
    for row in records:
        session_id = str(row['sessionId'])
        all_sessions.setdefault(session_id, []).append(
            {"role": str(row['角色']), "value": str(row['过滤文本']), "sign": str(row["标签"])}
        )

    all_sessions_new = []
    for session_id, turns in all_sessions.items():
        # Merge adjacent turns spoken by the same role.
        merged_turns = []
        for turn in turns:
            if merged_turns and turn['role'] == merged_turns[-1]['role']:
                merged_turns[-1]['value'] += turn['value']
            else:
                merged_turns.append(turn)

        if merged_turns:
            all_sessions_new.append({
                "session_id": session_id,
                "conversations": merged_turns,
            })

    return all_sessions_new


def write_json(path, data):
    """Serialize *data* to *path* as JSON via the shared ``utils.jdump`` helper.

    NOTE(review): duplicate of the ``write_json`` defined earlier in this
    file; this later definition is the one in effect at runtime. The two
    bodies are identical, so behavior is unaffected, but one should be
    removed.
    """
    utils.jdump(data, path)


def merge():
    """Assemble the training set and write it to financial_data_1030_all.json.

    Combines the mocked instruction data, the mocked dialogue data and the
    knowledge base; assigns every record a fresh uuid; drops records that
    contain an empty turn (timeouts) or the simulated-user prompt marker;
    shuffles; and writes the result to disk.
    """
    # Mocked instruction data.
    llm_data = get_llm_data("../data/llm/")

    # Mocked dialogue data.
    dialogue_data = get_dialogue_data("../data/dialogue")

    # Knowledge base.
    d3 = get_konwledge_data(["../internal_server/bot/financial_sales/knowledge_base/raw_data/"])

    # Other sources (online deal transcripts via get_deal_data, WeChat data
    # via get_wechat_data, previously accumulated data) are currently
    # disabled and not merged in.

    all_data = []
    for d in llm_data + dialogue_data + d3:
        instruct_dict_new = {
            "id": str(uuid.uuid1()),
            "conversations": d.get("conversations", ""),
        }
        keep = True
        for conv in instruct_dict_new["conversations"]:
            # BUG FIX: use .get() so a malformed turn without a "value" key
            # is filtered out instead of raising KeyError.
            value = conv.get("value", "")
            # Drop records with an empty turn (request timeouts) or with the
            # simulated-user prompt.
            if value == "" or "你作为一名用户接到了一个电话" in value:
                keep = False
                break
        if keep:
            all_data.append(instruct_dict_new)

    output_path = "financial_data_1030_all.json"
    random.shuffle(all_data)
    print(f"all data length: {len(all_data)}")
    write_json(output_path, all_data)


if __name__ == "__main__":
    # merge()
    # Ad-hoc check: build deal-transcript sessions from the raw CSV data and
    # print them; the full merge() pipeline is currently disabled.
    results = get_deal_data(["../data_set/financial_data/raw_data/"])
    print(results)

