# _*_ coding : utf-8 _*_
# @Time : 2023/10/9 10:46
# @Author : momo
# @File : data_helper
# @Project : bert-textcnn
# own
import os
import time

from sklearn.metrics import f1_score, recall_score, precision_score
from tensorflow import keras
from tensorflow.python.keras.callbacks import EarlyStopping, Callback

import data_helper
from data_helper import my_print
import config
import random
# pickle序列化和反序列化，且可读性差
import pickle
import numpy as np
import matplotlib.pyplot as plt
# tqdm是进度条工具
from tqdm import tqdm
# mlb对多标签进行统计等
from sklearn.preprocessing import MultiLabelBinarizer
# own
from ha_bert_textcnn import build_bert_textcnn_model
# bert
from keras_bert import Tokenizer, load_vocabulary
from _Logger import Logger
from sklearn.model_selection import KFold


# def encode_labels(train_label_y, test_label_y):
#     # 对标签集编码
#     mlb = MultiLabelBinarizer()
#     mlb.fit(train_label_y)
#     label_counts=len(mlb.classes_)
#     # 序列化 mlb 并存储
#     pickle.dump(mlb, open(config.mlb_path, 'wb'))
#     # 使用 MultiLabelBinarizer 编码标签
#     train_y = np.array(mlb.transform(train_label_y))
#     test_y = np.array(mlb.transform(test_label_y))
#     return  train_y,test_y,label_counts

# 是要[[[],[],[]],[...],[...]],还是要拆开[]得到[[],[],[],[],[]]
def encode_labels(train_label_y, vali_label_y):
    """Fit one MultiLabelBinarizer per hierarchy level and one-hot encode labels.

    :param train_label_y: list of 3 label lists (one per hierarchy level) for training
    :param vali_label_y: list of 3 label lists (one per hierarchy level) for validation
    :return: (train one-hot matrices per level, validation one-hot matrices per level,
              number of classes per level)
    """
    # NOTE(review): fitting on train + validation labels leaks the validation
    # label vocabulary into the encoder — confirm this is intentional.
    mlbs = []
    encoded_train = []
    encoded_vali = []
    for level in range(3):
        train = train_label_y[level]
        vali = vali_label_y[level]
        mlb = MultiLabelBinarizer()
        mlb.fit(train + vali)
        # Persist the fitted binarizer for inference; `with` closes the handle
        # (the original leaked the file object).
        with open(config.mlb_dir + str(level) + ".pkl", 'wb') as f:
            pickle.dump(mlb, f)
        encoded_train.append(np.array(mlb.transform(train)))
        encoded_vali.append(np.array(mlb.transform(vali)))
        mlbs.append(mlb)

    # Debug output: map each class name's last "-" segment to its node code
    # and print per-level class lists.
    dh = data_helper.DataHelper()
    match_dict = dh.make_node_dict("节点名称", "节点编码")
    for mlb in mlbs:
        classes = mlb.classes_.tolist()
        print([match_dict[c[c.rfind("-") + 1:]] for c in classes])

    counts = [len(mlb.classes_) for mlb in mlbs]
    print(*counts)

    return encoded_train, encoded_vali, counts


def encode_text_list(content_list, tokenizer):
    """Encode each text with the BERT tokenizer.

    :param content_list: iterable of raw text strings
    :param tokenizer: keras_bert Tokenizer used for encoding
    :return: [token_ids array, segment_ids array], each of shape (n, config.max_len)
    """
    all_token_ids = []
    all_segment_ids = []
    # tqdm wraps the iterable to show encoding progress
    for text in tqdm(content_list):
        tokens, segments = tokenizer.encode(first=text, max_len=config.max_len)
        all_token_ids.append(tokens)
        all_segment_ids.append(segments)
    return [np.array(all_token_ids), np.array(all_segment_ids)]


def encode_texts(train_content_x, vali_content_x):
    """Build a BERT tokenizer from the vocabulary file and encode both text sets.

    :return: (encoded training texts, encoded validation texts), each as
             [token_ids array, segment_ids array]
    """
    vocabulary = load_vocabulary(config.bert_dict_path)
    bert_tokenizer = Tokenizer(vocabulary)
    encoded_train = encode_text_list(train_content_x, bert_tokenizer)
    encoded_vali = encode_text_list(vali_content_x, bert_tokenizer)
    return encoded_train, encoded_vali


def shuffle(train_content_x, train_label_y):
    """Shuffle texts and labels with one shared random permutation.

    Text i stays aligned with its labels at every hierarchy level.

    :param train_content_x: list of texts
    :param train_label_y: list of per-level label lists, all parallel to the texts
    :return: (shuffled texts, shuffled per-level label lists)
    """
    order = list(range(len(train_content_x)))
    random.shuffle(order)  # one permutation reused for every sequence
    shuffled_x = [train_content_x[j] for j in order]
    shuffled_y = [[labels[j] for j in order] for labels in train_label_y]
    return shuffled_x, shuffled_y


def draw(history):
    """Plot training-vs-validation loss and accuracy curves and save the figure.

    :param history: Keras History object returned by model.fit
    """
    # Top panel: loss curves
    plt.subplot(2, 1, 1)
    n_epochs = len(history.history['loss'])
    plt.plot(range(n_epochs), history.history['loss'], label='loss')
    plt.plot(range(n_epochs), history.history['val_loss'], label='val_loss')
    plt.legend()
    # Bottom panel: accuracy curves
    plt.subplot(2, 1, 2)
    n_epochs = len(history.history['accuracy'])
    plt.plot(range(n_epochs), history.history['accuracy'], label='acc')
    plt.plot(range(n_epochs), history.history['val_accuracy'], label='val_acc')
    plt.legend()
    # File name encodes the epoch and batch-size hyper-parameters
    plt.savefig(f"./models/bert-tcnn-ep{config.epochs}-ba{config.batch_size}-loss-acc.png")


def stronger(encoded_x, encoded_y, mean_std):
    """
    Data-augmentation hook: would duplicate samples carrying rare labels with
    small per-feature noise, then reshuffle. The augmentation core is currently
    commented out, so this only logs label counts and reshuffles the data.

    :param encoded_x: np arrays, 2 entries [token_ids matrix, segment_ids matrix]
    :param encoded_y: np arrays, 3 entries (one one-hot matrix per hierarchy level)
    :param mean_std: per-feature noise scale (only used by the disabled code below)
    :return: (encoded_x, encoded_y) reshuffled, as lists of np arrays
    """
    # Convert np arrays to plain lists so rows could be appended (the planned
    # augmentation adds samples; currently nothing is added).
    print("增强前，标签数量：")
    get_label_sum_list(encoded_y)

    token_ids_array, segment_ids = encoded_x[0].tolist(), encoded_x[1].tolist()
    y1 = encoded_y[0].tolist()
    y2 = encoded_y[1].tolist()
    y3 = encoded_y[2].tolist()
    # Walk every sample; with the augmentation disabled this loop is a no-op
    # (the three assignments below are unused until the block is re-enabled).
    for i in range(len(y1)):
        # Per-level one-hot vectors used to decide whether to augment
        one_hot1 = y1[i]
        one_hot2 = y2[i]
        one_hot3 = y3[i]
        # TODO: currently augments whenever any rare label is present, without
        # deciding per hierarchy level where the rare label sits.
        # if should_be_strong(one_hot1,one_hot2,one_hot3):
        #     # text feature dimension
        #     # j=0 is sentence start, j=-1 is sentence end (if the sentence fills all max_len features)
        #     for j in range(1, config.max_len - 1):
        #         # not yet at sentence end; 102 is BERT's end-of-sentence token id
        #         if token_ids_array[i][j] != 102:
        #             # copy the text
        #             copy_id = token_ids_array[i]
        #             # add noise on a single feature dimension
        #             copy_id[j] += mean_std[j]
        #             # append the new sample + its segment ids + the original labels
        #             # (the noisy text is still considered to carry the same labels)
        #             token_ids_array.append(copy_id)
        #             segment_ids.append(segment_ids[i])
        #             y1.append(y1[i])
        #             y2.append(y2[i])
        #             y3.append(y3[i])
        #         else:
        #             # next sample
        #             break
    print("增强后，标签数量：")
    get_label_sum_list([y1, y2, y3])
    # Shuffle the (possibly augmented) training set; takes plain lists and
    # returns np.arrays again.
    encoded_x, encoded_y = shuffle_encoded_xy([token_ids_array, segment_ids], [y1, y2, y3])
    return encoded_x, encoded_y


def get_label_sum_list(y):
    """Print and return per-level label classes and per-class sample counts.

    :param y: list of 3 one-hot label matrices (one per hierarchy level)
    :return: (class_list, all_nums_list) — per-level class names from the
             persisted MultiLabelBinarizers, and per-level per-class counts.
             The original built these lists but never returned them; returning
             them is backward compatible (existing callers ignore the result).
    """
    class_list = []
    all_nums_list = []
    for i in range(3):
        # `with` closes the file handle (the original leaked it)
        with open(config.mlb_dir + str(i) + ".pkl", 'rb') as f:
            mlb = pickle.load(f)
        class_list.append(mlb.classes_.tolist())
        # Column-wise sum of the one-hot matrix = samples carrying each label
        nums_list = list(map(sum, zip(*y[i])))
        my_print("nums list{}".format(i + 1), nums_list)
        all_nums_list.append(nums_list)
    return class_list, all_nums_list


def shuffle_encoded_xy(encoded_x_arr, encoded_y_arr):
    """Apply one shared random permutation to inputs and all label sets.

    :param encoded_x_arr: [token_ids, segment_ids] parallel sequences
    :param encoded_y_arr: [y1, y2, y3] per-level label sequences, parallel to x
    :return: ([token_ids, segment_ids], [y1, y2, y3]) as np.arrays, rows shuffled
             consistently across all five sequences
    """
    sequences = [encoded_x_arr[0], encoded_x_arr[1],
                 encoded_y_arr[0], encoded_y_arr[1], encoded_y_arr[2]]
    order = list(range(len(sequences[0])))
    random.shuffle(order)
    # Reorder every sequence with the same permutation so rows stay aligned
    permuted = [np.array([seq[i] for i in order]) for seq in sequences]
    encoded_x = [permuted[0], permuted[1]]
    encoded_y = [permuted[2], permuted[3], permuted[4]]
    return encoded_x, encoded_y


def reverse_one_hot(arr):
    """Invert a binary vector bitwise, e.g. [1, 1, 0, 0] -> [0, 0, 1, 1]."""
    return [1 ^ bit for bit in arr]


def should_be_strong(one_hot1, one_hot2, one_hot3) -> bool:
    """Decide whether a sample should be augmented.

    A sample qualifies if, at any hierarchy level, it carries a label marked
    rare in the hard-coded tables below.

    :param one_hot1: level-1 one-hot label vector
    :param one_hot2: level-2 one-hot label vector
    :param one_hot3: level-3 one-hot label vector
    :return: True if any level overlaps with its rare-label mask
    """
    # Rare-label masks (derived from label statistics and a previous
    # augmentation run): 3 levels, 1 marks a rare label at that position.
    stronger_one_hot = [
        [0, 1, 0, 1, 1, 0, 0],
        [1, 1, 0, 0, 1,
         0, 1, 1, 0, 0,
         0, 1, 0, 0, 1,
         0, 0, 0, 0, 0,
         0, 1, 1],
        [1, 1, 1, 1, 1, 0, 1, 1, 0, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
         1, 1, 0, 1, 1, 1, 1, 1, 1, 0,
         0, 0, 1, 0, 0, 1, 0, 0, 1, 0,
         0, 1, 1, 1, 1, 1, 1, 1, 0, 1,
         1, 1, 1, 1, 1, 1, 1, 1, 1, 0,
         1, 1, 1, 1
         ]
    ]
    # TODO: proposed refinement — if a rare label co-occurs with frequent
    # labels more than 2/3 of the time, treat it as not rare (skip augmentation).

    # Original policy: any rare label triggers augmentation. Note the rare
    # masks were derived from a previous (almost all-1) mask's augmentation
    # results rather than from plain label counts.
    # Fix: use short-circuiting boolean `or` instead of bitwise `|`.
    return compare_one_hot(one_hot1, stronger_one_hot[0]) or \
        compare_one_hot(one_hot2, stronger_one_hot[1]) or \
        compare_one_hot(one_hot3, stronger_one_hot[2])


def compare_one_hot(one_hot, rule_one_hot):
    """Return True if any position is 1 in both vectors (overlap test).

    Bug fix: the original condition `one_hot[i] == 1 & rule_one_hot[i] == 1`
    parses as a chained comparison because `&` binds tighter than `==`
    (i.e. `one_hot[i] == (1 & rule_one_hot[i]) == 1`); it only gave the
    intended result by accident for strictly 0/1 inputs. Use explicit `and`.
    zip() also guards against unequal lengths (the original could IndexError
    if `rule_one_hot` were shorter).

    :param one_hot: binary label vector
    :param rule_one_hot: binary rare-label mask
    :return: True on any shared 1, else False
    """
    return any(a == 1 and b == 1 for a, b in zip(one_hot, rule_one_hot))


def select_dfs_to_calculate_std(df):
    """Slice out one sub-frame per well-known label for noise-scale estimation.

    The mapping below was derived from an over-fitted run (4919+4896): the
    model is most familiar with these labels, so their texts are used to
    compute the augmentation noise standard deviation.

    :param df: DataFrame with columns 标签1级 / 标签2级 / 标签3级
    :return: list of sub-frames, one per listed label, in dict order
    """
    well_known = {
        # level 1 — occurrence: 140 / 450 / 145 / 77 (codes 001/003/006/002)
        "标签1级": ["原材料", "芯片设计", "产业配套", "设备"],
        # level 2 — occurrence: 239/110/52/43/45/40 (codes 020/030/023/009/022/021)
        "标签2级": ["芯片设计-集成电路设计", "产业配套-半导体贸易", "芯片设计-光电器件设计", "原材料-封装材料",
                    "芯片设计-传感器设计", "芯片设计-分立器件设计"],
        # level 3 — occurrence: 101/55/27/17 (codes 111/132/112/114)
        "标签3级": ["芯片设计-集成电路设计-模拟/数模混合电路", "产业配套-半导体贸易-代理分销",
                    "芯片设计-集成电路设计-数字逻辑电路", "芯片设计-分立器件设计-功率器件"],
    }
    return [df[df[column] == label]
            for column, labels in well_known.items()
            for label in labels]


def calculate_df_std(df):
    """Estimate a per-feature noise scale from texts of well-known labels.

    :param df: full labeled DataFrame
    :return: mean per-feature standard deviation across the selected
             sub-frames, halved to keep the injected noise moderate
    """
    sub_dfs = select_dfs_to_calculate_std(df)
    # Hoisted out of the loop (the original rebuilt it per sub-frame);
    # also renamed the loop variable, which previously shadowed `df`.
    dh = data_helper.DataHelper()
    stds = []
    for sub_df in sub_dfs:
        # Encode each sub-frame's texts (empty validation set)
        content_x, _labels_y = dh.load_data(sub_df)
        encoded_x, _ = encode_texts(content_x, [])
        # std over the sample axis; index 0 selects the token-id channel of
        # [token_ids, segment_ids]
        std = np.std(encoded_x, axis=1)
        stds.append(std[0])
    # Average over the sub-frames -> one std per text feature position
    mean_std = np.mean(stds, axis=0)
    # TODO: the full std may make the noise too large; use half for now
    return mean_std * 1 / 2


def make_3_to_1(y, i):
    """Concatenate sample i's one-hot labels across the 3 hierarchy levels.

    :param y: list of 3 per-level one-hot matrices
    :param i: sample index
    :return: flat list, level-1 bits followed by level-2 then level-3
    """
    return [value for level in range(3) for value in y[level][i]]

if __name__ == "__main__":
    # Set up run logging
    Logger.build("train")

    dh = data_helper.DataHelper()
    # Read and merge the raw Excel sources into one frame
    xy = dh.read_and_merge_content_excl(config.ori_demo_path, config.ori_zl_path, config.ori_node_path)
    # Bug fix: DataFrame.fillna is not in-place by default — the original
    # discarded the result, leaving NaNs in the data.
    xy = xy.fillna("")
    # Keep the last 1000 rows (900 for training, 100 reserved for testing)
    xy = xy[-1000:]

    # (The previous single-split training path was removed; the K-fold loop
    # below is the maintained version of the same pipeline.)

    # =================== K-fold cross-validation =======================
    # Optional preprocessing, currently disabled:
    # xy = xy[xy["标签总数"] >= 3]
    # mean_std = calculate_df_std(xy)   # text feature std for data augmentation
    # xy = dh.over_resample(xy)         # re-sample by label frequency

    kfold = KFold(n_splits=config.num_folds, shuffle=True, random_state=config.random_seed)
    for train_index, val_index in kfold.split(xy):
        train_xy = xy.iloc[train_index]
        val_xy = xy.iloc[val_index]

        # train_xy = dh.copy_resample(train_xy)

        # Load texts and per-level labels
        train_content_x, train_labels_y = dh.load_data(train_xy)
        vali_content_x, vali_labels_y = dh.load_data(val_xy)

        # Shuffle (texts and labels stay aligned)
        train_content_x, train_labels_y = shuffle(train_content_x, train_labels_y)
        vali_content_x, vali_labels_y = shuffle(vali_content_x, vali_labels_y)
        # Encode texts with the BERT tokenizer
        train_x, vali_x = encode_texts(train_content_x, vali_content_x)
        # One-hot encode labels per hierarchy level
        train_y, vali_y, labels_herarichy = encode_labels(train_labels_y, vali_labels_y)

        # Data augmentation, currently disabled:
        # train_x, train_y = stronger(train_x, train_y, mean_std)

        # Flattened all-levels-in-one target, currently disabled:
        # train_y_3 = np.array([make_3_to_1(train_y, i) for i in range(len(train_y[0]))])
        # vali_y_3 = np.array([make_3_to_1(vali_y, i) for i in range(len(vali_y[0]))])

        # Build the model with one output head per hierarchy level
        model = build_bert_textcnn_model(config.bert_config_path, config.bert_checkpoint_path, labels_herarichy)

        # TensorBoard logging
        if not os.path.exists(config.tb_logs_dir):
            os.mkdir(config.tb_logs_dir)
        tensorboard_callback = keras.callbacks.TensorBoard(
            log_dir=config.tb_logs_dir,
            histogram_freq=1,
            write_graph=True,
            write_images=True
        )

        # Early stopping on validation loss
        early_stopping = EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True)

        # Train with the callbacks above
        model.fit(train_x, train_y,
                  validation_data=(vali_x, vali_y),
                  batch_size=config.batch_size,
                  epochs=config.epochs,
                  callbacks=[
                      tensorboard_callback,
                      early_stopping,
                      # Metrics(valid_data=(vali_x, vali_y))
                  ])

        # NOTE(review): every fold saves to the same path, so only the last
        # fold's model survives — confirm this is intended.
        model.save(config.model_path)
