# -*- coding: utf-8 -*-
# @Time : 2023/10/9 10:46
# @Author : momo
# @File : data_helper
# @Project : bert-textcnn
import _Logger
import config
import pickle
import numpy as np
from tqdm import tqdm
from keras_bert import get_custom_objects
from tensorflow.keras.models import load_model
from keras_bert import load_vocabulary, Tokenizer
from data_helper import DataHelper
from ha_bert_textcnn import focal_loss_with_rdrop
import tensorflow as tf
# Load the BERT vocabulary and build the tokenizer from it.
token_dict = load_vocabulary(config.bert_dict_path)
tokenizer = Tokenizer(token_dict)

# Register custom objects so load_model can deserialize the custom loss.
custom_objects = get_custom_objects()
custom_objects["tf"]=tf
# custom_objects["custom_loss"]=local_plus_global_plus_L2_loss
custom_objects["focal_loss_with_rdrop"]=focal_loss_with_rdrop
# custom_objects["real_loss"]=local_plus_global_plus_L2_loss([7,23,64],[],config.l2_reg_lambda)

# Load the trained model (module-level: loaded once per process).
model = load_model(config.model_path, custom_objects=custom_objects)

# MultiLabelBinarizers for the three label levels, fitted at training time.
# NOTE(review): paths are hard-coded here but predict_one reads the same
# files via config.mlb_dir — presumably these point to the same data; verify.
mlb1 = pickle.load(open('./data/mlb0.pkl', 'rb'))
mlb2 = pickle.load(open('./data/mlb1.pkl', 'rb'))
mlb3 = pickle.load(open('./data/mlb2.pkl', 'rb'))

def index_greater_equal(prediction, thresh):
    """Return the positions in *prediction* whose value is >= *thresh*."""
    hits = []
    for pos, score in enumerate(prediction):
        if score >= thresh:
            hits.append(pos)
    return hits

def predict_one(text, rematch_dict):
    """Predict hierarchical labels for a single text.

    :param text: raw input string to classify.
    :param rematch_dict: maps node codes to human-readable node names
        (used only for the level-1 debug printout).
    :return: (one_hot_list, pre_label_list) — per-level one-hot vectors and
        the corresponding label lists, one entry per hierarchy level.
    """
    # Encode the sentence for BERT (token ids + segment ids).
    token_id, segment_id = tokenizer.encode(first=text, max_len=config.max_len)
    predictions = model.predict([np.array([token_id]), np.array([segment_id])])

    # Per-level decision thresholds, tuned on validation data
    # (single-label : multi-label ratios of roughly 6:1, 2:1, 3:5).
    thresholds = (0.3438, 0.30, 0.2)

    pre_label_list = []
    one_hot_list = []

    # The model has 3 outputs, one per label level.
    for i in range(3):
        # Load the MultiLabelBinarizer for this level: {mlb_dir}{i}.pkl.
        # NOTE(review): reloading the pickle on every call is wasteful —
        # consider caching at module level (cf. mlb1/mlb2/mlb3 above).
        with open(config.mlb_dir + str(i) + ".pkl", 'rb') as fh:
            mlb = pickle.load(fh)

        # All three outputs are batch-of-1: take the single prediction row.
        p = predictions[i][0]
        ar = thresholds[i]

        if i == 0:
            # Debug printout of level-1 class names and probabilities.
            cls_list = mlb.classes_.tolist()
            cls_list = [rematch_dict[l[l.rfind("-") + 1:]] for l in cls_list]
            print("")
            print("==============类别：=======================")
            print(cls_list)
            print("==============概率：=======================")
            print(p)
            # Level 1 must always emit at least one label (argmax fallback).
            indexes = choose_indexes(p, ar)
        else:
            # Levels 2/3 may legitimately be empty.
            indexes = [j for j in range(len(p)) if p[j] > ar]

        label = [mlb.classes_.tolist()[j] for j in indexes]
        one_hot = np.where(p > ar, 1, 0)
        one_hot_list.append(one_hot)
        pre_label_list.append(label)

    return one_hot_list, pre_label_list

def choose_indexes(p, ar):
    """Indexes of probabilities strictly above threshold *ar*.

    Falls back to the single argmax index when nothing clears the threshold,
    so the caller always gets at least one label (the scoring rules do not
    penalise extra labels, so predicting one beats predicting none).

    :param p: non-empty sequence of per-class scores.
    :param ar: decision threshold.
    :return: non-empty list of selected indexes.
    """
    indexes = [j for j in range(len(p)) if p[j] > ar]
    if not indexes:
        # The original hand-rolled max was seeded with 0 and therefore
        # returned index 0 for all-nonpositive inputs; a real argmax
        # generalizes correctly (and still picks the first max on ties).
        indexes.append(max(range(len(p)), key=lambda j: p[j]))
    return indexes
def one_hot_f1_score(pre, _true):
    """F1 score between two equal-length one-hot (0/1) vectors.

    :param pre:   predicted one-hot vector,    e.g. [0, 1, 1, 0]
    :param _true: ground-truth one-hot vector, e.g. [1, 1, 0, 0]
    :return: F1 in [0, 1]; 0 when prediction and truth share no 1s.
    """
    pred_1 = 0  # count of predicted positives
    true_1 = 0  # count of ground-truth positives
    hit_1 = 0   # count of positions where both are 1 (true positives)
    for p, t in zip(pre, _true):
        # BUGFIX: the original wrote `p == 1 & t == 1`, where `&` binds
        # tighter than `==`; it only worked by accident of Python's chained
        # comparison on 0/1 values. Use an explicit `and`.
        if p == 1 and t == 1:
            pred_1 += 1
            hit_1 += 1
            true_1 += 1
        elif p == 1:
            pred_1 += 1
        elif t == 1:
            true_1 += 1
    # No correct label at all -> score 0 outright (also avoids 0-division,
    # since hit_1 == 0 whenever pred_1 == 0 or true_1 == 0 would divide).
    if hit_1 == 0:
        return 0
    # NOTE: the original misnamed these recall_rate/accuracy_rate; they are
    # precision and recall respectively. F1 is symmetric, so behavior is
    # unchanged.
    precision = float(hit_1 / pred_1)
    recall = float(hit_1 / true_1)
    f1 = 2 * (precision * recall) / (precision + recall)
    return f1

# The competition F1 is one score per whole text (over the concatenated
# levels), NOT the average of three per-level F1s.
def f1_score(y_pre, y_true):
    """Mean per-text F1 over a batch, per the competition rules.

    :param y_pre:  [level1_onehots, level2_onehots, level3_onehots] —
        each a sequence of one-hot vectors, one per text.
    :param y_true: same structure as y_pre, ground truth.
    :return: mean F1 over all texts (float).
    """
    def is_all_0(y):
        # True when the one-hot vector contains no 1 (empty label set).
        for v in y:
            if v == 1:
                return False
        return True

    # Competition rule: empty ground-truth levels are ignored entirely.
    f1_score_result_list = []
    # i indexes the texts (second axis of y_pre/y_true).
    for i in range(len(y_pre[0])):
        _pre_1 = list(y_pre[0][i])
        _pre_2 = list(y_pre[1][i])
        _pre_3 = list(y_pre[2][i])

        _true_1 = list(y_true[0][i])
        _true_2 = list(y_true[1][i])
        _true_3 = list(y_true[2][i])

        # BUGFIX: the original used bitwise `&` between the two boolean
        # results; use logical `and` (short-circuits, no precedence trap).
        if is_all_0(_true_3) and is_all_0(_true_2):
            # L2 and L3 both empty -> score on L1 only.
            f1s = one_hot_f1_score(_pre_1, _true_1)
        elif is_all_0(_true_3):
            # Only L3 empty -> score on L1+L2 concatenated.
            f1s = one_hot_f1_score(_pre_1 + _pre_2, _true_1 + _true_2)
        else:
            f1s = one_hot_f1_score(_pre_1 + _pre_2 + _pre_3,
                                   _true_1 + _true_2 + _true_3)

        f1_score_result_list.append(f1s)

    # Show the per-text scores and a frequency table, e.g. {0.0: 20, 1.0: 10}.
    print(f1_score_result_list)
    f1s_cnt_dict = {v: f1_score_result_list.count(v)
                    for v in set(f1_score_result_list)}
    print(f1s_cnt_dict)

    return float(sum(f1_score_result_list) / len(f1_score_result_list))

# 分3层的f1
# def f1_score(y_pre,y_true):
#     """
#     :param y_pre: [[[],[]],[],[]]
#     :param y_true: [[[],[]],[],[]]
#     :return:
#     """
#     f1_score_result_list=[]
#     total_f1_list=[]
#     for i in range(len(y_pre)):
#         one_pre=y_pre[i]
#         one_true=y_true[i]
#         fs=[]
#         for i in range(len(one_pre)):
#             f1_score=one_hot_f1_score(one_pre[i],one_true[i])
#             fs.append(f1_score)
#         total_f1_list.append(fs)
#
#     for i in range(len(total_f1_list[0])):
#         f1_1=total_f1_list[0][i]
#         f1_2=total_f1_list[1][i]
#         f1_3=total_f1_list[2][i]
#         # 为空的情况
#         if f1_1==-1:
#             f1_score_result_list.append(1)
#         elif f1_2==-1:
#             f1_score_result_list.append(f1_1)
#         elif f1_3==-1:
#             f1_score_result_list.append((f1_1+f1_2)/2)
#         else:
#             f1_score_result_list.append((f1_1+f1_2+f1_3)/3)
#     print(f1_score_result_list)
#
#     return float(sum(f1_score_result_list)/len(f1_score_result_list))



def evaluate(begin_length=0, end_length=1000):
    """Run the trained model over the tail of the corpus and print its F1.

    :param begin_length: start index of the evaluation slice
        (default 0, the original hard-coded value).
    :param end_length: end index (exclusive) of the evaluation slice
        (default 1000, the original hard-coded value).
    """
    dh = DataHelper()

    # Merge the raw sources and keep only the last 1000 rows for evaluation.
    xy = dh.read_and_merge_content_excl(config.ori_demo_path, config.ori_zl_path, config.ori_node_path)
    xy = xy[-1000:]
    test_content_x, test_labels_y = dh.load_data(xy, mode='evaluate')

    test_x = test_content_x[begin_length:end_length]
    test_y = [y[begin_length:end_length] for y in test_labels_y]

    # Ground-truth one-hot matrices, one per label level.
    true_y_list = []
    true_y1_list = mlb1.transform(test_y[0])
    true_y2_list = mlb2.transform(test_y[1])
    true_y3_list = mlb3.transform(test_y[2])
    true_y_list.append(true_y1_list)
    true_y_list.append(true_y2_list)
    true_y_list.append(true_y3_list)

    pred_y_list = [[], [], []]
    pred_labels = [[], [], []]
    rematch_dict = DataHelper.make_node_dict("节点名称", "节点编码")
    for j in tqdm(range(len(test_x))):
        print("")
        print(f"==========第{(j+1)}条==============")
        text = test_x[j]
        pred_y, label = predict_one(text, rematch_dict)

        # Accumulate predictions for each of the 3 label levels.
        for i in range(3):
            # A one-hot vector may legitimately be all zeros.
            pred_y_list[i].append(pred_y[i])
            pred_labels[i].append(label[i])

    f1s = f1_score(pred_y_list, true_y_list)
    print("f1_score:{}".format(f1s))


if __name__ == "__main__":
    # _Logger.Logger.build("evaluate")
    evaluate()


# 37  368  3478  3438  363 362  3452