# -*- coding: utf-8 -*-
# @Time : 2023/10/9 10:46
# @Author : momo
# @File : data_helper
# @Project : bert-textcnn
from tensorflow.python.keras.backend import sigmoid
from tensorflow.python.keras.layers import  Lambda, Add

import config
from tensorflow import keras
from keras_bert import load_trained_model_from_checkpoint
import tensorflow as tf
from tensorflow.keras.losses import kullback_leibler_divergence as kld
from tensorflow.keras.backend import log
from tensorflow.keras.models import Model
from tensorflow.keras.layers import \
    Dropout, Concatenate, \
    Dense


def textcnn(inputs):
    """Extract multi-scale TextCNN features from a token-embedding sequence.

    Three parallel Conv1D branches (kernel sizes 5, 4, 3 — coarse to fine
    granularity for the MSML hierarchy) each followed by global max pooling.

    Args:
        inputs: tensor of shape [batch_size, seq_len, embedding_dim].

    Returns:
        List of three tensors, each of shape [batch_size, 256], ordered by
        kernel size 5, 4, 3 (MSML-BERT keeps them separate; no concatenation).
    """
    def branch(kernel_width):
        # One branch: Conv1D -> GlobalMaxPooling1D.
        conv_out = keras.layers.Conv1D(
            filters=256,
            kernel_size=kernel_width,
            activation='relu'
        )(inputs)  # shape=[batch_size, maxlen-2-kernel_width+1, 256]
        return keras.layers.GlobalMaxPooling1D(
            name='cnn_pool_of_kernel_' + str(kernel_width)
        )(conv_out)  # shape=[batch_size, 256]

    # MSML: coarse kernels for upper levels, fine kernels for lower levels.
    return [branch(k) for k in (5, 4, 3)]


"""
labels_hierarchy : []

1. val>train的原因可能是：
    1.1 两个集的分类不均衡
    1.2 使用提前停，没有看到更多epoch下的train效果
    1.3 这也是正常的，因为train的时候引入了dropout，而vali的时候神经元全开
2. 目前的问题：
    2.1 val_acc一开始就保持不变
        2.1.1 可能是网络层的问题，某些参数出现问题，或者因为太稀疏了，有的地方本来就学不到。
    2.2 层级信息往下传递，但是对于无2，3级标签的训练可能有误差
"""


def build_bert_textcnn_model(config_path, checkpoint_path, num_classes_list):
    """Build the MSML-BERT hierarchical multi-label classification model.

    Architecture (active path): pretrained BERT -> [CLS] feature + multi-scale
    TextCNN features -> one local output head per hierarchy level, with
    parent-to-child feature gating (HGM), plus a global head over all labels;
    local and global scores are blended in `output_layer`.

    Args:
        config_path: path to the BERT json config file.
        checkpoint_path: path to the BERT checkpoint.
        num_classes_list: list of three ints — label counts for hierarchy
            levels 1, 2, 3 (top to bottom).

    Returns:
        A compiled `keras.Model` with BERT's inputs and three outputs
        (blended scores for levels 1, 2, 3), trained with
        `focal_loss_with_rdrop` and Adam.
    """
    # Load the pretrained BERT model from checkpoint.
    bert = load_trained_model_from_checkpoint(
        config_file=config_path,
        checkpoint_file=checkpoint_path,
        seq_len=None
    )
    # Take the [CLS] vector: usable directly for classification, or
    # concatenated with features from other networks.
    cls_features = keras.layers.Lambda(
        lambda x: x[:, 0],
        name='cls'
    )(bert.output)  # shape=[batch_size,768]

    # Drop the leading [CLS] and trailing [SEP] to get the token embeddings.
    word_embedding = keras.layers.Lambda(
        lambda x: x[:, 1:-1],
        name='word_embedding'
    )(bert.output)  # (batch_size,max_len-2,768)
    # (bs,1536)
    # Experiments tried:
    #   cls only        -> fc 512
    #   cnn only        -> fc 512
    #   cls + cnn       -> fc 1024
    # cnn_feature=textcnn(word_embedding)
    # cls_cnn_feature = Concatenate()([cls_features, cnn_feature])

    # HARNN variant — not selected.
    # # 第一级
    # first_att_weight, first_att_out = attention_layer(word_embedding, num_classes_list[0], config.attention_unit_size,  name="first-")
    # # (bs,2304)
    # first_local_input = Concatenate()([first_att_out,cls_cnn_feature])
    # # (bs,fc_size)
    # first_local_fc = fc_layer(first_local_input, name="first-local-")
    # first_scores, first_visual = local_layer(first_local_fc, first_att_weight, num_classes_list[0], name="first-")
    # # 第二级
    # second_att_input = Multiply()([word_embedding, Lambda(lambda x: tf.expand_dims(x, -1))(first_visual)])
    # second_att_weight, second_att_out = attention_layer(second_att_input, num_classes_list[1],config.attention_unit_size, name="second-")
    # second_local_input = Concatenate()([second_att_out,cls_cnn_feature])
    # second_local_fc = fc_layer(second_local_input, name="second-local-")
    # second_scores, second_visual = local_layer(second_local_fc, second_att_weight,num_classes_list[1], name="second-")
    # # 第三级
    # third_att_input = Multiply()([word_embedding, Lambda(lambda x: tf.expand_dims(x, -1))(second_visual)])
    # third_att_weight, third_att_out = attention_layer(third_att_input, num_classes_list[2], config.attention_unit_size, name="third-")
    # third_local_input = Concatenate()([third_att_out,cls_cnn_feature])
    # third_local_fc = fc_layer(third_local_input, name="third-local-")
    # third_scores, third_visual = local_layer(third_local_fc, third_att_weight, num_classes_list[2],name="third-")
    # # 合并三级的局部
    # global_feature = Concatenate()([first_local_fc, second_local_fc, third_local_fc])
    # # 全连接层
    # global_fc = fc_layer(global_feature)
    # # Highway层  和准确度紧密相关
    # highway = highway_layer(global_fc, global_fc.get_shape()[1], num_layers=0, bias=3)
    #
    # # Dropout层
    # h_drop = Dropout(config.drop_rate)(highway)
    #
    # # 全局输出层
    # global_scores = Dense(sum(num_classes_list),activation="sigmoid", name="global_score")(h_drop)
    # # 输出层
    # scores = output_layer(first_scores, second_scores, third_scores, config.alpha, global_scores)

    # For a plain fully-connected head (no attention, no highway), comment out
    # the hierarchical code above, enable this, and swap the loss.

    # First version: plain bert-textcnn.
    # fc = Dense(units=config.fc_size, activation='relu', name='fc')(cls_cnn_feature)

    # Current: MSML_BERT + focal_loss with R-drop + local and global info.
    # L1 — top hierarchy level.
    # cnn_feature[0]: [bs,256], kernel size 5 — coarse-grained convolution.
    cnn_feature=textcnn(word_embedding)
    first_cnn_feature=cnn_feature[0]
    all_1=tf.concat([cls_features,first_cnn_feature],axis=-1)
    all_1=Dropout(config.drop_rate)(all_1)
    # Fully connected [bs,128].
    fc1=Dense(config.fc_size,activation='relu',name='fc1')(all_1)
    # Output [bs,L1_size].
    o1=Dense(num_classes_list[0],activation='sigmoid',name='o1')(fc1)

    # L2 — middle hierarchy level.
    # fc1 + fc2 --gating--> fc22 -> o2
    # cnn_feature[1]: [bs,256], kernel size 4 — medium-grained convolution.
    second_cnn_feature=cnn_feature[1]
    second_cnn_feature=Dropout(0.2)(second_cnn_feature)
    # Fully connected [bs,128].
    fc2=Dense(config.fc_size,activation='relu',name='fc2')(second_cnn_feature)
    # Hierarchical gating.
    fc22=HGM(fc1,fc2,1)
    # Output.
    o2=Dense(num_classes_list[1],activation='sigmoid',name='o2')(fc22)

    # L3 — bottom (leaf) hierarchy level.
    # fc22 + fc3 --gating--> fc33 -> o3
    # cnn_feature[2]: [bs,256], kernel size 3 — fine-grained convolution.
    third_cnn_feature=cnn_feature[2]
    third_cnn_feature=Dropout(0.2)(third_cnn_feature)
    # fc3 [bs,128].
    fc3=Dense(config.fc_size,activation='relu',name='fc3')(third_cnn_feature)
    # Hierarchical gating.
    fc33=HGM(fc22,fc3,2)
    # [bs,L3_size]
    o3=Dense(num_classes_list[2],activation='sigmoid',name='o3')(fc33)

    # L-All — global information over all three levels.
    # global_feature=tf.concat([cls_features,cnn_feature[2],cnn_feature[1],cnn_feature[0]],axis=-1)
    global_feature=tf.concat([cls_features,cnn_feature[0],cnn_feature[1],cnn_feature[2]],axis=-1)
    global_feature=Dropout(config.drop_rate)(global_feature)
    fc_global=Dense(config.fc_size,activation='relu',name='fc_all')(global_feature)
    o_global=Dense(sum(num_classes_list),activation='sigmoid',name='o_global')(fc_global)

    res1,res2,res3=output_layer(o1,o2,o3,config.alpha,o_global,num_classes_list,'result_')

    # Initial version.
    # fc = Dense(units=config.fc_size, name='fc')(cls_cnn_feature)
    #
    # fc=Lambda(lambda x:normalized_linear_activation(x,0.5))(fc)
    #
    # # 三层输出
    # output_player = []
    # for i in range(len(num_classes_list)):
    #     size = num_classes_list[i]
    #     o1 = Dense(size, activation="sigmoid", name="output_" + str((i + 1)))(fc)
    #     output_player.append(o1)
    # bias 最佳是？
    # highway = highway_layer(fc, fc.get_shape()[1], num_layers=0, bias=-1)
    # 使用门控
    # scores = Dense(sum(num_classes_list), activation="sigmoid", name="score")(highway)
    # 不使用门控
    # scores=Dense(sum(num_classes_list),activation="sigmoid",name="scores")(fc)
    # output.append(scores)

    # Build the model — lets us compare local+global against pure global.
    model = Model(inputs=bert.input, outputs=[res1,res2,res3])

    # # # train_vars
    # trainable_variables = model.trainable_variables
    # # custom_loss
    # custom_loss = local_plus_global_plus_L2_loss(num_classes_list, trainable_variables, config.l2_reg_lambda)

    # Compile the model; the same loss is applied to each of the three outputs.
    model.compile(
        # loss={"output_1": focal_loss,
        #       "output_2": focal_loss,
        #       "output_3": focal_loss
        #       },
        # loss='binary_crossentropy',
        loss=focal_loss_with_rdrop,
        optimizer=keras.optimizers.Adam(config.learning_rate),
        metrics=['accuracy']
    )

    return model


def HGM(parent, child, hierarchy=1):
    """Hierarchical Gating Mechanism: fuse a parent-level feature into the
    child-level feature before the child's output layer.

    Args:
        parent: feature tensor of the level above, shape [batch, P].
        child: feature tensor of the current level, shape [batch, C].
            For hierarchy=1, P == C is required (element-wise add);
            for hierarchy=2, P >= C and only the trailing C columns of
            `parent` are gated against `child`.
        hierarchy: which fusion variant to use — 1 for the L1->L2 link,
            2 for the L2->L3 link (where `parent` is itself an HGM output
            and is wider than `child`).

    Returns:
        Concatenated gated feature tensor: [gated_parent, child] for
        hierarchy=1 ([batch, 2C]); [parent_prefix, gated_slice, child]
        for hierarchy=2 ([batch, P + C]).

    Raises:
        ValueError: if `hierarchy` is not 1 or 2.  (The original code fell
        through and silently returned None, which would crash downstream.)
    """
    if hierarchy == 1:
        # Gate: sigmoid(parent + child) decides how much parent info flows down.
        Fs = sigmoid(tf.add(parent, child))
        Fp = tf.multiply(parent, Fs)
        Fo = tf.concat([Fp, child], axis=-1)
        return Fo
    if hierarchy == 2:
        size = (child.get_shape().as_list()[-1])
        # `parent` is wider than `child`; gate only the trailing slice of
        # `parent` whose width matches `child`, keep the prefix untouched.
        add_p = parent[:, -size:]
        pre_p = parent[:, :size]
        Fs = sigmoid(tf.add(add_p, child))
        Fp = tf.multiply(add_p, Fs)
        Fo = tf.concat([pre_p, Fp, child], axis=-1)
        return Fo
    raise ValueError(
        "hierarchy must be 1 or 2, got {!r}".format(hierarchy))


# 自定义的归一化线性激活函数 seesaw
# def normalized_linear_activation(x, temperature=1.0):
#     # 计算输入特征的L2范数
#     norm_x = tf.norm(x, axis=1, keepdims=True)
#
#     # 归一化输入特征
#     normalized_x = x / (norm_x + 1e-10)  # 添加小值以避免除以零
#
#     # 计算z = τ * x~
#     z = temperature * normalized_x
#
#     return z


def focal_loss_with_rdrop(y_true, y_pred, alpha=0.25, gamma=2, beta=4):
    """Multi-label focal loss plus a symmetric-KL R-Drop regularizer.

    Focal term: per-entry binary focal cross-entropy, summed over the batch.
    R-Drop term: symmetric KL divergence between predictions at even and odd
    batch indices, scaled by beta/4.

    NOTE(review): the R-Drop term assumes the batch interleaves two forward
    passes of the same samples (pairs at indices 2k and 2k+1) — confirm the
    data pipeline / training loop actually duplicates samples this way.

    Args:
        y_true: binary label tensor, shape [batch, num_labels].
        y_pred: sigmoid probabilities, same shape as y_true.
        alpha: focal balancing factor between positive and negative terms.
        gamma: focal focusing exponent.
        beta: weight of the R-Drop KL term.

    Returns:
        Scalar loss tensor.
    """
    zeros = tf.zeros_like(y_pred, dtype=y_pred.dtype)

    # For positive prediction, only need consider front part loss, back part is 0;
    # target_tensor > zeros <=> z=1, so positive coefficient = z - p.
    pos_p_sub = tf.where(y_true > zeros, y_true - y_pred, zeros)  # positive samples: keep (z - p), zero elsewhere

    # For negative prediction, only need consider back part loss, front part is 0;
    # target_tensor > zeros <=> z=1, so negative coefficient = 0.
    neg_p_sub = tf.where(y_true > zeros, zeros, y_pred)  # negative samples: keep p, zero elsewhere
    # Clip probabilities before log to avoid log(0).
    per_entry_cross_ent = - alpha * (pos_p_sub ** gamma) * log(tf.clip_by_value(y_pred, 1e-8, 1.0)) \
                          - (1 - alpha) * (neg_p_sub ** gamma) * log(tf.clip_by_value(1.0 - y_pred, 1e-8, 1.0))

    loss1=tf.reduce_sum(per_entry_cross_ent)

    # Symmetric KL between the two dropout passes (even vs odd batch rows).
    loss2 = kld(y_pred[::2], y_pred[1::2]) + kld(y_pred[1::2], y_pred[::2])

    return loss1 + tf.reduce_mean(loss2) / 4 * beta


def output_layer(first_scores, second_scores, third_scores, alpha, global_scores,num_class_list, name="output"):
    """Blend per-level local scores with global scores and split by level.

    Computes alpha * global + (1 - alpha) * concat(local levels), then slices
    the blended tensor back into the three hierarchy levels.

    Args:
        first_scores / second_scores / third_scores: local sigmoid scores for
            levels 1–3, shapes [batch, num_class_list[i]].
        alpha: blending weight given to the global scores.
        global_scores: [batch, sum(num_class_list)] global sigmoid scores,
            ordered level-1, level-2, level-3 to match the concatenation.
        num_class_list: per-level label counts, used for slicing.
        name: NOTE(review) — currently unused; callers pass 'result_' but no
            layer is named with it.

    Returns:
        Tuple of three tensors: blended scores for levels 1, 2, 3.
    """
    local_scores = Concatenate(axis=1)([first_scores, second_scores, third_scores])
    weighted_local_scores = Lambda(lambda x: (1 - alpha) * x)(local_scores)
    weighted_global_scores = Lambda(lambda x: alpha * x)(global_scores)
    scores = Add()([weighted_global_scores, weighted_local_scores])

    # Slice the blended tensor back into the three levels.
    return scores[:,:num_class_list[0]],\
        scores[:,num_class_list[0]:num_class_list[0]+num_class_list[1]],\
        scores[:,num_class_list[0]+num_class_list[1]:]


"""Calculate the Seesaw CrossEntropy loss.

    Args:
        cls_score (Tensor): The prediction with shape (N, C),
             C is the number of classes.
        labels (Tensor): The learning label of the prediction.
        label_weights (Tensor): Sample-wise loss weight.
        cum_samples (Tensor): Cumulative samples for each category.
        num_classes (int): The number of classes.
        p (float): The ``p`` in the mitigation factor.
        q (float): The ``q`` in the compensation factor.
        eps (float): The minimal value of divisor to smooth
             the computation of compensation factor
        reduction (str, optional): The method used to reduce the loss.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Defaults to None.

    Returns:
        Tensor: The calculated loss
"""

# def seesaw_ce_loss(y_true, y_pred, p=2, q=2.1, eps=1e-7, reduction='mean'):
#     cum_samples = tf.reduce_mean(y_true, axis=0)
#
#     y_true = tf.cast(y_true, tf.int32)
#     seesaw_weights = tf.ones_like(y_true, dtype=tf.float32)
#
#     # mitigation factor
#     if p > 0:
#         sample_ratio_matrix = cum_samples[None, :] / cum_samples[:, None]
#         index = tf.cast(sample_ratio_matrix < 1.0, tf.float32)
#         sample_weights =  tf.pow(sample_ratio_matrix, p) * index + (1 - index)
#         mitigation_factor = tf.gather(sample_weights, y_true, axis=1)
#         seesaw_weights = seesaw_weights * mitigation_factor
#
#     # compensation factor
#     if q > 0:
#         scores = y_pred
#         self_scores = scores[
#             torch.arange(0, len(scores)).to(scores.device).long(),
#             labels.long()]
#         score_matrix = scores / self_scores[:, None].clamp(min=eps)
#         index = (score_matrix > 1.0).float()
#         compensation_factor = score_matrix.pow(q) * index + (1 - index)
#         seesaw_weights = seesaw_weights * compensation_factor
#
#     cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))
#
#     loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')
#
#     if label_weights is not None:
#         label_weights = label_weights.float()
#     loss = weight_reduce_loss(
#         loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)
#     return loss


# def seesaw_ce_loss(y_true, y_pred, p=0.8, q=2.0, eps=1e-7, reduction='mean'):
#     # (batch_size,num_cls)
#     cum_samples = tf.reduce_mean(y_true, axis=0)
#     print("label count:",cum_samples)
#     print("")
#
#     y_true = tf.cast(y_true, tf.int32)
#     print('y_true y_pred:',y_true,y_pred)
#     print("")
#
#     seesaw_weights = tf.ones_like(y_true, dtype=tf.float32)
#     # Mitigation factor
#     if p > 0:
#         sample_ratio_matrix = tf.math.divide_no_nan(tf.expand_dims(cum_samples, axis=0),
#                                                     tf.expand_dims(cum_samples, axis=1))
#         print("sample divide matrix 1:",sample_ratio_matrix)
#         print("")
#         index = tf.cast(sample_ratio_matrix < 1.0, tf.float32)
#         print("index 1:",index)
#         print("")
#         sample_weights = tf.pow(sample_ratio_matrix, p) * index + (1 - index)
#         print("sample weight:",sample_weights)
#         mitigation_factor = tf.gather(sample_weights, y_true, axis=1)
#         print("m",mitigation_factor)
#         seesaw_weights = seesaw_weights * mitigation_factor
#         print("new seesaw weight:",seesaw_weights)
#     # Compensation factor
#     if q > 0:
#         scores = y_pred
#         self_scores = tf.gather(scores, tf.range(0, tf.shape(scores)[0]),axis=-1)
#         print("self score 1:",self_scores)
#         self_scores=tf.gather(self_scores,y_true,axis=-1)
#         print("self score 2:",self_scores)
#         score_matrix = scores[None,:] / (self_scores[:,None] + eps)
#         print("score matrix",score_matrix)
#         index = tf.cast(score_matrix > 1.0, tf.float32)
#         print("index2",index)
#         compensation_factor = tf.pow(score_matrix, q) * index + (1 - index)
#         print("c",compensation_factor)
#         seesaw_weights = seesaw_weights * compensation_factor
#
#     neg=1-y_true
#     neg=tf.cast(neg,tf.float32)
#     s=tf.multiply(tf.math.log(seesaw_weights),neg)
#     y_pred = y_pred +s
#
#     y_true=tf.cast(y_true,tf.float32)
#     loss = binary_crossentropy(y_true, y_pred)
#
#     if reduction == 'mean':
#         loss = tf.reduce_mean(loss)
#     elif reduction == 'sum':
#         loss = tf.reduce_sum(loss)
#
#     return loss


# def seesaw_loss(y_true, y_pred):
#
#         p=0.8
#         q=0.8
#         eps=1e-7
#
#         appearance= tf.reduce_sum(y_true, axis=0)
#         conditions = appearance[:, tf.newaxis] > appearance[tf.newaxis, :]
#         trues = (appearance[tf.newaxis, :] / appearance[:, tf.newaxis]) ** p
#         falses = tf.ones((appearance.shape[-1], appearance.shape[-1]), dtype=tf.float32)
#         M = tf.where(conditions, trues, falses)
#
#         conditions = y_pred[:, :,tf.newaxis] > y_pred[:,tf.newaxis, :]
#         trues = tf.ones(shape=(y_pred.shape[-1], y_pred.shape[-1]), dtype=tf.float32)
#         falses = (y_pred[:,tf.newaxis,:] / y_pred[:,:, tf.newaxis]) ** q
#         T = tf.where(conditions, trues, falses)
#
#         appearance= tf.reduce_sum(y_true, axis=0)
#         m_conditions = appearance[:, tf.newaxis] > appearance[tf.newaxis, :]
#         m_trues = (appearance[tf.newaxis, :] / appearance[:, tf.newaxis]) ** p
#         m_falses = tf.ones((appearance.shape[-1], appearance.shape[-1]), dtype=tf.float32)
#         m = tf.where(m_conditions, m_trues, m_falses)
#
#         # Compensation Factor
#         # only error sample need to compute Compensation Factor
#         c_condition = y_pred / (tf.reduce_sum(y_pred * y_true, axis=-1)[:, tf.newaxis])
#         c_condition = tf.stack([c_condition] * y_true.shape[-1], axis=1)
#         c_condition = c_condition * y_true[:, :, tf.newaxis]
#         falses = tf.ones([config.batch_size,c_condition.shape[1],c_condition.shape[2]], dtype=tf.float32)
#         c = tf.where(c_condition > 1, c_condition ** q, falses)
#
#         # Sij = Mij * Cij
#         s =tf.multiply(m[tf.newaxis, :, :] , c)
#         # softmax trick to prevent overflow
#         max_element = tf.reduce_max(y_pred, axis=-1)
#         y_pred = y_pred - max_element[:, tf.newaxis]
#         denominator = sum((
#                               (1 - y_true)[:, tf.newaxis, :]
#                               * s[tf.newaxis, :, :]
#                               * y_pred[:, tf.newaxis, :]),axis=-1) \
#                       + y_pred
#
#         sigma = y_pred / (denominator + eps)
#
#         loss = sum(y_true * tf.math.log(sigma + eps),axis=-1)
#         loss += sum((1-y_true) * tf.math.log((1-sigma) + eps),axis=-1)
#         loss=-loss
#         return tf.reduce_mean(loss)


# class DistibutionAgnosticSeesawLossWithLogits(tf.keras.layers.Layer):
#     def __init__(self, p=0.8, q=2, num_labels=2):
#         super(DistibutionAgnosticSeesawLossWithLogits, self).__init__()
#         self.eps = 1.0e-6
#         self.p = p
#         self.q = q
#         appearance = None
#         self.num_labels = num_labels
#
#     def call(self, logits, targets):
#
#         m_conditions = appearance[:, tf.newaxis] > appearance[tf.newaxis, :]
#         m_trues = (appearance[tf.newaxis, :] / appearance[:, tf.newaxis]) ** self.p
#         m_falses = tf.ones((len(appearance), len(appearance)), dtype=tf.float32)
#         m = tf.where(m_conditions, m_trues, m_falses)
#
#         # Compensation Factor
#         # only error sample need to compute Compensation Factor
#         probility = tf.nn.softmax(logits, axis=-1)
#         c_condition = probility / (tf.reduce_sum(probility * targets, axis=-1)[:, tf.newaxis])
#         c_condition = tf.stack([c_condition] * targets.shape[-1], axis=1)
#         c_condition = c_condition * targets[:, :, tf.newaxis]
#         falses = tf.ones(c_condition.shape, dtype=tf.float32)
#         c = tf.where(c_condition > 1, c_condition ** self.q, falses)
#
#         # Sij = Mij * Cij
#         s = m[tf.newaxis, :, :] * c
#         # softmax trick to prevent overflow
#         max_element = tf.reduce_max(logits, axis=-1)
#         logits = logits - max_element[:, tf.newaxis]
#         numerator = tf.exp(logits)
#         denominator = (
#                               (1 - targets)[:, tf.newaxis, :]
#                               * s[tf.newaxis, :, :]
#                               * tf.exp(logits)[:, tf.newaxis, :]).sum(axis=-1) \
#                       + tf.exp(logits)
#
#         sigma = numerator / (denominator + self.eps)
#         loss = (- targets * tf.math.log(sigma + self.eps)).sum(-1)
#         return tf.reduce_mean(loss)

# def local_plus_global_plus_L2_loss(num_classes_list, trainable_variables, l2_reg_lambda):
#     def real_loss(y_true, y_pred):
#         # loss = local+global+l2
#         # local=lo1+lo2+lo3
#         # lo1=binary_cross_entropy's sum 's mean in a batch
#
#
#         losses_1 = binary_crossentropy(
#             y_true[:, :num_classes_list[0]],
#             y_pred[:, :num_classes_list[0]])
#         losses_1 = mean(sum(losses_1))
#         # losses_1 = tf.reduce_mean(losses_1)
#
#         losses_2 = binary_crossentropy(
#             y_true[:, num_classes_list[0]:num_classes_list[0] + num_classes_list[1]],
#             y_pred[:, num_classes_list[0]:num_classes_list[0] + num_classes_list[1]])
#         losses_2 = mean(sum(losses_2))
#
#         losses_3 = binary_crossentropy(
#             y_true[:, num_classes_list[0] + num_classes_list[1]:],
#             y_pred[:, num_classes_list[0] + num_classes_list[1]:])
#         losses_3 = mean(sum(losses_3))
#
#         local_losses = losses_1 + losses_2 + losses_3
#
#         # Global Loss
#         global_losses = binary_crossentropy(y_true, y_pred)
#         global_losses = mean(sum(global_losses))
#
#         # L2 Loss
#         l2_losses = tf.add_n(
#             [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in trainable_variables]) * l2_reg_lambda
#
#         total_loss = local_losses + global_losses +l2_losses
#         return total_loss
#
#         # global版本
#         # 跷跷板seesaw loss
#         # p = 2
#         # q = 2.0
#         # # Calculate balance and penalty factors
#         # appearance = tf.reduce_sum(y_true, axis=0)  # Count of each label in y_true
#         # M = tf.where(tf.math.less_equal(appearance[:, tf.newaxis], appearance), tf.ones_like(appearance),
#         #              (appearance[:, tf.newaxis] / appearance) ** p)
#         # print("M is ",M)
#         # T=tf.where(tf.math.less_equal(y_pred[:, ], y_pred[:]), tf.ones_like(y_pred), (y_pred[:, ] / y_pred[:]) ** q)
#         # print("T is ",T)
#         # # Calculate S_ij values using balance and penalty factors
#         # S = M * T
#         # print("S is ",S)
#         #
#         # # Apply sigmoid function to y_pred and prevent overflow
#         # epsilon = 1.0e-6
#         # y_pred = tf.clip_by_value(y_pred, epsilon, 1.0 - epsilon)
#         # # S转置  列是一个样本看待其他样本的权重
#         # # (94i,94j)->(94j,94i)
#         # S=tf.transpose(S)
#         # # (none,94)*(94j,94i)->(none,94i)
#         # sum_should_add=tf.matmul(y_pred,S)
#         # # Calculate new y_pred using S_ij values
#         # y_pred = y_pred / (sum_should_add+y_pred)
#         # # Compute cross entropy from probabilities
#         # seesaw = y_true * tf.math.log(y_pred + epsilon) + (1 - y_true) * tf.math.log(1 - y_pred + epsilon)
#         #
#         # return -tf.reduce_mean(tf.reduce_sum(seesaw, axis=-1))
#
#
#         # new_y_pred=[]
#         #
#         # for bt in range(y_true.shape[0]):
#         #     y_true_one = y_true[bt]
#         #     y_pred_one = y_pred[bt]
#         #     # # y_true中的标签计数
#         #     for i in range(y_true_one.shape[0]):
#         #         if y_true_one[i] == 1:
#         #             # 数量+1
#         #             appearance[i] += 1
#         #     # 跷跷板seesaw loss
#         #     # S_ij=M_ij*C_ij
#         #     # M_ij= 1 if Ni<=Nj else =(Nj/Ni)e+p
#         #     S = [[]] * y_true_one.shape[0]
#         #     # 现在正确分类是i
#         #     for i in range(y_true_one.shape[0]):
#         #         #  其他分类的概率
#         #         for j in range(y_true_one.shape[0]):
#         #             if i == j:
#         #                 # 自己对自己 无效 方便后面运算
#         #                 S[i].append(0)
#         #                 continue
#         #             # 平衡因子：
#         #             # i对于j少，那么给i的值大;
#         #             # i对于j多，那么给i的值小
#         #             M_ij = 1 if appearance[i] <= appearance[j] else (appearance[j] / appearance[i]) ** p
#         #             # 惩罚因子：
#         #             # 分类正确，是i，那么给i的值为1；
#         #             # 分类错误，不是i，那么给i的值更大(惩罚更大)
#         #             C_ij = 1 if y_pred_one[i] >= y_pred_one[j] else (y_pred_one[j] / y_pred_one[i]) ** q
#         #
#         #             S[i].append(M_ij*C_ij)
#         #
#         #     #  sigmoid还原logit：可以做softmax版本的
#         #
#         #     # 1. 防止log过大过小
#         #     epsilon_ = _constant_to_tensor(epsilon(), y_pred_one.dtype.base_dtype)
#         #     y_pred_one = clip_ops.clip_by_value(y_pred_one, epsilon_, 1. - epsilon_)
#         #
#         #     # 2.重做y_pred
#         #     sum_pred = sum(y_pred_one)
#         #     y_pred_one = [j * sum_pred for j in y_pred_one]
#         #     # 3.构造新的
#         #     for i in range(y_true_one.shape[0]):
#         #         # sum but you
#         #         new_sum_except_curr = y_pred_one * math_ops.add(S[i], 0)
#         #         # resign
#         #         y_pred_one[i] = y_pred_one[i] / (new_sum_except_curr + y_pred_one[i])
#         #
#         #     new_y_pred.append(y_pred_one)
#         #
#         # new_y_pred=tf.convert_to_tensor(new_y_pred)
#         # # Compute cross entropy from probabilities.
#         # seesaw = y_true * math_ops.log(new_y_pred + epsilon())
#         # seesaw += (1 - y_true) * math_ops.log(1 - new_y_pred + epsilon())
#         # return -seesaw
#
#     return real_loss

# def lstm_layer(bert_embedding, lstm_hidden_size):
#     # 输入层，输入数据维度是 (batch_size, sequence_length-2, bert_hidden_)
#
#     # 创建前向和后向LSTM单元
#     drop_rate=config.drop_rate
#     lstm_fw_cell = LSTM(lstm_hidden_size, return_sequences=True,dropout=drop_rate)
#     lstm_bw_cell = LSTM(lstm_hidden_size, return_sequences=True, go_backwards=True,dropout=drop_rate)
#
#     # 创建双向LSTM层
#     # 全输出 (outputs_fw, outputs_bw)
#     # outputs_fw：(batch_size, sequence_length, lstm_hidden_size)
#     # outputs_bw：(batch_size, sequence_length, lstm_hidden_size)
#     bi_lstm_out = Bidirectional(lstm_fw_cell, backward_layer=lstm_bw_cell)(bert_embedding)
#
#     word_embedding=tf.concat(bi_lstm_out,axis=2)# (batch_size, sequence_length-2, lstm_hidden_size*2)
#
#     # 池化层，这里使用全局平均池化
#     # cls_cnn_feature = GlobalAveragePooling1D()(word_embedding)
#     # 意思同上
#     cls_cnn_feature=tf.reduce_mean(word_embedding,axis=1)# (batch_size, lstm_hidden_size*2)
#
#     return word_embedding,cls_cnn_feature

# def attention_layer(embedding, num_classes, attention_unit_size, name=""):
#     #  input_x:
#     # (batch_size,max_len,embedding)
#     embedding_size = embedding.shape[-1]
#     # (batch_size,att,embedding)
#     W_s1 = tf.Variable(tf.random.truncated_normal(
#         shape=[attention_unit_size, embedding_size],
#         stddev=0.1,
#         name="W_s1"
#     ))
#     # Create W_s2
#     # (batch_size,cls,att)
#     W_s2 = tf.Variable(tf.random.truncated_normal(
#         shape=[num_classes, attention_unit_size],
#         stddev=0.1,
#         name="W_s2"
#     ))
#     # 计算input_x的注意力矩阵
#     # 输入(batch_size,max_len,embedding)
#     # 输出(batch_size,cls,max_len)
#     # 将输入和W_s1进行矩阵乘法，然后应用tanh激活函数
#     # (batch_size,att,embedding)  *  (batch_size,embedding,max_len) = (batch_size,att,max_len)
#     attention_matrix = tf.matmul(W_s1, tf.transpose(embedding, perm=[0, 2, 1]))
#     attention_matrix = Lambda(lambda x: tf.nn.tanh(x))(attention_matrix)
#     # 将W_s2和注意力矩阵进行矩阵乘法
#     # (batch_size,cls,att) * (batch_size,att,max_len) = (batch_size,cls,max_len)
#     attention_matrix = tf.matmul(W_s2, attention_matrix)
#
#     # 计算注意力权重和输出
#     # attention_weight: [batch_size, cls, max_len]
#     attention_weight =\
#         Lambda(lambda x: tf.nn.softmax(x,axis=1), name=name + "attention")(attention_matrix)
#     # [batch_size,cls,max_len]*[batch_size,max_len,embedding]=[batch_size,cls,embedding]
#     attention_out =  tf.matmul(attention_weight, embedding)
#     # [batch_size,cls,embedding]-> [batch_size, embedding]
#     attention_out = Lambda(lambda x: tf.reduce_mean(x, axis=1))(attention_out)
#
#     return attention_weight, attention_out

# def fc_layer(x, name=""):
#     # return Dense(units=config.fc_size, activation='relu',name=name+"fc")(x)  # shape=[batch_size,fc_size]
#     num_units = x.get_shape().as_list()[-1]
#     W = tf.Variable(tf.random.truncated_normal(shape=[num_units, config.fc_size],
#                                         stddev=0.1, dtype=tf.float32), name="W")
#     b = tf.Variable(tf.constant(value=0.1, shape=[config.fc_size], dtype=tf.float32), name="b")
#     fc = xw_plus_b(x, W, b)
#     fc_out = tf.nn.relu(fc)
#
#     return fc_out
# def local_layer(local_fc, input_att_weight, num_classes, name=""):
#
#     # 旧版了 logit
#     # logits = Dense(num_classes, kernel_initializer='truncated_normal', bias_initializer=tf.constant_initializer(0.1))(local_fc)
#     # score = tf.keras.activations.sigmoid(logits)
#
#
#     num_units = local_fc.get_shape().as_list()[-1]
#
#     W = tf.Variable(tf.keras.backend.truncated_normal(shape=[num_units, num_classes],
#                                         stddev=0.1, dtype=tf.float32), name="W")
#     b = tf.Variable(tf.constant(value=0.1, shape=[num_classes], dtype=tf.float32), name="b")
#
#     logits = xw_plus_b(local_fc, W, b, name=name+"logits")
#     score = tf.sigmoid(logits, name=name+"scores")
#
#     scores_expanded = Lambda(lambda x: tf.expand_dims(x, -1))(score)
#     visual = Multiply()([input_att_weight, scores_expanded])
#     # softmax 概率和=1，每个标签出现符合概率比例
#     visual = Softmax(axis=1)(visual)
#     visual = Lambda(lambda x: tf.reduce_mean(x, axis=1),name=name+"visual")(visual)
#
#     return score,visual

# def global_layer(x,num_classes,name=""):
#     # 在Dense层中包括W和b
#     logits = Dense(num_classes, kernel_initializer='truncated_normal', bias_initializer=tf.constant_initializer(0.1),name=name+"logits")(x)
#     scores = tf.keras.activations.sigmoid(logits)
#     return logits,scores

# def linear_layer(x, output_size, initializer=None, scope="SimpleLinear"):
#     # linear = Dense(output_size, kernel_initializer=initializer, name=scope)(x)
#     # return linear
#
#     shape = x.get_shape().as_list()
#     if len(shape) != 2:
#         raise ValueError("Linear is expecting 2D arguments: {0}".format(str(shape)))
#     if not shape[1]:
#         raise ValueError("Linear expects shape[1] of arguments: {0}".format(str(shape)))
#     input_size = shape[1]
#
#     # Now the computation.
#     with variable_scope(scope):
#         W = get_variable("W", [input_size, output_size], dtype=x.dtype)
#         b = get_variable("b", [output_size], dtype=x.dtype, initializer=initializer)
#
#     return xw_plus_b(x, W, b)


#
# def highway_layer(x, size, num_layers=1, bias=-2.0,name='highway'):
#     # global
#     # input_:(batch_size,lstm_hidden_*2)
#
#     for idx in range(num_layers):
#         # h = Dense(size, name="highway_h_{0}".format(idx))(x)
#         h = Activation('relu')(linear_layer(x,size,scope=("highway_h_{0}".format(idx))))
#
#         # t = Dense(size, kernel_initializer=tf.constant_initializer(bias), name="highway_t_{0}".format(idx))(x)
#         t = Activation('tanh')(linear_layer(x,size,initializer=tf.constant_initializer(bias),
#                                           scope=("highway_t_{0}".format(idx))))
#
#         transformed_h = Multiply()([t, h])
#         carry_gate = Lambda(lambda x: 1.0 - x)(t)
#         transformed_x = Multiply()([carry_gate, x])
#
#         x = Add(name=name+str(idx+1))([transformed_h, transformed_x])
#
#     return x


if __name__ == "__main__":
    # 可以单独运行用于查看模型结构输出模型结构图
    config_path = config.bert_config_path
    checkpoint_path = config.bert_checkpoint_path
    # model = build_bert_textcnn_model(config_path, checkpoint_path, 65)
    model = build_bert_textcnn_model(config_path, checkpoint_path, [20, 30, 40])
    model.summary()
    # keras.utils.plot_model(model, to_file='./model/model.png', show_shapes=True)
