from tensorflow.keras.layers import Layer
import tensorflow.keras.backend as K


class LayerWithLoss(Layer):
    """Base layer for attaching a custom (possibly complex) loss to the graph.

    Subclasses implement ``compute_loss``; ``call`` registers that loss via
    ``add_loss`` and then forwards the inputs unchanged, optionally selecting
    a subset of them as the layer's output.

    Args:
        output_axis: ``None`` to return all inputs, an int to return a single
            input, or a list of ints to return several inputs.
    """

    def __init__(self, output_axis=None, **kwargs):
        super(LayerWithLoss, self).__init__(**kwargs)
        self.output_axis = output_axis

    def _select(self, items):
        # Pick the configured output(s) from *items* (inputs or shapes).
        axis = self.output_axis
        if axis is None:
            return items
        if isinstance(axis, list):
            return [items[i] for i in axis]
        return items[axis]

    def call(self, inputs, mask=None):
        # Register the subclass-defined loss, then pass inputs through.
        self.add_loss(self.compute_loss(inputs, mask))
        return self._select(inputs)

    def compute_loss(self, inputs, mask=None):
        # Subclass responsibility: return a scalar loss tensor.
        raise NotImplementedError

    def compute_output_shape(self, input_shape):
        # Output shape mirrors whichever inputs are selected.
        return self._select(input_shape)

    def get_config(self):
        config = super(LayerWithLoss, self).get_config()
        config.update({'output_axis': self.output_axis})
        return config

class LanguageModelLossLayer(LayerWithLoss):
    """Language-model cross-entropy loss that masks out padding positions.

    Accepts either ``[y_true, y_mask, y_pred]`` (the mask — segment_ids —
    marks exactly the tokens to be predicted) or ``[y_true, y_pred]`` (the
    mask, if any, comes from the Keras ``mask`` argument). Targets and
    predictions are shifted by one position against each other, as usual
    for autoregressive LM training.
    """

    def compute_loss(self, inputs, mask=None):
        if len(inputs) == 3:
            y_true, y_mask, y_pred = inputs
            # segment_ids mark exactly the positions to be predicted.
            # BUGFIX: cast to float so it can be multiplied with the loss.
            y_mask = K.cast(y_mask, K.floatx())[:, 1:]
        else:
            y_true, y_pred = inputs
            # BUGFIX: use `is None` — `mask == None` on a Keras mask
            # (tensor/list) does element-wise comparison rather than a
            # Python identity check.
            if mask is None or mask[1] is None:
                y_mask = None
            else:
                y_mask = K.cast(mask[1], K.floatx())[:, 1:]

        y_true = y_true[:, 1:]   # targets, shifted left by one
        y_pred = y_pred[:, :-1]  # predictions, aligned with the targets
        loss = K.sparse_categorical_crossentropy(y_true, y_pred)
        if y_mask is None:
            # BUGFIX: the original used y_mask = 1.0 here, so the final
            # division was K.sum(loss) / K.sum(1.0) — i.e. the *total*
            # loss, not the mean. Return the proper mean instead.
            return K.mean(loss)
        return K.sum(loss * y_mask) / K.sum(y_mask)


class SimbertModelLossLayer(LayerWithLoss):
    """Combined SimBERT loss: seq2seq cross-entropy + in-batch similarity.

    Expects ``inputs = [token_ids, segment_ids, cls_vector, mlm_probs]``.
    Samples are assumed to arrive in adjacent positive pairs, i.e. rows
    (0, 1), (2, 3), ... of the batch are paraphrases of each other.
    """

    def compute_loss(self, inputs, mask=None):
        loss1 = self.compute_loss_of_seq2seq(inputs, mask)
        loss2 = self.compute_loss_of_similarity(inputs, mask)
        self.add_metric(loss1, name='seq2seq_loss')
        self.add_metric(loss2, name='similarity_loss')
        return loss1 + loss2

    def compute_loss_of_seq2seq(self, inputs, mask=None):
        """Masked cross-entropy over the to-be-predicted (segment==1) part."""
        y_true, y_mask, _, y_pred = inputs
        y_true = y_true[:, 1:]   # target token ids
        y_mask = y_mask[:, 1:]   # segment_ids mark the part to predict
        y_pred = y_pred[:, :-1]  # predictions, shifted by one position
        loss = K.sparse_categorical_crossentropy(y_true, y_pred)
        return K.sum(loss * y_mask) / K.sum(y_mask)

    def get_labels_of_similarity(self, y_pred):
        """Label matrix: 1 where samples (i, i XOR 1) form a positive pair."""
        idxs = K.arange(0, K.shape(y_pred)[0])
        idxs_1 = idxs[None, :]
        # Partner index: 0<->1, 2<->3, ... via i + 1 - 2*(i % 2).
        idxs_2 = (idxs + 1 - idxs % 2 * 2)[:, None]
        labels = K.equal(idxs_1, idxs_2)
        return K.cast(labels, K.floatx())

    # Backward-compatible alias for the original (misspelled) method name.
    get_laberls_of_similary = get_labels_of_similarity

    def compute_loss_of_similarity(self, inputs, mask=None):
        """Scaled-softmax contrastive loss over cosine similarities."""
        _, _, y_pred, _ = inputs
        y_true = self.get_labels_of_similarity(y_pred)
        y_pred = K.l2_normalize(y_pred, axis=1)
        similarities = K.dot(y_pred, K.transpose(y_pred))
        idxs = K.arange(0, K.shape(y_pred)[0])
        # Subtract a huge constant on the diagonal so a sample never
        # matches itself in the softmax.
        diag_mask = K.cast(K.equal(idxs[None, :], idxs[:, None]), K.floatx()) * 1e12
        similarities = (similarities - diag_mask) * 30  # temperature scale
        loss = K.categorical_crossentropy(
            y_true, similarities, from_logits=True
        )
        # BUGFIX: reduce to a scalar. The original returned the per-sample
        # loss vector, which then broadcast against the scalar seq2seq loss
        # in `loss1 + loss2` and reached add_loss/add_metric as a non-scalar.
        return K.mean(loss)


