import keras
import numpy as np
import tensorflow as tf
from keras.layers import Concatenate, Input, Lambda, UpSampling2D
from keras.models import Model
from tensorflow._api.v1 import dtypes

from utils.utils import compose

def _categorical_ghm_loss(bins=30, momentum=0.75):
    """Return a multi-class GHM (Gradient Harmonizing Mechanism) loss function.

    Gradients are averaged within each bin (i.e. the gradient distribution is
    flattened); pushed back into the formula this is equivalent to averaging
    the loss per bin.

    Formula:
        loss = sum(crossentropy_loss(p_i, p*_i) / GD(g_i))
        GD(g) = S_ind(g) / delta = S_ind(g) * M
        S_ind(g) = momentum * S_ind(g) + (1 - momentum) * R_ind(g)
        R_ind(g): number of samples whose gradient g = |p - p*| falls into
                  the bin [(i-1)*delta, i*delta)
        M = 1/delta is a constant; dropping it only changes the step size.

    Parameters (paper defaults):
        bins -- number of gradient bins, default 30
        momentum -- EMA coefficient for the per-bin counts; the paper reports
                    results are not sensitive to it

    Returns:
        ghm_class_loss(y_truth, y_pred, valid_mask) -> per-sample loss tensor
        (cross-entropy summed over the class axis, density-weighted).
    """
    K = keras.backend  # hoisted alias; `import keras` is required at file level

    # Bin edges [0, 1/bins, 2/bins, ..., 0.0667, 0.1, 0.1333, ..., 1]:
    # bins + 1 points, reshaped to (bins + 1, 1, 1) so each edge broadcasts
    # against a (batch, classes) gradient tensor.
    edges = np.array([i / bins for i in range(bins + 1)])
    edges = np.expand_dims(np.expand_dims(edges, axis=-1), axis=-1)
    acc_sum = 0
    if momentum > 0:
        acc_sum = tf.zeros(shape=(bins,), dtype=tf.float32)

    def ghm_class_loss(y_truth, y_pred, valid_mask):
        epsilon = K.epsilon()
        # Clip element-wise so the log() below stays finite.
        y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)
        # 1. Gradient distribution of this mini-batch: g = |p - p*| -> R_ind(g).
        gradient = K.abs(y_truth - y_pred)
        # NOTE(review): an earlier variant used only the argmax class's
        # gradient as each sample's representative gradient; letting every
        # class contribute (as below) reportedly worked better in experiments.

        # Assign each gradient to its half-open bin [edge_i, edge_{i+1}) and
        # count the valid samples per bin.
        grads_bin = tf.logical_and(tf.greater_equal(gradient, edges[:-1, :, :]),
                                   tf.less(gradient, edges[1:, :, :]))
        valid_bin = tf.boolean_mask(grads_bin, valid_mask, name='valid_gradient', axis=1)
        valid_bin = tf.reduce_sum(tf.cast(valid_bin, dtype=tf.float32), axis=(1, 2))
        # 2. Update the exponential moving average of the bin counts: S_ind(g).
        #    NOTE(review): reassigning through `nonlocal` carries state across
        #    calls in eager mode; under graph tracing this adds ops per call --
        #    verify against how the loss is wired into the model.
        nonlocal acc_sum
        acc_sum = tf.add(momentum * acc_sum, (1 - momentum) * valid_bin,
                         name='update_bin_number')
        # sample_num = tf.reduce_sum(acc_sum)  # multiplying by the total made results worse
        # 3. Gradient density GD(g) for every (sample, class) position.
        #    tf.where(grads_bin) yields (bin, sample, class) indices; hoist it
        #    so it is computed once instead of twice.
        bin_indices = tf.where(grads_bin)
        position = tf.slice(bin_indices, [0, 1], [-1, 2])
        value = tf.gather_nd(acc_sum, tf.slice(bin_indices, [0, 0], [-1, 1]))  # * bins
        grad_density = tf.sparse.SparseTensor(
            indices=position, values=value,
            dense_shape=tf.shape(gradient, out_type=tf.int64))
        grad_density = tf.sparse.to_dense(grad_density, validate_indices=False)
        # Invalid positions get density 1 so they pass through the division
        # unchanged instead of dividing by zero.
        grad_density = (grad_density * tf.expand_dims(valid_mask, -1)
                        + (1 - tf.expand_dims(valid_mask, -1)))

        # 4. Density-weighted cross-entropy, summed over the class axis.
        cross_entropy = -y_truth * K.log(y_pred)
        # loss = cross_entropy / grad_density * sample_num
        loss = cross_entropy / grad_density
        loss = K.sum(loss, axis=1)
        return loss
    return ghm_class_loss