import tensorflow as tf
import math
from sklearn.metrics import precision_score, recall_score

from src.network.confusionMatrix import precision_recall


def residual_block(x, output_channel):
    """Two-conv residual block with a parameter-free shortcut.

    If output_channel equals the input channel count, the shortcut is the
    identity and the first conv uses stride 1. If output_channel is exactly
    twice the input channel count, the first conv uses stride 2 (halving the
    spatial dims) and the shortcut is average-pooled then zero-padded along
    the channel axis to match. Any other ratio raises.
    """
    in_channels = x.get_shape().as_list()[-1]
    if in_channels == output_channel:
        double_channels = False
        first_stride = (1, 1)
    elif in_channels * 2 == output_channel:
        double_channels = True
        first_stride = (2, 2)
    else:
        raise Exception("input channel can't match output channel")

    conv1 = tf.layers.conv2d(x,
                             output_channel,
                             (3, 3),
                             strides=first_stride,
                             padding="same",
                             activation=tf.nn.relu,
                             name="conv1")
    conv2 = tf.layers.conv2d(conv1,
                             output_channel,
                             (3, 3),
                             strides=(1, 1),
                             padding="same",
                             activation=tf.nn.relu,
                             name="conv2")

    if double_channels:
        # Pooling with matching window and stride halves each spatial dim,
        # aligning the shortcut with conv2's output:
        # [None, w, h, c] -> [None, w/2, h/2, c]
        shortcut = tf.layers.average_pooling2d(x,
                                               (2, 2),
                                               (2, 2),
                                               padding="same")
        # Zero-pad in_channels//2 channels on each side so the shortcut's
        # channel count doubles to output_channel.
        shortcut = tf.pad(shortcut,
                          [[0, 0],
                           [0, 0],
                           [0, 0],
                           [in_channels // 2, in_channels // 2]])
    else:
        shortcut = x
    return conv2 + shortcut


# x: input tensor; num_residual_blocks: residual blocks per stage;
# len(num_residual_blocks) gives the number of subsampling stages;
# num_filter_base: base channel count (doubled each stage).
# class_num is a parameter (rather than hard-coded) so the network
# can be reused across datasets with different numbers of classes.
def res_net(x,
            num_residual_blocks,
            num_filter_base,
            class_num,
            keep_prob):
    """Build a residual network and return its logits tensor.

    Args:
        x: input tensor of shape [None, width, height, channel].
        num_residual_blocks: list of ints; entry i is the number of
            residual blocks in stage i. The number of stages (and thus
            subsamplings) is len(num_residual_blocks).
        num_filter_base: channel count of the first stage; doubled at
            each subsequent stage (num_filter_base * 2 ** stage).
        class_num: number of output classes (width of the final dense layer).
        keep_prob: dropout keep probability fed to tf.contrib.layers.dropout.

    Returns:
        Logits tensor of shape [None, class_num] (no softmax applied).
    """
    num_subsampling = len(num_residual_blocks)
    layers = []
    with tf.variable_scope("conv0"):
        # Stem conv keeps spatial dims (stride 1, "same" padding).
        conv0 = tf.layers.conv2d(x, num_filter_base, (3, 3), padding="same", strides=(1, 1), activation=tf.nn.relu,
                                 name="conv0")
        layers.append(conv0)
    for sample_id in range(num_subsampling):
        for i in range(num_residual_blocks[sample_id]):
            with tf.variable_scope("conv%d_%d" % (sample_id, i)):
                # The first block of each stage after stage 0 doubles the
                # channels, which makes residual_block subsample by 2.
                conv = residual_block(
                    layers[-1],
                    num_filter_base * (2 ** sample_id))
                layers.append(conv)

    fc_init = tf.uniform_unit_scaling_initializer(factor=1.0)
    with tf.variable_scope('fc', initializer=fc_init):
        # Global average pooling over the spatial axes:
        # [None, width, height, channel] -> [None, channel]
        global_pool = tf.reduce_mean(layers[-1], [1, 2])
        fc1 = tf.layers.dense(global_pool,
                              32,
                              activation=tf.nn.relu,
                              name='fc1')
        # NOTE: tf.contrib.layers.dropout's second positional arg is the
        # KEEP probability (not the drop rate).
        fc1_dropout = tf.contrib.layers.dropout(fc1, keep_prob)
        logits = tf.layers.dense(fc1_dropout,
                                 class_num,
                                 name='fc2')
        layers.append(logits)
    return layers[-1]


def model_resNet(hps, one_demin,two_demin):
    """Build the full ResNet training graph: placeholders, loss, metrics,
    optimizer, and summaries.

    Args:
        hps: hyper-parameter object; this function reads hps.num_classes
            and hps.learning_rate.
        one_demin, two_demin: input image width/height used to shape the
            single-channel inputs placeholder.

    Returns:
        Three tuples:
            (inputs, outputs, keep_prob, is_training) - feed placeholders,
            (loss, accuracy) - scalar tensors,
            (train_op, global_step, merged_summary, merged_summary_test).
    """
    # Single-channel image placeholder: [None, one_demin, two_demin, 1].
    inputs = tf.placeholder(tf.float32, [None, one_demin, two_demin, 1], name="inputs")
    outputs = tf.placeholder(tf.int32, [None], name="outputs")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    is_training = tf.placeholder(dtype=tf.bool, name="is_training")
    global_step = tf.Variable(
        tf.zeros([], tf.int64), name="global_step", trainable=False)
    # NOTE(review): out_weights is L2-regularized in the loss below but is
    # never used anywhere in the forward pass, so the regularizer penalizes
    # random, unused weights. Also, the hard-coded shape [32, 3] assumes 3
    # classes while the network uses hps.num_classes -- confirm intent.
    out_weights = tf.Variable(
        tf.truncated_normal([32, 3]))

    # Logits from the residual network: [None, hps.num_classes].
    y_ = res_net(inputs, [1, 3, 1], 32, hps.num_classes, keep_prob)

    """交叉熵损失函数"""
    # (The bare string above is a leftover "cross-entropy loss" marker.)
    # Earlier loss variants kept for reference:
    # loss = tf.losses.sparse_softmax_cross_entropy(labels=outputs, logits=y_)
    # loss = tf.reduce_mean(
    #     tf.losses.sparse_softmax_cross_entropy(labels=outputs, logits=y_) + 0.01 * tf.nn.l2_loss(out_weights))

    # NOTE(review): batch norm is applied to the LOGITS and only feeds the
    # loss; predictions below use the raw y_ instead -- the train objective
    # and the prediction path therefore see different tensors. Confirm this
    # asymmetry is intentional.
    out = tf.layers.batch_normalization(y_, training=is_training)
    # Batch-norm moving-average updates live in UPDATE_OPS; the control
    # dependency makes them run whenever the loss is evaluated.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        loss = tf.reduce_mean(
            tf.losses.sparse_softmax_cross_entropy(labels=outputs, logits=out) + 0.01 * tf.nn.l2_loss(out_weights))

    # Predicted class = argmax over the class (second) axis.
    y_pred = tf.argmax(y_, 1, output_type=tf.int32, name="y_pred_model")
    # Element-wise correctness mask, e.g. [1,0,1,1,1,0,0,0] after the cast.
    correct_pred = tf.equal(y_pred, outputs)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    with tf.name_scope('train_op'):
        # Exponentially decaying learning rate.
        learning_rate = tf.train.exponential_decay(learning_rate=hps.learning_rate,  # initial learning rate
                                                   global_step=global_step,
                                                   # global step used for the decay computation; must be
                                                   # non-negative. Incremented once per batch by train_op.
                                                   decay_steps=20,  # decay interval: the rate is updated every decay_steps steps
                                                   decay_rate=0.96  # decay factor (the alpha in alpha^t)
                                                   )
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step, name="trainop")

    # Confusion matrix over the batch; consumed by precision_recall below.
    confusion_matrix = tf.contrib.metrics.confusion_matrix(y_pred, outputs, num_classes=hps.num_classes,
                                                           dtype=tf.int32, name="confusion_matrix")

    # Per-class sensitivity/specificity (suffixes n/a/c presumably name the
    # three classes -- verify against src.network.confusionMatrix).
    y_sensitivity_n, y_specificity_n, y_sensitivity_a, y_specificity_a, y_sensitivity_c, y_specificity_c = precision_recall(
        confusion_matrix)
    y_sensitivity_n_summary = tf.summary.scalar("y_sensitivity_n", y_sensitivity_n)
    y_specificity_n_summary = tf.summary.scalar("y_specificity_n", y_specificity_n)
    y_sensitivity_a_summary = tf.summary.scalar("y_sensitivity_a", y_sensitivity_a)
    y_specificity_a_summary = tf.summary.scalar("y_specificity_a", y_specificity_a)
    y_sensitivity_c_summary = tf.summary.scalar("y_sensitivity_c", y_sensitivity_c)
    y_specificity_c_summary = tf.summary.scalar("y_specificity_c", y_specificity_c)

    loss_summary = tf.summary.scalar("loss", loss)
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    learning_rate_summary = tf.summary.scalar("learning_rate", learning_rate)
    # Explicit merges (rather than merge_all) so train and test writers
    # record different summary sets: test omits loss and learning rate.
    # merged_summary = tf.summary.merge_all()
    merged_summary = tf.summary.merge(
        [loss_summary, accuracy_summary, learning_rate_summary, y_sensitivity_n_summary, y_specificity_n_summary,
         y_sensitivity_a_summary, y_specificity_a_summary, y_sensitivity_c_summary, y_specificity_c_summary],
        name="merged_summary")
    merged_summary_test = tf.summary.merge([accuracy_summary, y_sensitivity_n_summary, y_specificity_n_summary,
                                            y_sensitivity_a_summary, y_specificity_a_summary, y_sensitivity_c_summary,
                                            y_specificity_c_summary], name="merged_summary_test")
    return ((inputs, outputs, keep_prob, is_training),
            (loss, accuracy),
            (train_op, global_step, merged_summary, merged_summary_test))
