import tensorflow as tf
import math
from sklearn.metrics import precision_score, recall_score

from src.network.confusionMatrix import precision_recall

slim = tf.contrib.slim

##in_dim  ---> number of input feature-map channels
##on_dim  ---> number of output feature-map channels
def senet_blob(net, in_dim, on_dim, stride):
    """Bottleneck residual block with a squeeze-and-excitation gate.

    1x1 -> 3x3 -> 1x1 bottleneck convolutions; when stride > 1 both the
    main branch and the shortcut are downsampled with average pooling;
    when channel counts differ the shortcut gets a linear 1x1 projection;
    the main branch is re-weighted channel-wise by an SE gate before the
    residual add.
    """
    shortcut = net

    # The 1x1 convs run linear (activation_fn=None); BN/ReLU from the
    # surrounding arg_scope apply only through the 3x3 conv here.
    net = slim.conv2d(net, in_dim // 4, [1, 1], activation_fn=None)
    net = slim.conv2d(net, in_dim // 4, [3, 3])
    net = slim.conv2d(net, on_dim, [1, 1], activation_fn=None)

    if stride > 1:
        pool_size = [stride * 2 - 1, stride * 2 - 1]
        net = slim.avg_pool2d(net, pool_size, stride=stride, padding="SAME")
        shortcut = slim.avg_pool2d(shortcut, pool_size, stride=stride,
                                   padding="SAME")

    if in_dim != on_dim:
        # Linear 1x1 projection so the shortcut matches on_dim channels.
        shortcut = slim.conv2d(shortcut, on_dim, [1, 1], activation_fn=None)

    # Squeeze: global average pool over H, W (NHWC layout assumed).
    squeezed = tf.reduce_mean(net, axis=[1, 2])
    # Excite: bottleneck FC (reduction 16) -> ReLU -> FC -> sigmoid gate.
    gate = slim.fully_connected(squeezed, on_dim // 16)
    gate = tf.nn.relu(gate)
    gate = slim.fully_connected(gate, on_dim)  # shape: batchsize * on_dim
    gate = tf.nn.sigmoid(gate)
    net = net * tf.reshape(gate, [-1, 1, 1, on_dim])

    # Residual (skip) connection.
    return shortcut + net


def SENet(input_x, is_training=True, keep_prob=0.8):
    """ResNet-style backbone with squeeze-and-excitation blocks.

    Args:
        input_x: NHWC float input tensor.
        is_training: bool (or bool tensor) controlling batch-norm mode.
        keep_prob: dropout keep probability for the classifier head.

    Returns:
        Unnormalized 3-class logits of shape [batch, 3].
    """
    bn_param = {
        'is_training': is_training,
        'decay': 0.997,
        'epsilon': 1e-5,
        'scale': True,
        # Collect BN moving-average updates so the train op can depend on them.
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }

    with slim.arg_scope([slim.conv2d],
                        weights_regularizer=slim.l2_regularizer(0.00001),
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=bn_param):
        with slim.arg_scope([slim.batch_norm], **bn_param):
            # Stem: 3x3 conv + stride-2 average pool.
            net = slim.conv2d(input_x, 32, [3, 3])
            net = slim.avg_pool2d(net, [3, 3], stride=2, padding="SAME")

            # Five SE bottleneck stages, each downsampling spatially by 2.
            net = senet_blob(net, 32, 64, 2)
            net = senet_blob(net, 64, 128, 2)
            net = senet_blob(net, 128, 128, 2)
            net = senet_blob(net, 128, 256, 2)
            net = senet_blob(net, 256, 512, 2)

            # Head: global average pool -> FC(1024, ReLU) -> dropout -> logits.
            net = tf.reduce_mean(net, axis=[1, 2])
            net = slim.fully_connected(net, 1024)
            net = tf.nn.dropout(net, keep_prob=keep_prob)
            # Dropped the original's extra tf.nn.relu here: it was a no-op
            # (slim.fully_connected already applies ReLU and dropout keeps
            # activations non-negative).
            # Fix: the logits layer must be linear — the original inherited
            # slim.fully_connected's default ReLU, which clipped logits at 0
            # and starved gradients for the softmax cross-entropy loss.
            net = slim.fully_connected(net, 3, activation_fn=None)

            return net


def CNNNet(input_x, is_training=True, keep_prob=0.8):
    """Plain convolutional baseline (no residual / SE blocks).

    Args:
        input_x: NHWC float input tensor.
        is_training: bool (or bool tensor) controlling batch-norm mode.
        keep_prob: dropout keep probability for the classifier head.

    Returns:
        Unnormalized 3-class logits of shape [batch, 3].
    """
    bn_param = {
        'is_training': is_training,
        'decay': 0.997,
        'epsilon': 1e-5,
        'scale': True,
        # Collect BN moving-average updates so the train op can depend on them.
        'updates_collections': tf.GraphKeys.UPDATE_OPS
    }

    with slim.arg_scope([slim.conv2d],
                        weights_regularizer=slim.l2_regularizer(0.00001),
                        weights_initializer=slim.variance_scaling_initializer(),
                        activation_fn=tf.nn.relu,
                        normalizer_fn=slim.batch_norm,
                        normalizer_params=bn_param):
        with slim.arg_scope([slim.batch_norm], **bn_param):
            # Stem: 3x3 conv + stride-2 average pool.
            net = slim.conv2d(input_x, 32, [3, 3])
            net = slim.avg_pool2d(net, [3, 3], stride=2, padding="SAME")

            # Stage 1: two 3x3 convs, downsample by 2.
            net = slim.conv2d(net, 64, [3, 3])
            net = slim.conv2d(net, 64, [3, 3])
            net = slim.avg_pool2d(net, [3, 3], stride=2, padding="SAME")

            # Stage 2: two 3x3 convs, downsample by 2.
            net = slim.conv2d(net, 64, [3, 3])
            net = slim.conv2d(net, 64, [3, 3])
            net = slim.avg_pool2d(net, [3, 3], stride=2, padding="SAME")

            # Stage 3: widen to 128, downsample by 2.
            net = slim.conv2d(net, 128, [3, 3])
            net = slim.avg_pool2d(net, [3, 3], stride=2, padding="SAME")

            # Stage 4: widen to 256.
            net = slim.conv2d(net, 256, [3, 3])

            # Head: global average pool -> FC(1024, ReLU) -> dropout -> logits.
            net = tf.reduce_mean(net, axis=[1, 2])
            net = slim.fully_connected(net, 1024)
            net = tf.nn.dropout(net, keep_prob=keep_prob)
            # Dropped the original's extra tf.nn.relu here: it was a no-op
            # (slim.fully_connected already applies ReLU and dropout keeps
            # activations non-negative).
            # Fix: the logits layer must be linear — the original inherited
            # slim.fully_connected's default ReLU, which clipped logits at 0
            # and starved gradients for the softmax cross-entropy loss.
            net = slim.fully_connected(net, 3, activation_fn=None)

            return net


def SeNet_model(hps, one_demin, two_demin):
    """Build the SENet training graph: placeholders, loss, train op, metrics.

    Args:
        hps: hyper-parameter object; uses hps.learning_rate and
             hps.num_classes (project type — assumed attribute names,
             taken from the original usage).
        one_demin: input height.
        two_demin: input width.

    Returns:
        ((inputs, outputs, keep_prob, is_training),
         (loss, accuracy),
         (train_op, global_step, merged_summary, merged_summary_test))
    """
    # Placeholders / bookkeeping variables.
    inputs = tf.placeholder(tf.float32, [None, one_demin, two_demin, 1], name="inputs")
    outputs = tf.placeholder(tf.int32, [None], name="outputs")
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    is_training = tf.placeholder(dtype=tf.bool, name="is_training")
    global_step = tf.Variable(
        tf.zeros([], tf.int64), name="global_step", trainable=False)

    # Logits from the SENet backbone.
    # (CNNNet(inputs, is_training=is_training, keep_prob=keep_prob) is the
    # alternative baseline.)
    y_ = SENet(inputs, is_training=is_training, keep_prob=keep_prob)

    # Predictions and accuracy come from the raw logits.
    y_pred = tf.argmax(y_, 1, output_type=tf.int32, name="y_pred_model")
    correct_pred = tf.equal(y_pred, outputs)
    accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Exponentially decayed learning rate: multiplied by decay_rate every
    # decay_steps global steps.
    learning_rate = tf.train.exponential_decay(learning_rate=hps.learning_rate,
                                               global_step=global_step,
                                               decay_steps=20,
                                               decay_rate=0.96)

    # Fix 1: the loss now uses the same logits (y_) the predictions use.
    # The original fed the loss tf.layers.batch_normalization(y_) — batch
    # norm applied to the final logits — so the trained objective and the
    # inference path (y_pred) disagreed, and that BN layer's update ops were
    # created AFTER update_ops was collected, so its statistics were never
    # updated.
    # Fix 2: dropped the 0.01 * l2_loss(out_weights) term. out_weights was a
    # fresh [32, 3] variable used nowhere in the network; penalizing it only
    # drove the unused variable itself to zero.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        loss = tf.reduce_mean(
            tf.losses.sparse_softmax_cross_entropy(labels=outputs, logits=y_))
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step, name="trainop")

    # Fix 3: confusion_matrix takes (labels, predictions); the original
    # passed (y_pred, outputs), which transposed the matrix and therefore
    # swapped the per-class sensitivity/specificity computed from it.
    confusion_matrix = tf.contrib.metrics.confusion_matrix(outputs, y_pred,
                                                           num_classes=hps.num_classes,
                                                           dtype=tf.int32,
                                                           name="confusion_matrix")

    # Per-class sensitivity / specificity summaries (classes n / a / c).
    y_sensitivity_n, y_specificity_n, y_sensitivity_a, y_specificity_a, y_sensitivity_c, y_specificity_c = precision_recall(
        confusion_matrix)
    y_sensitivity_n_summary = tf.summary.scalar("y_sensitivity_n", y_sensitivity_n)
    y_specificity_n_summary = tf.summary.scalar("y_specificity_n", y_specificity_n)
    y_sensitivity_a_summary = tf.summary.scalar("y_sensitivity_a", y_sensitivity_a)
    y_specificity_a_summary = tf.summary.scalar("y_specificity_a", y_specificity_a)
    y_sensitivity_c_summary = tf.summary.scalar("y_sensitivity_c", y_sensitivity_c)
    y_specificity_c_summary = tf.summary.scalar("y_specificity_c", y_specificity_c)

    loss_summary = tf.summary.scalar("loss", loss)
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    learning_rate_summary = tf.summary.scalar("learning_rate", learning_rate)

    # Explicit merges instead of merge_all() so train and test summaries
    # stay separate.
    merged_summary = tf.summary.merge(
        [loss_summary, accuracy_summary, learning_rate_summary, y_sensitivity_n_summary, y_specificity_n_summary,
         y_sensitivity_a_summary, y_specificity_a_summary, y_sensitivity_c_summary, y_specificity_c_summary],
        name="merged_summary")
    merged_summary_test = tf.summary.merge([accuracy_summary, y_sensitivity_n_summary, y_specificity_n_summary,
                                            y_sensitivity_a_summary, y_specificity_a_summary, y_sensitivity_c_summary,
                                            y_specificity_c_summary], name="merged_summary_test")
    return ((inputs, outputs, keep_prob, is_training),
            (loss, accuracy),
            (train_op, global_step, merged_summary, merged_summary_test))