import sys

sys.path.append("/home/zxh/otu_classifier/")
import tensorflow as tf

from src.network.confusionMatrix import precision_recall


def separable_conv_block(x, output_channel_number, name):
    """Depthwise-separable convolution block.

    Applies an independent 3x3 convolution to each input channel
    (depthwise step), concatenates the per-channel results, then mixes
    them with a 1x1 convolution (pointwise step).

    Args:
    - x: input tensor in NHWC layout (channels last); the static channel
      dimension must be known at graph-construction time.
    - output_channel_number: number of channels produced by the final
      1x1 (pointwise) convolution.
    - name: variable scope for this block, so per-block layer names
      ('conv_0', ..., 'conv1_1') do not collide across blocks.

    Returns:
        The output tensor of the pointwise 1x1 convolution.
    """
    # variable_scope prevents name clashes: 'conv1' => 'scope_name/conv1'
    with tf.variable_scope(name):
        input_channel = x.get_shape().as_list()[-1]
        # Split x along the channel axis (axis=3) into input_channel
        # single-channel tensors: channel_wise_x = [channel1, channel2, ...]
        channel_wise_x = tf.split(x, input_channel, axis=3)
        output_channels = []
        for i, single_channel in enumerate(channel_wise_x):
            # Depthwise step: one independent 3x3 filter per input channel.
            output_channel = tf.layers.conv2d(single_channel,
                                              1,
                                              (3, 3),
                                              strides=(1, 1),
                                              padding='same',
                                              activation=tf.nn.relu,
                                              name='conv_%d' % i)
            output_channels.append(output_channel)
        concat_layers = tf.concat(output_channels, axis=3)
        # Pointwise step: 1x1 conv recombines the per-channel outputs into
        # output_channel_number channels.
        conv1_1 = tf.layers.conv2d(concat_layers,
                                   output_channel_number,
                                   (1, 1),
                                   strides=(1, 1),
                                   padding='same',
                                   activation=tf.nn.relu,
                                   name='conv1_1')
        return conv1_1


def mobileNet(hps):
    """Build a MobileNet-style TF1 graph for 3-class classification.

    Architecture: one plain conv layer, then six stages of
    (separable_conv_block x2 -> 2x2 max-pool), flatten, and a dense
    layer producing 3 logits. Also wires up the loss, accuracy,
    Adam training op with exponential LR decay, confusion-matrix-based
    sensitivity/specificity metrics, and TensorBoard summaries.

    Args:
    - hps: hyperparameter object; only hps.learning_rate is read here.

    Returns:
        ((inputs, outputs, keep_prob, is_training),
         (loss, accuracy),
         (train_op, global_step, merged_summary, merged_summary_test))
    """
    # Input images are [batch, 20, 26, 1] (NHWC); labels are class indices.
    # [None, 3072]
    inputs = tf.placeholder(tf.float32, [None, 20, 26, 1], name="inputs")
    outputs = tf.placeholder(tf.int32, [None], name="outputs")
    # NOTE(review): keep_prob is declared but never consumed anywhere in
    # this graph — dropout may have been intended but is not wired in; confirm.
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    is_training = tf.placeholder(dtype=tf.bool, name="is_training")
    # Step counter, incremented by optimizer.minimize below; drives LR decay.
    global_step = tf.Variable(
        tf.zeros([], tf.int64), name="global_step", trainable=False)
    # NOTE(review): out_weights only appears in the L2 penalty inside the
    # loss; it is not part of the forward pass — verify the intended
    # regularization target (the dense layer's kernel is NOT regularized).
    out_weights = tf.Variable(
        tf.truncated_normal([32, 3]))

    # conv1: initial feature-extraction layer (feature maps)
    conv1 = tf.layers.conv2d(inputs,
                             32,  # output channel number
                             (3, 3),  # kernel size
                             padding='same',
                             activation=tf.nn.relu,
                             name='conv1')
    # 2x2 max-pool halves each spatial dimension.
    pooling1 = tf.layers.max_pooling2d(conv1,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       padding='same',
                                       name="pool1")
    # Stage 2: two separable-conv blocks, then pool.
    separable_2a = separable_conv_block(pooling1,
                                        32,
                                        name="separable_2a")
    separable_2b = separable_conv_block(separable_2a,
                                        32,
                                        name="separable_2b")
    pooling2 = tf.layers.max_pooling2d(separable_2b,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       padding='same',
                                       name="pool2")
    # Stage 3.
    separable_3a = separable_conv_block(pooling2,
                                        32,
                                        name="separable_3a")
    separable_3b = separable_conv_block(separable_3a,
                                        32,
                                        name="separable_3b")
    pooling3 = tf.layers.max_pooling2d(separable_3b,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       padding='same',
                                       name="pool3")
    # Stage 4.
    separable_4a = separable_conv_block(pooling3,
                                        32,
                                        name="separable_4a")
    separable_4b = separable_conv_block(separable_4a,
                                        32,
                                        name="separable_4b")
    pooling4 = tf.layers.max_pooling2d(separable_4b,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       padding='same',
                                       name="pool4")
    # Stage 5.
    separable_5a = separable_conv_block(pooling4,
                                        32,
                                        name="separable_5a")
    separable_5b = separable_conv_block(separable_5a,
                                        32,
                                        name="separable_5b")
    pooling5 = tf.layers.max_pooling2d(separable_5b,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       padding='same',
                                       name="pool5")
    # Stage 6.
    separable_6a = separable_conv_block(pooling5,
                                        32,
                                        name="separable_6a")
    separable_6b = separable_conv_block(separable_6a,
                                        32,
                                        name="separable_6b")
    pooling6 = tf.layers.max_pooling2d(separable_6b,
                                       (2, 2),  # kernel size
                                       (2, 2),  # stride
                                       padding='same',
                                       name="pool6")
    # Flatten spatial dims and project to 3 class logits.
    flatten = tf.layers.flatten(pooling6)
    y_ = tf.layers.dense(flatten, 3, name="fc/fc2")

    """交叉熵损失函数"""
    # Cross-entropy loss on batch-normalized logits, plus an L2 penalty on
    # the (otherwise unused) out_weights variable.
    # NOTE(review): the loss is computed on the batch-normalized `out`, but
    # the predictions below (y_pred) use the raw logits `y_` — train/predict
    # mismatch; confirm this is intentional.
    out = tf.layers.batch_normalization(y_, training=is_training)
    # Batch-norm moving-average updates must run before the loss/train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        loss = tf.reduce_mean(
            tf.losses.sparse_softmax_cross_entropy(labels=outputs, logits=out) + 0.01 * tf.nn.l2_loss(out_weights))
    # loss = tf.losses.sparse_softmax_cross_entropy(labels=outputs, logits=y_)

    # Predicted class = argmax over the class dimension (axis 1) of raw logits.
    y_pred = tf.argmax(y_, 1, output_type=tf.int32, name="y_pred_model")  # argmax over classes
    # Element-wise correctness, e.g. [1,0,1,1,1,0,0,0]
    correct_prediction = tf.equal(y_pred, outputs)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64))

    with tf.name_scope('train_op'):
        # Exponentially decaying learning rate.
        learning_rate = tf.train.exponential_decay(learning_rate=hps.learning_rate,  # initial learning rate
                                                   global_step=global_step,
                                                   # global step used for decay; must be non-negative,
                                                   # incremented once per training batch
                                                   decay_steps=20,  # decay interval: LR is updated every decay_steps steps
                                                   decay_rate=0.96  # decay factor (the base in lr * rate^(step/decay_steps))
                                                   )
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(loss, global_step=global_step, name="trainop")

    # 3x3 confusion matrix of predictions vs. labels for the metrics below.
    confusion_matrix = tf.contrib.metrics.confusion_matrix(y_pred, outputs, num_classes=3,
                                                           dtype=tf.int32, name="confusion_matrix")

    # Per-class sensitivity/specificity (classes n, a, c) from the project's
    # precision_recall helper.
    y_sensitivity_n, y_specificity_n, y_sensitivity_a, y_specificity_a, y_sensitivity_c, y_specificity_c = precision_recall(
        confusion_matrix)
    y_sensitivity_n_summary = tf.summary.scalar("y_sensitivity_n", y_sensitivity_n)
    y_specificity_n_summary = tf.summary.scalar("y_specificity_n", y_specificity_n)
    y_sensitivity_a_summary = tf.summary.scalar("y_sensitivity_a", y_sensitivity_a)
    y_specificity_a_summary = tf.summary.scalar("y_specificity_a", y_specificity_a)
    y_sensitivity_c_summary = tf.summary.scalar("y_sensitivity_c", y_sensitivity_c)
    y_specificity_c_summary = tf.summary.scalar("y_specificity_c", y_specificity_c)

    loss_summary = tf.summary.scalar("loss", loss)
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    learning_rate_summary = tf.summary.scalar("learning_rate", learning_rate)
    # merged_summary = tf.summary.merge_all()  # merge all summary values together
    # Train summaries include loss/LR; the test merge omits them.
    merged_summary = tf.summary.merge(
        [loss_summary, accuracy_summary, learning_rate_summary, y_sensitivity_n_summary, y_specificity_n_summary,
         y_sensitivity_a_summary, y_specificity_a_summary, y_sensitivity_c_summary, y_specificity_c_summary],
        name="merged_summary")
    merged_summary_test = tf.summary.merge([accuracy_summary, y_sensitivity_n_summary, y_specificity_n_summary,
                                            y_sensitivity_a_summary, y_specificity_a_summary, y_sensitivity_c_summary,
                                            y_specificity_c_summary], name="merged_summary_test")
    return ((inputs, outputs, keep_prob, is_training),
            (loss, accuracy),
            (train_op, global_step, merged_summary, merged_summary_test))
