import math

import tensorflow as tf

from src.config import params


def create_model(hps, num_classes):
    """Build a TF1 LSTM classification graph.

    Args:
        hps: hyper-parameter object; reads num_timesteps, batch_size,
            num_lstm_layers, num_lstm_nodes, num_fc_nodes, learning_rate.
        num_classes: number of output classes for the final dense layer.

    Returns:
        ((inputs, outputs, keep_prob),          # feed placeholders
         (loss, accuracy),                      # scalar metrics
         (train_op, global_step, merged_summary, merged_summary_test))
    """
    num_timesteps = hps.num_timesteps
    batch_size = hps.batch_size

    inputs = tf.placeholder(tf.float32, (batch_size, num_timesteps), name="inputs")
    outputs = tf.placeholder(tf.int32, (batch_size,), name="outputs")
    # Fraction of activations to KEEP during dropout (feed 1.0 at eval time).
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")
    # Counts completed training steps; excluded from training itself.
    global_step = tf.Variable(
        tf.zeros([], tf.int64), name="global_step", trainable=False)

    # Add a trailing feature dim: [batch_size, num_timesteps] ->
    # [batch_size, num_timesteps, 1] so dynamic_rnn sees a 3-D tensor.
    em_inputs = tf.expand_dims(inputs, -1)

    # Stacked LSTM with per-layer output dropout.
    scale = 1.0 / math.sqrt(hps.num_lstm_nodes[-1]) / 3.0
    lstm_init = tf.random_uniform_initializer(-scale, scale)
    with tf.variable_scope('lstm_nn', initializer=lstm_init):
        cells = []
        for i in range(hps.num_lstm_layers):
            cell = tf.contrib.rnn.BasicLSTMCell(
                hps.num_lstm_nodes[i],
                state_is_tuple=True)
            cell = tf.contrib.rnn.DropoutWrapper(
                cell,
                output_keep_prob=keep_prob)
            cells.append(cell)
        cell = tf.contrib.rnn.MultiRNNCell(cells)

        initial_state = cell.zero_state(batch_size, tf.float32)
        # rnn_outputs: [batch_size, num_timesteps, num_lstm_nodes[-1]]
        rnn_outputs, _ = tf.nn.dynamic_rnn(
            cell, em_inputs, initial_state=initial_state)
        # Classify from the last timestep's output only.
        last = rnn_outputs[:, -1, :]

    fc_init = tf.uniform_unit_scaling_initializer(factor=1.0)
    with tf.variable_scope('fc', initializer=fc_init):
        fc1 = tf.layers.dense(last,
                              hps.num_fc_nodes,
                              activation=tf.nn.relu,
                              name='fc1')
        fc1_dropout = tf.contrib.layers.dropout(fc1, keep_prob)
        logits = tf.layers.dense(fc1_dropout,
                                 num_classes,
                                 name='fc2')

    with tf.name_scope('metrics'):
        softmax_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=outputs)
        loss = tf.reduce_mean(softmax_loss)
        # argmax over the class axis is the predicted label, e.g.
        # [0, 1, 5, 4, 2] -> 2 (index of the maximum).
        y_pred = tf.argmax(tf.nn.softmax(logits),
                           1,
                           output_type=tf.int32, name="y_pred_model")
        correct_pred = tf.equal(outputs, y_pred)
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    with tf.name_scope('train_op'):
        for var in tf.trainable_variables():
            tf.logging.info('variable name: %s' % (var.name))
        # Decaying learning rate:
        # lr = hps.learning_rate * 0.96 ** (global_step / 20)
        # (staircase defaults to False, so the decay is continuous).
        learning_rate = tf.train.exponential_decay(
            learning_rate=hps.learning_rate,
            global_step=global_step,
            decay_steps=20,
            decay_rate=0.96)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        # minimize() defaults to all trainable variables, so no var_list needed.
        train_op = optimizer.minimize(loss, global_step=global_step, name="trainop")

    loss_summary = tf.summary.scalar("loss", loss)
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    learning_rate_summary = tf.summary.scalar("learning_rate", learning_rate)
    # Training summary includes the learning rate; the test-time summary
    # only tracks accuracy (loss/lr are meaningless at eval).
    merged_summary = tf.summary.merge(
        [loss_summary, accuracy_summary, learning_rate_summary],
        name="merged_summary")
    merged_summary_test = tf.summary.merge(
        [accuracy_summary], name="merged_summary_test")
    return ((inputs, outputs, keep_prob),
            (loss, accuracy),
            (train_op, global_step, merged_summary, merged_summary_test))


def create_cnn_model(hps, num_classes):
    """Build a TF1 Conv1D classification graph.

    Args:
        hps: hyper-parameter object; reads num_timesteps, batch_size,
            num_filters, num_kernel_size, num_fc_nodes, learning_rate.
        num_classes: number of output classes for the final dense layer.

    Returns:
        ((inputs, outputs, keep_prob),          # feed placeholders
         (loss, accuracy),                      # scalar metrics
         (train_op, global_step, merged_summary, merged_summary_test))
    """
    inputs = tf.placeholder(
        tf.float32, (hps.batch_size, hps.num_timesteps), name="inputs")
    outputs = tf.placeholder(tf.int32, (hps.batch_size,), name="outputs")
    # Fraction of activations to KEEP during dropout (feed 1.0 at eval time).
    keep_prob = tf.placeholder(tf.float32, name="keep_prob")

    # Counts completed training steps; excluded from training itself.
    global_step = tf.Variable(
        tf.zeros([], tf.int64), name="global_step", trainable=False)

    # [batch, timesteps] -> [batch, timesteps, 1] so conv1d sees channels.
    em_inputs = tf.expand_dims(inputs, -1)

    init_scale = 1.0 / math.sqrt(hps.num_filters) / 3.0
    conv_init = tf.random_uniform_initializer(-init_scale, init_scale)
    with tf.variable_scope('cnn', initializer=conv_init):
        # conv_out: [batch_size, reduced_timesteps, num_filters]
        conv_out = tf.layers.conv1d(
            em_inputs,
            hps.num_filters,
            hps.num_kernel_size,
            activation=tf.nn.relu,
        )
        # Global max pooling over time collapses the sequence axis.
        pooled = tf.reduce_max(conv_out, axis=[1])

    dense_init = tf.uniform_unit_scaling_initializer(factor=1.0)
    with tf.variable_scope('fc', initializer=dense_init):
        hidden = tf.layers.dense(
            pooled, hps.num_fc_nodes, activation=tf.nn.relu, name='fc1')
        hidden_dropped = tf.contrib.layers.dropout(hidden, keep_prob)
        logits = tf.layers.dense(hidden_dropped, num_classes, name='fc2')

    with tf.name_scope('metrics'):
        per_example_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=outputs)
        loss = tf.reduce_mean(per_example_loss)
        # argmax over the class axis is the predicted label, e.g.
        # [0, 1, 5, 4, 2] -> 2 (index of the maximum).
        predictions = tf.argmax(
            tf.nn.softmax(logits), 1, output_type=tf.int32, name="y_pred_model")
        hits = tf.equal(outputs, predictions)
        accuracy = tf.reduce_mean(tf.cast(hits, tf.float32))

    with tf.name_scope('train_op'):
        # Decaying learning rate:
        # lr = hps.learning_rate * 0.96 ** (global_step / 20)
        # (staircase defaults to False, so the decay is continuous).
        learning_rate = tf.train.exponential_decay(
            learning_rate=hps.learning_rate,
            global_step=global_step,
            decay_steps=20,
            decay_rate=0.96)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        train_op = optimizer.minimize(
            loss, global_step=global_step, name="trainop")

    loss_summary = tf.summary.scalar("loss", loss)
    accuracy_summary = tf.summary.scalar("accuracy", accuracy)
    learning_rate_summary = tf.summary.scalar("learning_rate", learning_rate)
    # Training summary includes the learning rate; the test-time summary
    # only tracks accuracy.
    merged_summary = tf.summary.merge(
        [loss_summary, accuracy_summary, learning_rate_summary],
        name="merged_summary")
    merged_summary_test = tf.summary.merge(
        [accuracy_summary], name="merged_summary_test")
    return ((inputs, outputs, keep_prob),
            (loss, accuracy),
            (train_op, global_step, merged_summary, merged_summary_test))

