#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 18-8-31
import os
import re

import numpy as np
import tensorflow as tf
from tensorflow.nn.rnn_cell import LSTMCell


class RNNModel:
    """LSTM sequence classifier (TensorFlow 1.x static graph).

    Pipeline: input -> [optional embedding lookup] -> LSTM ->
    [attention reduction | final hidden state] -> dense -> dropout ->
    softmax logits over ``num_classes`` classes.

    Feeds: ``self.input``, ``self.labels`` (one-hot), ``self.keep_prob``
    (keep probability: 1.0 at inference, ``self.dropout`` at training).
    """

    def __init__(self, num_rnn_unit, num_hidden_unit, learning_rate=0.001, dropout=0.5, seq_len=30,
                 emb_size=128, use_attention=False, use_embedding_layer=False, vocab_size=-1,
                 num_classes=31):
        """Build the graph and create a Saver.

        Args:
            num_rnn_unit: Number of LSTM units.
            num_hidden_unit: Width of the dense layer after the RNN.
            learning_rate: RMSProp learning rate.
            dropout: Keep probability fed to ``keep_prob`` during training.
            seq_len: Fixed input sequence length.
            emb_size: Embedding / input feature dimension.
            use_attention: If True, reduce RNN outputs with attention instead
                of taking the final hidden state.
            use_embedding_layer: If True, inputs are int token ids and an
                embedding matrix is learned; requires ``vocab_size > 0``.
            vocab_size: Vocabulary size (only used with use_embedding_layer).
            num_classes: Number of output classes. Defaults to 31, which was
                the original hard-coded label width, so existing callers are
                unaffected.
        """
        self.rnn_unit = num_rnn_unit
        self.hidden_unit = num_hidden_unit
        self.seq_len = seq_len
        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self.dropout = dropout
        self.num_classes = num_classes
        self.use_embedding_layer = use_embedding_layer
        if use_embedding_layer:
            # Token ids; the embedding lookup happens in _build().
            self.input = tf.placeholder(tf.int32, (None, self.seq_len))
        else:
            # Pre-computed feature vectors.
            self.input = tf.placeholder(tf.float32, (None, self.seq_len, self.emb_size))
        # One-hot labels (was hard-coded to 31 classes).
        self.labels = tf.placeholder(tf.int32, (None, num_classes))
        self.global_step = tf.Variable(0, trainable=False, name='global_step')
        self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')
        self.learning_rate = learning_rate
        self.use_attention = use_attention
        self._build()
        self.saver = tf.train.Saver()

    def _attention(self, inputs, attention_size, time_major=False, return_alphas=False):
        """
        Attention mechanism layer which reduces RNN/Bi-RNN outputs with Attention vector.
        The idea was proposed in the article by Z. Yang et al., "Hierarchical Attention Networks
         for Document Classification", 2016: http://www.aclweb.org/anthology/N16-1174.
        Variables notation is also inherited from the article

        Args:
            inputs: The Attention inputs.
                Matches outputs of RNN/Bi-RNN layer (not final state):
                    In case of RNN, this must be RNN outputs `Tensor`:
                        If time_major == False (default), this must be a tensor of shape:
                            `[batch_size, max_time, cell.output_size]`.
                        If time_major == True, this must be a tensor of shape:
                            `[max_time, batch_size, cell.output_size]`.
                    In case of Bidirectional RNN, this must be a tuple (outputs_fw, outputs_bw) containing the forward and
                    the backward RNN outputs `Tensor`.
                        If time_major == False (default),
                            outputs_fw is a `Tensor` shaped:
                            `[batch_size, max_time, cell_fw.output_size]`
                            and outputs_bw is a `Tensor` shaped:
                            `[batch_size, max_time, cell_bw.output_size]`.
                        If time_major == True,
                            outputs_fw is a `Tensor` shaped:
                            `[max_time, batch_size, cell_fw.output_size]`
                            and outputs_bw is a `Tensor` shaped:
                            `[max_time, batch_size, cell_bw.output_size]`.
            attention_size: Linear size of the Attention weights.
            time_major: The shape format of the `inputs` Tensors.
                If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
                If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
                Using `time_major = True` is a bit more efficient because it avoids
                transposes at the beginning and end of the RNN calculation.  However,
                most TensorFlow data is batch-major, so by default this function
                accepts input and emits output in batch-major form.
            return_alphas: Whether to return attention coefficients variable along with layer's output.
                Used for visualization purpose.
        Returns:
            The Attention output `Tensor`.
            In case of RNN, this will be a `Tensor` shaped:
                `[batch_size, cell.output_size]`.
            In case of Bidirectional RNN, this will be a `Tensor` shaped:
                `[batch_size, cell_fw.output_size + cell_bw.output_size]`.
        """

        if isinstance(inputs, tuple):
            # In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
            inputs = tf.concat(inputs, 2)

        if time_major:
            # (T,B,D) => (B,T,D).  NOTE: a list of per-step (B,D) tensors (as
            # produced by tf.nn.static_rnn) is implicitly stacked to (T,B,D)
            # by convert_to_tensor before the transpose.
            inputs = tf.transpose(inputs, [1, 0, 2])

        hidden_size = inputs.shape[2].value  # D value - hidden size of the RNN layer

        # Trainable parameters
        w_omega = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
        b_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
        u_omega = tf.Variable(tf.random_normal([attention_size], stddev=0.1))

        with tf.name_scope('v'):
            # Applying fully connected layer with non-linear activation to each of the B*T timestamps;
            #  the shape of `v` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
            v = tf.tanh(tf.tensordot(inputs, w_omega, axes=1) + b_omega)

        # For each of the timestamps its vector of size A from `v` is reduced with `u` vector
        vu = tf.tensordot(v, u_omega, axes=1, name='vu')  # (B,T) shape
        alphas = tf.nn.softmax(vu, name='alphas')  # (B,T) shape

        # Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
        output = tf.reduce_sum(inputs * tf.expand_dims(alphas, -1), 1)

        if not return_alphas:
            return output
        else:
            return output, alphas

    def _build(self):
        """Construct the forward graph, loss, optimizer and summaries."""
        if self.use_embedding_layer:
            assert self.vocab_size > 0

            embedding = tf.get_variable('embedding', (self.vocab_size, self.emb_size), tf.float32,
                                        initializer=tf.initializers.random_uniform(-1.0, 1.0))
            embedded = tf.nn.embedding_lookup(embedding, self.input)

        else:
            embedded = self.input

        with tf.variable_scope('RNN'):
            lstm_cell = LSTMCell(self.rnn_unit, activation=tf.tanh, initializer=tf.initializers.glorot_uniform())
            # static_rnn consumes a time-major list of (batch, emb) tensors.
            rnn_outputs, rnn_states = tf.nn.static_rnn(lstm_cell, tf.unstack(embedded, self.seq_len, 1),
                                                       dtype=tf.float32)
        if self.use_attention:
            # rnn_outputs is a time-major list, hence time_major=True.
            att_output, self.alphas = self._attention(rnn_outputs, self.rnn_unit, time_major=True, return_alphas=True)
            flatten = att_output
        else:
            # Use the final LSTM hidden state as the sequence representation.
            flatten = rnn_states.h
        dense1 = tf.layers.dense(flatten, self.hidden_unit, activation=tf.tanh,
                                 kernel_initializer=tf.initializers.glorot_uniform(),
                                 bias_initializer=tf.initializers.zeros())
        # BUG FIX: tf.layers.dropout(dense1, self.keep_prob) interpreted the
        # fed *keep* probability as the drop `rate`, and its `training` flag
        # defaults to False, so dropout was never applied.  tf.nn.dropout
        # takes keep_prob directly, matching the values fed in train()/predict().
        dropout = tf.nn.dropout(dense1, keep_prob=self.keep_prob)
        self.logits = tf.layers.dense(dropout, self.num_classes,
                                      kernel_initializer=tf.initializers.glorot_uniform(),
                                      bias_initializer=tf.initializers.zeros())

        with tf.variable_scope('losses'):
            # Labels are one-hot, so the _v2 softmax cross entropy applies.
            self.loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(labels=tf.cast(self.labels, tf.float32),
                                                           logits=self.logits))

        with tf.variable_scope('outputs'):
            self.output = tf.argmax(self.logits, axis=1, name='output_tensor')

        self.optimizer = tf.train.RMSPropOptimizer(self.learning_rate)
        self.optimize_step = self.optimizer.minimize(self.loss, self.global_step)

        tf.summary.scalar('losses', self.loss)
        self.merged = tf.summary.merge_all()

    def compute_loss(self, sess: tf.Session, x, y):
        """Return the mean cross-entropy loss on (x, y) without dropout."""
        return sess.run(self.loss, feed_dict={self.input: x,
                                              self.labels: y,
                                              self.keep_prob: 1.0})

    def train(self, sess: tf.Session, x, y, summary: tf.summary.FileWriter = None):
        """Run one optimization step; optionally log summaries to `summary`."""
        if summary:
            # Fetch the global step in the same run so the summary is logged
            # at the correct step (previously add_summary() got no step and
            # every scalar landed on step 0).
            history, step, _ = sess.run([self.merged, self.global_step, self.optimize_step],
                                        feed_dict={self.input: x,
                                                   self.labels: y,
                                                   self.keep_prob: self.dropout})
            summary.add_summary(history, global_step=step)

        else:
            sess.run(self.optimize_step,
                     feed_dict={self.input: x,
                                self.labels: y,
                                self.keep_prob: self.dropout})

    def predict(self, sess: tf.Session, x):
        """Return predicted class indices for x (dropout disabled)."""
        pred = sess.run(self.output,
                        feed_dict={self.input: x, self.keep_prob: 1.0})
        return pred

    def compute_accuracy(self, sess: tf.Session, x, y):
        """Return accuracy of predictions on x against one-hot labels y."""
        pred = self.predict(sess, x)
        acc = np.mean(np.equal(pred, np.argmax(y, 1)))
        return acc

    def save(self, sess, acc, save_dir='smp_saved_model'):
        """Save a checkpoint whose filename embeds the given accuracy."""
        os.makedirs(save_dir, exist_ok=True)
        # '{:.4f}' (4 decimal places) — the original '{:4f}' was a typo for a
        # minimum field width of 4 with default precision.
        save_path = os.path.join(save_dir, 'model-acc{:.4f}.ckpt'.format(acc))
        self.saver.save(sess, save_path, global_step=self.global_step)
        print('saved model to:', save_path)

    def load_model(self, sess, save_path):
        """Restore variables from an explicit checkpoint path."""
        self.saver.restore(sess, save_path)
        print('loaded model from', save_path)

    def start_or_continue_training(self, sess: tf.Session, checkpoint_dir):
        """Restore the latest checkpoint in `checkpoint_dir` if one exists,
        otherwise initialize all variables.

        :param sess: active session
        :param checkpoint_dir: directory holding checkpoints
        :return: the best dev accuracy so far (parsed from the checkpoint
            filename written by save()), or 0 when starting fresh
        """
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt:
            model_path = ckpt.model_checkpoint_path
            # save() embeds the accuracy as a float in the filename.
            max_dev_acc = float(re.findall(r'\d+\.\d+', model_path)[0])
            self.saver.restore(sess, model_path)
            print("load checkpoint:", model_path)

        else:
            max_dev_acc = 0
            sess.run(tf.global_variables_initializer())
        return max_dev_acc


if __name__ == '__main__':
    # Smoke-test training on random data.
    model = RNNModel(128, 50)

    rng = np.random.RandomState(100)
    data_x = rng.randint(0, 10000, (500, 30, 128))
    # BUG FIX: the labels placeholder is shaped (None, 31), but the original
    # fed a (500, 1) array of 0/1 values, which fails the placeholder shape
    # check.  Feed one-hot vectors over the model's 31 classes instead.
    y = np.eye(31, dtype=np.int32)[rng.randint(0, 31, 500)]
    print(data_x.shape)

    with tf.Session() as sess:
        summary = tf.summary.FileWriter('train_log', sess.graph)
        # tf.initialize_all_variables() is deprecated; use the modern name
        # (already used elsewhere in this file).
        sess.run(tf.global_variables_initializer())
        for step in range(1000):
            model.train(sess, data_x, y, summary)

            if step % 100 == 0:
                loss = model.compute_loss(sess, data_x, y)
                acc = model.compute_accuracy(sess, data_x, y)
                print('acc: {acc}, loss: {loss}'.format(acc=acc, loss=loss))