#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Created by Ross on 18-11-15
import os

import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
import numpy as np
import preprocess
from model.Actor import Actor
from utils import is_oov, generate_batch
# Global epoch counter: incremented once per epoch in the training loop below
# and used by RLBModel.call to name the per-epoch action-slice CSV files.
e = 0
# Directory where RLBModel.call dumps each sample's chosen stop position.
SLICES_DIR = './slices'
os.makedirs(SLICES_DIR, exist_ok=True)
# TF 1.x: switch on eager execution globally (must run before any TF ops).
tf.enable_eager_execution()


class Embedding(tf.keras.layers.Layer):
    """Trainable token-embedding layer.

    Owns a (vocab_size, embedding_size) matrix, initialised uniformly in
    [-0.1, 0.1], and maps integer token ids to their embedding rows.
    """

    def __init__(self, vocab_size, embedding_size, **kwargs):
        super(Embedding, self).__init__(**kwargs)
        # Stored so the weight matrix can be created lazily in build().
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size

    def build(self, input_shape):
        # Deferred weight creation (Keras convention): the matrix is only
        # allocated the first time the layer is called.
        matrix_shape = (self.vocab_size, self.embedding_size)
        initializer = tf.initializers.random_uniform(-0.1, 0.1)
        self.embedding = self.add_variable(
            'embedding_matrix', matrix_shape, tf.float32, initializer,
            trainable=True)

    def call(self, inputs):
        # inputs: integer token ids; returns the matching embedding vectors.
        return tf.nn.embedding_lookup(self.embedding, inputs, name='embedded')


class LSTM(tf.keras.Model):
    """A static RNN.
    Similar to tf.nn.dynamic_rnn, implemented as a class.

    Wraps a BasicLSTMCell together with a reinforcement-learning Actor that,
    at every time step, chooses whether to emit a "stop" action for each
    sample in the batch.
    """

    def __init__(self, hidden_dim, keep_ratio):
        super(LSTM, self).__init__()
        # Dropout keep probability, applied to the stacked step outputs
        # whenever `training` is truthy in call().
        self.keep_ratio = keep_ratio
        self.cell = tf.nn.rnn_cell.BasicLSTMCell(hidden_dim)
        # Policy network deciding the per-step stop action
        # (2 actions, 128 hidden units, 0.95 decay — see model.Actor).
        self.actor = Actor(2, 128, 0.95)

    def call(self, input_seq, training=None):
        """Unroll the LSTM over a batch of sequences.

        Args:
            input_seq: batch-major tensor; assumes shape
                (batch, seq_len, feature_dim) — TODO confirm against callers.
            training: mode flag. 'ex_actor' skips the actor and forces the
                stop action onto the last time step; any other truthy value
                queries the actor at every step; falsy means plain inference.

        Returns:
            Tuple of (all step outputs stacked on axis 1, the final step's
            output, an int32 (batch, seq_len) action tensor).
        """
        # Clear per-episode state in the policy network before unrolling.
        self.actor.reset()
        batch_size = int(input_seq.shape[0])
        seq_len = int(input_seq.shape[1])
        state = self.cell.zero_state(batch_size, tf.float32)
        # action = tf.get_variable('actions', shape=(batch_size, seq_len), dtype=tf.float32,
        #                          initializer=tf.initializers.identity())
        # end_step = tf.argmax(tf.cast(tf.equal(action, 1.0), tf.int32), axis=1, output_type=tf.int32)
        output_states = []
        actions = []
        # Static unroll: split the sequence into one tensor per time step.
        input_seq = tf.unstack(input_seq, num=seq_len, axis=1)
        for inp in input_seq:
            output, state = self.cell(inp, state)
            output_states.append(output)
            if training == 'ex_actor':
                # Pre-training the recurrent part: actor decisions not needed.
                pass
            else:
                actions.append(self.actor.choose_action(output))
        if training == 'ex_actor':
            # Force "stop" (action 1) on the final step for every sample.
            actions = np.zeros([batch_size, seq_len])
            actions[:, -1] = 1
            actions = tf.convert_to_tensor(actions, tf.int32)
        else:
            actions = tf.stack(actions, axis=1)
        output_states = tf.stack(output_states, axis=1)
        if training:
            # NOTE(review): any truthy mode string (including 'ex_actor' and
            # 'output_action') enables dropout here — confirm that is intended
            # for evaluation-time calls.
            output_states = tf.nn.dropout(output_states, self.keep_ratio)

        # `output` is the cell output of the last unrolled step.
        return output_states, output, actions

    def _add_cells(self, cells):
        # "Magic" required for keras.Model classes to track all the variables in
        # a list of Layer objects.
        # TODO(ashankar): Figure out API so user code doesn't have to do this.
        for i, c in enumerate(cells):
            setattr(self, "cell-%d" % i, c)
        return cells


class RLBModel(tf.keras.Model):
    """
    Reinforcement Learning base Model

    Embeds the input tokens (when a vocabulary is given), runs the
    actor-augmented LSTM, gathers the hidden state at each sample's chosen
    stop position, and classifies it through a dense head with softmax.
    """

    def __init__(self, rnn_units, hidden_units, num_class, vocab_size, embedding_size, keep_prob=0.5, **kwargs):
        super(RLBModel, self).__init__(**kwargs)
        self.rnn_units = rnn_units
        self.hidden_units = hidden_units
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.keep_prob = keep_prob
        self.num_class = num_class

        # vocab_size == 0 signals that inputs are already dense features.
        if self.vocab_size != 0:
            self.embedding = Embedding(self.vocab_size, self.embedding_size)
        self.rnn = LSTM(self.rnn_units, self.keep_prob)
        self.dense = tf.keras.layers.Dense(self.hidden_units)
        self.output_layer = tf.keras.layers.Dense(self.num_class)

    def call(self, inputs, training=None):
        embedded = self.embedding(inputs) if self.vocab_size != 0 else inputs
        step_outputs, final_state, actions = self.rnn(embedded, training)
        # First index of the maximal action per row: since actions are 0/1,
        # this is the first "stop" step (or step 0 when no stop was chosen).
        stop_steps = tf.argmax(actions, axis=1, output_type=tf.int32)
        row_ids = tf.range(0, inputs.shape[0], dtype=tf.int32)
        gather_idx = list(zip(row_ids.numpy(), stop_steps.numpy()))
        if training == 'output_action':
            # Persist per-sample stop positions for the current epoch `e`.
            frame = pd.DataFrame(gather_idx, columns=('index', 'slices'))
            frame.to_csv(os.path.join(SLICES_DIR, str(e) + '.csv'), index=False)
        # Pick each sample's hidden state at its stop step.
        stop_state = tf.gather_nd(step_outputs, gather_idx)
        hidden = self.dense(stop_state)
        logits = self.output_layer(hidden)
        return tf.nn.softmax(logits)

    def get_actor_varable(self):
        # (Method name kept as-is for existing callers.)
        # Returns only the actor's variables.
        return self.rnn.actor.variables

    def loss_fn(self, x, y, mode=None):
        h = self.call(x, mode)
        # Summed cross-entropy over the whole batch (reduce_mean of a scalar
        # is the identity) — formulation kept identical to the original.
        return tf.reduce_mean(-tf.reduce_sum(y * tf.log(h)))


if __name__ == '__main__':

    DATA_DIR = '../data/chat-detection-dataset'
    EPOCHS = 1000
    LOG_DIR = 'smp_train_log'
    TRAIN_DIR = 'smp_saved_model'
    # Sequences are padded/truncated to this many tokens.
    MAX_LEN = 24

    # 10-fold experiment: each dataset%d.txt supplies one train/dev/test split.
    for fold in range(10):
        train_x, dev_x, test_sentence, train_y, dev_y, test_y = preprocess.generate_train_dev_test(
            os.path.join(DATA_DIR, 'dataset%d.txt' % fold))
        # Vocabulary is built from the training split only.
        vocab, vocab_rev = preprocess.generate_vocab(train_x)
        train_x = preprocess.tokenize_sentences(train_x, vocab_rev)
        train_x = pad_sequences(train_x, MAX_LEN, dtype='int32')
        dev_x = preprocess.tokenize_sentences(dev_x, vocab_rev)
        dev_x = pad_sequences(dev_x, MAX_LEN, dtype='int32')
        test_x = preprocess.tokenize_sentences(test_sentence, vocab_rev)
        test_x = pad_sequences(test_x, MAX_LEN, dtype='int32', )
        # Labels one-hot encoded for the 2-class softmax head.
        train_y = to_categorical(train_y, 2)
        dev_y = to_categorical(dev_y, 2)
        test_y = to_categorical(test_y, 2)
        # NOTE(review): oov_list (and test_x/test_y) are never used in the
        # visible code — presumably meant for a test-set evaluation that was
        # removed or lives elsewhere; verify before deleting.
        oov_list = [is_oov(_) for _ in test_x]

        model = RLBModel(rnn_units=128, hidden_units=64, num_class=2, vocab_size=len(vocab), embedding_size=300)
        # Build the model's variables once with a dummy batch so fit() works.
        dummy_x = tf.zeros((3, MAX_LEN), dtype=tf.int32)
        model._set_inputs(dummy_x)

        # with tf.GradientTape() as tape:
        #     loss = model.loss_fn(train_x[:2], train_y[:2])
        #     v = [i for i in model.variables if i not in model.dense.variables]
        #     g = tape.gradient(loss, v)

        # actor_v = model.rnn.actor.variables
        # ex_actor_v = [i for i in model.variables if i not in actor_v]
        #
        model.compile(tf.train.AdamOptimizer(0.001), 'categorical_crossentropy', metrics=['accuracy'])
        #
        # for epoch in tqdm(range(1, EPOCHS + 1)):
        #     for x, y in generate_batch(train_x, train_y, 32, shuffle=True, undersampling=False):
        #         with tf.GradientTape() as tape:
        #             loss = model.loss_fn(x, y)
        #             g = tape.gradient(loss, ex_actor_v)
        #             model.optimizer.apply_gradients(zip(g, ex_actor_v))

        for epoch in range(1, EPOCHS + 1):
            model.fit(train_x, train_y, 32, epochs=1, verbose=1, validation_data=[dev_x, dev_y])
            # NOTE(review): this call passes training=None, so the
            # `training == 'output_action'` branch in RLBModel.call never
            # fires and no slices CSV is written, even though `e` is
            # incremented to name those files. Passing 'output_action' would
            # write the CSVs but also enables dropout in LSTM.call (any truthy
            # mode does) — confirm intended behavior before changing.
            model.call(dev_x)
            e += 1
        # with tf.GradientTape() as tape:
        #     pred = model(test_x)
        # tape.gradient(pred, )