# -*- coding: utf-8 -*-

import numpy as np
import tensorflow as tf
from sklearn.metrics import f1_score
import os
import logging
from common.datasets import Datasets
import copy
import os
import json
import random
import codecs
import re

# Directory containing this file; used to resolve the vocabulary data path.
this_file_path = os.path.dirname(__file__)

# Append-mode file logging for this model's training/evaluation runs.
logging.basicConfig(filename="log_rnn_self_attention_label_0.log", filemode="a", format="%(asctime)s-%(name)s-%(levelname)s-%(message)s", level=logging.INFO)

# Make only GPU 0 visible to CUDA/TensorFlow.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'


class DataPreClass(object):
    """Batch iterator over the AI-Challenge sentiment dataset.

    Implements the iterator protocol: each ``next()`` returns a batch
    ``(x, y)`` where ``x`` is an int32 array of shape
    ``(batch, word_length)`` of character indices and ``y`` is an int32
    array of shape ``(num_labels, batch)`` of class ids.
    """

    chinese_vocab_fp = os.path.join(this_file_path, 'data/chinese_vocab.txt')

    # dataset_dir = "~/Documents/ai_challenge/"
    dataset_dir = "~/sentiment_analysis/AI-Challenge/"
    # NOTE(review): evaluated eagerly at class-definition time and always
    # shadowed by the instance attribute set in __init__; kept only for
    # backward compatibility with any external access to the class attribute.
    dataset = Datasets(dataset_dir)

    # Fixed document length: longer texts are truncated, shorter ones padded.
    word_length = 400

    def __init__(self, model="train"):
        """Load the dataset split selected by ``model``.

        Args:
            model: one of "train", "dev" or "test_a".

        Raises:
            ValueError: if ``model`` is not a recognised split name.
        """
        dataset_x = Datasets(self.dataset_dir)

        self.run_fun_mode = model

        if model == "train":
            self.train_data = dataset_x.train_data()
            self.dataset = self.train_data.values
        elif model == "dev":
            self.dev_data = dataset_x.dev_data()
            self.dataset = self.dev_data.values
        elif model == "test_a":
            self.test_data_a = dataset_x.test_data_a()
            self.dataset = self.test_data_a.values
        else:
            # was: bare ``raise Exception`` with no message
            raise ValueError(
                "unknown model mode: %r (expected 'train', 'dev' or 'test_a')" % model)

        self.batch_size = 128

        # Raw sentiment label values as they appear in the data, mapped to
        # contiguous class ids 0..3.
        labels = [1, 0, -1, -2]
        self.labels_map = {str(v): idx for idx, v in enumerate(labels)}
        self.vocab_map = self.read_vocab_map()
        self.iterobj = self.reset()

        self.iter_data = self.iter_data_set().__iter__()

    def iter_data_set(self):
        """Yield one example at a time as (content_index_array, label_ids).

        Column 1 of each row is the text; columns 2.. are the per-aspect
        labels. Text is truncated/space-padded to ``word_length`` characters;
        characters missing from the vocabulary map to index 0.
        """
        for a_line in self.dataset:
            label, _content = a_line[2:], a_line[1]
            if len(_content) >= self.word_length:
                content = _content[:self.word_length]
            else:
                content = _content + " " * (self.word_length - len(_content))
            content_idx = [self.vocab_map.get(str(k), 0) for k in content]
            label_inx = [self.labels_map[str(this_label)] for this_label in label]
            yield np.array(content_idx), label_inx

    def reset(self):
        """Reshuffle the dataset and restart the underlying example iterator."""
        dataset = list(copy.deepcopy(self.dataset))
        random.shuffle(dataset)
        self.dataset = dataset
        self.iter_data = self.iter_data_set().__iter__()
        return self.dataset

    def read_vocab_map(self):
        """Read the character vocabulary file and return {character: index}."""
        with codecs.open(self.chinese_vocab_fp, "r", encoding="utf-8") as fp:
            print("open read_vocab_map")
            # Strip each line and collapse internal whitespace runs to a
            # single space (raw string avoids an invalid-escape warning).
            vocab_list = [re.sub(r"\s+", " ", line.strip()) for line in fp.readlines()]
            # Keep only the first 8000 entries, matching the model's vocab_size.
            vocab_list = vocab_list[0:8000]

        return {str(v): idx for idx, v in enumerate(vocab_list)}

    def __next__(self):
        """Collect up to ``batch_size`` examples into one batch.

        A final, smaller batch is allowed; StopIteration is re-raised only
        when no example at all is left.
        """
        deal_x = []
        deal_y = []
        count = 0
        try:
            while count < self.batch_size:
                cur = next(self.iter_data)
                count += 1
                deal_x.append(cur[0])
                deal_y.append(cur[1])
        except StopIteration:
            if count == 0:
                raise

        # Transpose labels from (batch, num_labels) to (num_labels, batch).
        return np.array(deal_x, dtype=np.int32), np.array(deal_y, dtype=np.int32).transpose((1, 0))

    def __iter__(self):
        return self


class SelfAttention(object):
    """Bi-GRU encoder with structured self-attention for text classification.

    Per output channel (``self.chan``): embedding lookup -> bidirectional
    GRU -> attention matrix A -> sentence embedding M = A @ H -> fully
    connected layer -> softmax logits, plus the Frobenius-norm penalty
    ||A A^T - I||_F^2 (structured self-attentive sentence embedding,
    Lin et al. 2017). The whole TF1 graph is built eagerly in __init__
    and executed through ``self.sess``.
    """

    def __init__(self,
                 sequence_length,
                 num_classes,
                 vocab_size,
                 embedding_size,
                 hidden_size,
                 d_a_size,
                 r_size,
                 fc_size,
                 p_coef,
                 learning_rate,
                 epoch):
        """Store hyper-parameters and build the full computation graph.

        Args:
            sequence_length: fixed number of tokens per document.
            num_classes: number of sentiment classes per channel.
            vocab_size: vocabulary size of the embedding matrix.
            embedding_size: dimensionality of token embeddings.
            hidden_size: GRU hidden units per direction.
            d_a_size: attention hidden size (d_a in the paper's notation).
            r_size: number of attention hops/rows (r in the paper).
            fc_size: units in the fully connected layer.
            p_coef: weight of the penalization term in the loss.
            learning_rate: Adam learning rate.
            epoch: number of training epochs.
        """
        self.sequence_length = sequence_length
        self.num_classes = num_classes
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.d_a_size = d_a_size
        self.r_size = r_size
        self.fc_size = fc_size
        self.p_coef = p_coef
        self.learning_rate = learning_rate
        self.epoch = epoch

        self.checkpointDir = "./model/rnn_self_attention_label_0/"
        # Number of label channels this model trains; presumably 1 because
        # this script targets label 0 only (cf. checkpoint/log names) --
        # TODO confirm against sibling scripts.
        self.chan = 1

        self.sess = tf.Session()

        # Build the graph layer by layer; order matters (each step consumes
        # tensors produced by the previous ones).
        self._placeholder_layers()
        self._embedding_layers()
        self._word_encoder_layers()
        self._word_attention_layers()
        self._sentence_embedding()
        self._fully_connected()
        self._output()
        self._penalization()
        self._cal_loss()
        self._cal_accuracy()
        self._build_train_op()

    def _placeholder_layers(self):
        """Define graph inputs: token ids and per-channel integer labels."""
        # Placeholders for input, output and dropout
        self.input_text = tf.placeholder(tf.int32, shape=[None, self.sequence_length], name='input_text')
        self.input_y = tf.placeholder(tf.int32, shape=[self.chan, None], name='input_y')

        # Per-example count of non-zero token ids (used as true sequence length).
        self.text_length = self._length(self.input_text)

    def _embedding_layers(self):
        """Trainable embedding matrix and lookup of the input token ids."""
        # Embeddings
        with tf.name_scope("embedding"):
            # self.W_text = tf.Variable(tf.random_uniform([self.vocab_size, self.embedding_size], -1.0, 1.0), name="W_text")
            self.W_text = tf.get_variable(
                name="embedding_matrix", shape=[self.vocab_size, self.embedding_size], dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1)
            )
            # embedded_chars: (batch, sequence_length, embedding_size)
            self.embedded_chars = tf.nn.embedding_lookup(self.W_text, self.input_text)

    def _word_encoder_layers(self):
        """Per-channel bidirectional GRU over the embedded tokens.

        Produces H (batch, seq_len, 2*hidden) by concatenating forward and
        backward outputs, and a flattened view (batch*seq_len, 2*hidden).
        """
        # Bidirectional(Left&Right) Recurrent Structure
        self.H_reshape_list = []
        self.H_list = []
        with tf.name_scope("bi-lstm"):
            for i in range(0, self.chan):
                fw_cell = tf.nn.rnn_cell.GRUCell(self.hidden_size, name="fw_cell-%s" % i)
                bw_cell = tf.nn.rnn_cell.GRUCell(self.hidden_size, name="bw_cell-%s" % i)
                (output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=fw_cell,
                    cell_bw=bw_cell,
                    inputs=self.embedded_chars,
                    sequence_length=self.text_length,
                    dtype=tf.float32)

                H = tf.concat([output_fw, output_bw], axis=2)
                self.H_list.append(H)
                H_reshape = tf.reshape(H, [-1, 2 * self.hidden_size])
                self.H_reshape_list.append(H_reshape)

    def _word_attention_layers(self):
        """Compute the attention matrix A = softmax(W_s2^T tanh(W_s1 H)).

        A has shape (batch, r_size, seq_len); the softmax normalizes over
        sequence positions for each of the r attention hops.
        """
        with tf.name_scope("self-attention"):
            # NOTE(review): self.initializer is assigned but never passed to
            # any get_variable call below -- dead attribute.
            self.initializer = tf.contrib.layers.xavier_initializer()
            self.A_list = []
            for i in range(0, self.chan):
                W_s1 = tf.get_variable("W_s1-%s" % i, shape=[2 * self.hidden_size, self.d_a_size])

                _H_s1 = tf.nn.tanh(tf.matmul(self.H_reshape_list[i], W_s1))

                W_s2 = tf.get_variable("W_s2-%s" % i, shape=[self.d_a_size, self.r_size])

                _H_s2 = tf.matmul(_H_s1, W_s2)

                # (batch*seq, r) -> (batch, seq, r) -> (batch, r, seq)
                _H_s2_reshape = tf.transpose(tf.reshape(_H_s2, [-1, self.sequence_length, self.r_size]), [0, 2, 1])

                A = tf.nn.softmax(_H_s2_reshape, name="attention-%s" % i)
                self.A_list.append(A)

    def _sentence_embedding(self):
        """Sentence embedding M = A @ H, shape (batch, r_size, 2*hidden)."""
        with tf.name_scope("sentence-embedding"):
            self.M_list = []
            for i in range(0, self.chan):
                M = tf.matmul(self.A_list[i], self.H_list[i])
                self.M_list.append(M)

    def _fully_connected(self):
        """Flatten M and apply a ReLU fully connected layer per channel."""
        with tf.name_scope("fully-connected"):
            self.fc_list = []
            for i in range(0, self.chan):
                # self.M_pool = tf.reduce_mean(self.M, axis=1)
                # W_fc = tf.get_variable("W_fc", shape=[2 * hidden_size, fc_size], initializer=initializer)
                M_flat = tf.reshape(self.M_list[i], shape=[-1, 2 * self.hidden_size * self.r_size])
                W_fc = tf.get_variable("W_fc-%s" % i, shape=[2 * self.hidden_size * self.r_size, self.fc_size])
                # b_fc = tf.Variable(tf.constant(0.1, shape=[self.fc_size]), name="b_fc-%s" % i)
                b_fc = tf.get_variable(
                    name="b_fc-%s" % i, shape=[self.fc_size], initializer=tf.constant_initializer(0.)
                )

                fc = tf.nn.relu(tf.nn.xw_plus_b(M_flat, W_fc, b_fc), name="fc-%s" % i)
                self.fc_list.append(fc)

    def _output(self):
        """Linear logits and argmax class predictions per channel."""
        with tf.name_scope("output"):
            self.logits_list = []
            self.predictions_list = []
            for i in range(0, self.chan):
                # W_output = tf.get_variable("W_output-%s" % i, shape=[self.fc_size, self.num_classes])
                # b_output = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b_output-%s" % i)
                W_output = tf.get_variable(
                    name="W_output-%s" % i, shape=[self.fc_size, self.num_classes],
                    initializer=tf.truncated_normal_initializer(stddev=0.1))

                b_output = tf.get_variable(
                    name="b_output-%s" % i, shape=[self.num_classes], initializer=tf.constant_initializer(0.)
                )
                logits = tf.nn.xw_plus_b(self.fc_list[i], W_output, b_output, name="logits-%s" % i)
                self.logits_list.append(logits)
                predictions = tf.argmax(logits, 1, name="predictions-%s" % i)
                self.predictions_list.append(predictions)

    def _penalization(self):
        """Diversity penalty P = ||A A^T - I||_F^2 per channel.

        Encourages the r attention hops to focus on different positions.
        """
        with tf.name_scope("penalization"):
            self.P_list = []
            for i in range(0, self.chan):
                AA_T = tf.matmul(self.A_list[i], tf.transpose(self.A_list[i], [0, 2, 1]))
                # Batch of identity matrices matching A's batch dimension.
                I = tf.reshape(tf.tile(tf.eye(self.r_size), [tf.shape(self.A_list[i])[0], 1]), [-1, self.r_size, self.r_size])
                P = tf.square(tf.norm(AA_T - I, axis=[-2, -1], ord="fro"))
                self.P_list.append(P)

    def _cal_loss(self):
        """Mean cross-entropy plus weighted penalization, averaged over channels."""
        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            loss_list = []
            for i in range(0, self.chan):
                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits_list[i], labels=self.input_y[i], name="a_loss-%s" % i)
                loss_P = tf.reduce_mean(self.P_list[i] * self.p_coef)
                loss = tf.reduce_mean(losses) + loss_P
                loss_list.append(loss)

            self.loss = tf.reduce_mean(loss_list)

    def _build_train_op(self):
        """Adam minimization of the combined loss."""
        with tf.name_scope("train_op"):
            self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss)

    def _cal_accuracy(self):
        """Per-channel prediction accuracy, then mean across channels."""
        # Accuracy
        with tf.name_scope("accuracy"):
            self.accuracy_list = []
            for i in range(0, self.chan):
                correct_predictions = tf.equal(tf.cast(self.predictions_list[i], tf.int32), self.input_y[i])
                accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32), name="accuracy-%s" % i)
                self.accuracy_list.append(accuracy)

            self.accuracy = tf.reduce_mean(tf.cast(self.accuracy_list, "float"))

    @staticmethod
    def _length(seq):
        """Count non-zero entries along axis 1 (true lengths of padded ids)."""
        relevant = tf.sign(tf.abs(seq))
        length = tf.reduce_sum(relevant, reduction_indices=1)
        length = tf.cast(length, tf.int32)
        return length

    def _save(self):
        """Write a checkpoint of all variables under checkpointDir."""
        if not tf.gfile.Exists(self.checkpointDir):
            tf.gfile.MakeDirs(self.checkpointDir)
        saver = tf.train.Saver()
        saver.save(sess=self.sess, save_path=self.checkpointDir + "model")

    def train(self):
        """Train for ``epoch`` epochs, logging loss/accuracy/macro-F1 per batch."""
        self.sess.run(tf.global_variables_initializer())
        print("\nbegin train ....\n")
        step = 0
        _iter = 0
        dataPreClass = DataPreClass(model="train")
        for i in range(self.epoch):
            dataPreClass.reset()
            for input_x, input_y in dataPreClass:
                # Keep only the first `chan` rows of the (num_labels, batch) labels.
                input_y = input_y[0:self.chan]
                step += len(input_x)
                _iter += 1

                _, loss, acc, predictions_list = self.sess.run(
                    fetches=[self.train_op, self.loss, self.accuracy, self.predictions_list],
                    feed_dict={self.input_text: input_x, self.input_y: input_y})
                # Macro-F1 per channel, averaged across channels.
                f1_list = []
                for idx, predictions in enumerate(predictions_list):
                    f1 = f1_score(input_y[idx], predictions, average='macro')
                    f1_list.append(f1)
                avg_f1 = np.array(f1_list).mean()

                logging.info("<Train>\t Epoch: [%d] Iter: [%d] Step: [%d] Loss: [%.3F]\t Acc: [%.3f]\t f1: [%.3f]" %
                             (i + 1, _iter, step, loss, acc, avg_f1))
            # Checkpoint once per epoch.
            self._save()

    def test(self):
        """Evaluate the current session's weights on the dev split."""
        print("\nbegin test ....\n")
        _iter = 0
        dataPreClass = DataPreClass(model="dev")
        for input_x, input_y in dataPreClass:
            input_y = input_y[0:self.chan]
            _iter += 1
            acc, loss, predictions_list = self.sess.run(
                fetches=[self.accuracy, self.loss, self.predictions_list],
                feed_dict={self.input_text: input_x, self.input_y: input_y})

            f1_list = []
            for idx, predictions in enumerate(predictions_list):
                f1 = f1_score(input_y[idx], predictions, average='macro')
                f1_list.append(f1)
            avg_f1 = np.array(f1_list).mean()

            logging.info("Loss: [%.3F]\t Acc: [%.3f]\t f1: [%.3f]" % (loss, acc, avg_f1))


if __name__ == '__main__':
    # Hyper-parameters for the self-attention sentiment model.
    hyper_params = dict(
        sequence_length=400,
        num_classes=4,
        vocab_size=8000,
        embedding_size=128,
        hidden_size=256,
        d_a_size=350,
        r_size=30,
        fc_size=2000,
        p_coef=1,
        learning_rate=1e-3,
        epoch=3,
    )

    # Build the graph, train, then evaluate on the dev split.
    self_att_model = SelfAttention(**hyper_params)
    self_att_model.train()
    self_att_model.test()

