# encoding: utf-8

import tensorflow as tf
import numpy as np
import random
import copy
import os
import json
import random
from sklearn.metrics import f1_score
import logging
import sys

this_file_path = os.path.dirname(__file__)

logging.basicConfig(filename="log_att_raw.log", filemode="a", format="%(asctime)s-%(name)s-%(levelname)s-%(message)s", level=logging.INFO)


from common.datasets import Datasets
import os
import codecs


os.environ['CUDA_VISIBLE_DEVICES'] = '1'


class DataPreClass(object):
    """Batched iterator over the AI-Challenge sentiment dataset.

    Each ``__next__`` call yields a ``(inputs, targets)`` pair of int32
    numpy arrays:

    * ``inputs``  -- shape ``(batch, 1, word_length)``: character indices of
      one review, padded/truncated to ``word_length`` characters.
    * ``targets`` -- shape ``(num_labels, batch)``: one class index per
      fine-grained sentiment label (transposed so each label head can be
      addressed on the first axis).
    """

    chinese_vocab_fp = os.path.join(this_file_path, 'data/chinese_vocab.txt')

    dataset_dir = "~/sentiment_analysis/AI-Challenge/"
    # NOTE(review): this class-level Datasets instance is created at import
    # time and is always shadowed by the instance attribute assigned in
    # __init__; it is kept only for backward compatibility with any code
    # reading ``DataPreClass.dataset`` directly.
    dataset = Datasets(dataset_dir)

    # Every review is padded/truncated to this many characters.
    word_length = 800

    def __init__(self, model="train"):
        """Load the split selected by ``model``.

        :param model: one of ``"train"``, ``"dev"`` or ``"test_a"``.
        :raises ValueError: if ``model`` names an unknown split.
        """
        dataset_x = Datasets(self.dataset_dir)

        self.run_fun_mode = model

        if model == "train":
            self.train_data = dataset_x.train_data()
            self.dataset = self.train_data.values
        elif model == "dev":
            self.dev_data = dataset_x.dev_data()
            self.dataset = self.dev_data.values
        elif model == "test_a":
            self.test_data_a = dataset_x.test_data_a()
            self.dataset = self.test_data_a.values
        else:
            # Was a bare ``raise Exception``; ValueError (a subclass) with a
            # message is strictly more informative and backward compatible.
            raise ValueError(
                "unknown dataset mode: %r (expected 'train', 'dev' or 'test_a')" % (model,))

        self.batch_size = 128

        # Raw sentiment values used by the dataset, mapped onto 0..3.
        labels = [1, 0, -1, -2]

        self.labels_map = {str(v): idx for idx, v in enumerate(labels)}
        self.vocab_map = self.read_vocab_map()
        self.iterobj = self.reset()

        self.iter_data = self.iter_data_set().__iter__()

    def iter_data_set(self):
        """Yield one ``(content_idx, label_inx)`` pair per dataset row.

        Row layout (from the slicing below): ``row[1]`` is the review text,
        ``row[2:]`` are the per-aspect sentiment labels.
        """
        for a_line in self.dataset:
            label, _content = a_line[2:], a_line[1]
            # Pad with spaces / truncate so every sample has word_length chars.
            if len(_content) >= self.word_length:
                content = _content[:self.word_length]
            else:
                content = _content + " " * (self.word_length - len(_content))
            # Characters missing from the vocabulary fall back to index 0.
            content_idx = [self.vocab_map.get(str(k), 0) for k in content]
            label_inx = [self.labels_map[str(this_label)] for this_label in label]
            yield np.array(content_idx).reshape(-1, self.word_length), label_inx

    def reset(self):
        """Reshuffle a deep copy of the dataset and restart the iterator."""
        dataset = list(copy.deepcopy(self.dataset))
        random.shuffle(dataset)
        self.dataset = dataset
        self.iter_data = self.iter_data_set().__iter__()
        return self.dataset

    def read_vocab_map(self):
        """Read the character vocabulary file into a ``{char: index}`` dict."""
        with codecs.open(self.chinese_vocab_fp, "r", encoding="utf-8") as fp:
            vocab_list = [line.strip() for line in fp.readlines()]
        return {str(v): idx for idx, v in enumerate(vocab_list)}

    def __next__(self):
        """Collect up to ``batch_size`` samples into one batch.

        :returns: ``(inputs, targets)`` int32 arrays; ``targets`` is
            transposed to shape ``(num_labels, batch)``.
        :raises StopIteration: when the sample iterator is exhausted and no
            sample was collected for this batch.
        """
        deal_x = []
        deal_y = []
        count = 0
        try:
            while count < self.batch_size:
                cur = next(self.iter_data)
                count += 1
                deal_x.append(cur[0])
                deal_y.append(cur[1])
        except StopIteration:
            # A partially filled final batch is still returned; only a
            # completely empty one terminates iteration.
            if count == 0:
                raise

        return np.array(deal_x, dtype=np.int32), np.array(deal_y, dtype=np.int32).transpose((1, 0))

    def __iter__(self):
        return self


class RNNAttentionModel(object):
    """Hierarchical attention network (TF1 graph mode) for multi-label sentiment.

    Builds ``self.chan`` (= 20) independent label heads. Each head runs a
    word-level BiGRU encoder + attention, then a sentence-level BiGRU
    encoder + attention, and finally a softmax classifier over
    ``num_classes`` classes. All heads share one embedding matrix but have
    their own RNN and attention parameters (suffixed ``-%s`` per head).
    """

    def __init__(self,
                vocab_size,
                embedding_size,
                word_num_hidden,
                word_attention_size,
                sentence_num_hidden,
                sentence_attention_size,
                num_classes,
                learning_rate,
                epoch,
                ):
        """Store hyper-parameters and assemble the whole TF1 graph.

        :param vocab_size: number of rows in the embedding matrix.
        :param embedding_size: embedding dimension.
        :param word_num_hidden: GRU units per direction at the word level.
        :param word_attention_size: projection size of word attention.
        :param sentence_num_hidden: GRU units per direction at sentence level.
        :param sentence_attention_size: projection size of sentence attention.
        :param num_classes: number of sentiment classes per label head.
        :param learning_rate: Adam learning rate.
        :param epoch: number of passes over the training set.
        """
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.word_num_hidden = word_num_hidden
        self.word_attention_size = word_attention_size
        self.sentence_num_hidden = sentence_num_hidden
        self.sentence_attention_size = sentence_attention_size
        self.num_classes = num_classes
        # Number of label heads (fine-grained sentiment aspects).
        self.chan = 20
        self.learning_rate = learning_rate
        self.epoch = epoch
        self.checkpointDir = "model/rnn_attention_raw/"

        self.sess = tf.Session()
        # Graph construction order matters: each layer consumes tensors
        # created by the previous call.
        self._placeholder_layers()
        self._embedding_layers()
        self._word_encoder_layers()
        self._word_attention_layers()
        self._sentence_encoder_layers()
        self._sentence_attention_layers()
        self._inference()
        self._build_train_op()

    def _placeholder_layers(self):
        """Define input placeholders and derive sequence lengths from padding."""
        # batch * sentence * words
        # The sentence dim is 1 here because there is only a single sentence.
        self.inputs = tf.placeholder(dtype=tf.int32, shape=[None, None, None], name="inputs")
        self.targets = tf.placeholder(dtype=tf.int32, shape=[self.chan, None], name="targets")
        self.keep_prob = tf.placeholder(dtype=tf.float32, shape=None, name="keep_prob")

        # Token id 0 is treated as padding: word length = count of non-zero ids.
        self.word_length = tf.reduce_sum(
            tf.cast(tf.not_equal(tf.cast(0, self.inputs.dtype), self.inputs), tf.int32), axis=-1
        )
        # A sentence is non-empty iff it contains at least one non-pad token.
        self.sentence_length = tf.reduce_sum(
            tf.cast(tf.not_equal(tf.cast(0, self.inputs.dtype), self.word_length), tf.int32), axis=-1
        )

    def _embedding_layers(self):
        """Look up token embeddings; record the dynamic input shape [B,S,W,D]."""
        with tf.variable_scope(name_or_scope="embedding_layers"):
            embedding_matrix = tf.get_variable(
                name="embedding_matrix", shape=[self.vocab_size, self.embedding_size], dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1)
            )
            self.embedded_inputs = tf.nn.embedding_lookup(params=embedding_matrix, ids=self.inputs)
            print(self.embedded_inputs.shape)
            # [B * S * W * D]
            self.origin_shape = tf.shape(self.embedded_inputs)
            self.origin_shape_b, self.origin_shape_s, self.origin_shape_w, self.origin_shape_d = \
                self.origin_shape[0], self.origin_shape[1], self.origin_shape[2], self.origin_shape[3]

    def _word_encoder_layers(self):
        """Per head: word-level BiGRU over [B*S, W, D] with dropout on outputs."""
        # Start with a single sentence.
        with tf.variable_scope(name_or_scope="word_encoder_layers"):
            self.word_encoder_output_list = []
            for i in range(0, self.chan):
                cell_fw = tf.nn.rnn_cell.GRUCell(num_units=self.word_num_hidden, name="cell_fw-%s" % i)
                cell_bw = tf.nn.rnn_cell.GRUCell(num_units=self.word_num_hidden, name="cell_bw-%s" % i)
                # Fold batch and sentence dims so the RNN sees one row per sentence.
                word_inputs = tf.reshape(self.embedded_inputs, [self.origin_shape_b * self.origin_shape_s,
                                                                self.origin_shape_w, self.embedding_size])
                word_length = tf.reshape(self.word_length, [self.origin_shape_b * self.origin_shape_s])
                (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=cell_fw, cell_bw=cell_bw, inputs=word_inputs, sequence_length=word_length,
                    dtype=tf.float32, time_major=False
                )
                # Concatenate forward and backward outputs -> 2*word_num_hidden.
                word_encoder_output = tf.nn.dropout(x=tf.concat([output_fw, output_bw], axis=-1), keep_prob=self.keep_prob, name="word_encoder_output-%s" % i)
                self.word_encoder_output_list.append(word_encoder_output)

    def _word_attention_layers(self):
        """Per head: additive attention over words -> one vector per sentence."""
        with tf.variable_scope("word_attention_layers"):
            self.word_attention_output_list = []
            for i in range(0, self.chan):
                w_w = tf.get_variable(
                    name="w_w-%s"%i, shape=[2 * self.word_num_hidden, self.word_attention_size],
                    initializer=tf.truncated_normal_initializer(stddev=0.1)
                )
                b_w = tf.get_variable(name="b_w-%s"%i, shape=[self.word_attention_size], initializer=tf.constant_initializer(0.))
                u_w = tf.get_variable(
                    name="u_w-%s"%i, shape=[self.word_attention_size, 1], initializer=tf.truncated_normal_initializer(stddev=0.1))

                v_w = tf.tanh(tf.nn.xw_plus_b(tf.reshape(self.word_encoder_output_list[i], [-1, 2 * self.word_num_hidden]), w_w, b_w), name="v_w-%s" % i)
                # The 1 in the middle dim is inserted so the multiplication
                # works out: alpha's last dim multiplies the word dim of
                # word_encoder_output; see the matmul below for the result.
                alpha_w = tf.nn.softmax(tf.reshape(tf.matmul(v_w, u_w), [self.origin_shape_b*self.origin_shape_s, 1, self.origin_shape_w]), name="alpha_w-%s" % i)
                # s_i: attention-weighted sum of word encodings.
                word_attention_output = tf.reduce_sum(tf.matmul(alpha_w, self.word_encoder_output_list[i]), axis=1, name="word_attention_output-%s" % i)
                self.word_attention_output_list.append(word_attention_output)

    def _sentence_encoder_layers(self):
        """Per head: sentence-level BiGRU over [B, S, 2*word_num_hidden]."""
        with tf.variable_scope(name_or_scope="sentence_encoder_layers"):
            self.sentence_encoder_output_list = []
            for i in range(0, self.chan):
                cell_fw = tf.nn.rnn_cell.GRUCell(num_units=self.sentence_num_hidden, name="cell_fw-%s" % i)
                cell_bw = tf.nn.rnn_cell.GRUCell(num_units=self.sentence_num_hidden, name="cell_bw-%s" % i)

                # Unfold [B*S, 2H] back to [B, S, 2H] for the sentence RNN.
                sentence_level_inputs = tf.reshape(self.word_attention_output_list[i], [
                    self.origin_shape_b, self.origin_shape_s, 2 * self.word_num_hidden], name="sentence_level_inputs-%s" % i)

                (output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
                    cell_fw=cell_fw, cell_bw=cell_bw, inputs=sentence_level_inputs,
                    sequence_length=self.sentence_length,
                    dtype=tf.float32, time_major=False
                )
                sentence_encoder_output = tf.nn.dropout(x=tf.concat([output_fw, output_bw], axis=-1), keep_prob=self.keep_prob, name="sentence_encoder_output-%s" % i)
                self.sentence_encoder_output_list.append(sentence_encoder_output)

    def _sentence_attention_layers(self):
        """Per head: attention over sentences -> one document vector per sample."""
        with tf.variable_scope("sentence_attention_layers"):
            self.sentence_attention_output_list = []
            for i in range(0, self.chan):
                w_1 = tf.get_variable(
                    name="w_1-%s" % i, shape=[2 * self.sentence_num_hidden, self.sentence_attention_size],
                    initializer=tf.truncated_normal_initializer(stddev=0.1),
                )
                b_1 = tf.get_variable(name="b_1-%s" % i, shape=[self.sentence_attention_size], initializer=tf.constant_initializer(0.))
                u_1 = tf.get_variable(
                    name="u_1-%s" % i, shape=[self.sentence_attention_size, 1], initializer=tf.truncated_normal_initializer(stddev=0.1))
                # NOTE(review): unlike the word level, no tanh is applied here
                # before the score — presumably intentional; confirm.
                v_1 = tf.nn.xw_plus_b(tf.reshape(self.sentence_encoder_output_list[i], [-1, 2 * self.sentence_num_hidden]), w_1, b_1, name="v_1-%s" % i)  # B*T*A
                s_1 = tf.matmul(v_1, u_1, name="s_1-%s" % i)
                alphas_1 = tf.nn.softmax(tf.reshape(s_1, [self.origin_shape[0], 1, self.origin_shape[1]]), name="alphas_1-%s" % i)
                sentence_attention_output = tf.reduce_sum(tf.matmul(alphas_1, self.sentence_encoder_output_list[i]), axis=1, name="sentence_attention_output-%s" % i)
                self.sentence_attention_output_list.append(sentence_attention_output)

    def _inference(self):
        """Per head: linear classifier, argmax predictions, per-head accuracy."""
        with tf.variable_scope("train_op"):
            self.logits_list = []
            self.accuracy_val_list = []
            self.predictions_list = []
            for i in range(0, self.chan):
                w = tf.get_variable(
                    name="w-%s" % i, shape=[2 * self.sentence_num_hidden, self.num_classes],
                    initializer=tf.truncated_normal_initializer(stddev=0.1))
                b = tf.get_variable(
                    name="b-%s" % i, shape=[self.num_classes], initializer=tf.constant_initializer(0.)
                )
                logits = tf.matmul(self.sentence_attention_output_list[i], w) + b
                self.logits_list.append(logits)
                predictions = tf.argmax(logits, axis=1, name="predictions-%s" % i)
                # targets has shape [chan, batch]; row i holds head i's labels.
                correct_prediction = tf.equal(tf.cast(predictions, tf.int32), self.targets[i])
                self.predictions_list.append(predictions)
                accuracy_val = tf.reduce_mean(tf.cast(correct_prediction, "float"), name="accuracy%s" % i)
                self.accuracy_val_list.append(accuracy_val)
            # Overall accuracy: mean of the per-head accuracies.
            self.accuracy_val = tf.reduce_mean(tf.cast(self.accuracy_val_list, "float"))

    def _build_train_op(self):
        """Mean cross-entropy over all heads, minimized with Adam."""
        loss_list = []
        for i in range(0, self.chan):
            a_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.targets[i], logits=self.logits_list[i], name="a_loss-i%s" % i)
            loss = tf.reduce_mean(a_loss)
            loss_list.append(loss)
        self.loss = tf.reduce_mean(loss_list)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        self.train_op = optimizer.minimize(self.loss)

    def _save(self):
        """Write a checkpoint of the current session to checkpointDir."""
        if not tf.gfile.Exists(self.checkpointDir):
            tf.gfile.MakeDirs(self.checkpointDir)
        saver = tf.train.Saver()
        saver.save(sess=self.sess, save_path=self.checkpointDir + "model")

    def train(self):
        """Train for ``self.epoch`` passes, logging loss/acc/macro-F1 per batch.

        Saves a checkpoint after every epoch.
        """
        self.sess.run(tf.global_variables_initializer())
        print("\nbegin train ....\n")
        step = 0
        _iter = 0
        dataPreClass = DataPreClass(model="train")
        for i in range(self.epoch):
            # Reshuffle the training data at the start of each epoch.
            dataPreClass.reset()
            for input_x, input_y in dataPreClass:
                step += len(input_x)
                _iter += 1

                _, loss, acc, predictions_list = self.sess.run(
                    fetches=[self.train_op, self.loss, self.accuracy_val, self.predictions_list],
                    feed_dict={self.inputs: input_x, self.targets: input_y, self.keep_prob: 0.5})
                # Macro-F1 per head, then averaged over heads.
                f1_list = []
                for idx, predictions in enumerate(predictions_list):
                    f1 = f1_score(input_y[idx], predictions, average='macro')
                    f1_list.append(f1)
                avg_f1 = np.array(f1_list).mean()

                # NOTE(review): "%.3F" (uppercase) is valid Python formatting
                # but inconsistent with the other "%.3f" specifiers.
                logging.info("<Train>\t Epoch: [%d] Iter: [%d] Step: [%d] Loss: [%.3F]\t Acc: [%.3f]\t f1: [%.3f]" %
                             (i + 1, _iter, step, loss, acc, avg_f1))
            self._save()

    def test(self):
        """Evaluate on the dev split, logging loss/acc/macro-F1 per batch.

        NOTE(review): no checkpoint is restored here — this evaluates the
        in-memory weights left by a preceding ``train()`` call in the same
        session; confirm that is the intended workflow.
        """
        print("\nbegin test ....\n")
        _iter = 0
        dataPreClass = DataPreClass(model="dev")
        for input_x, input_y in dataPreClass:
            _iter += 1
            acc, loss, predictions_list = self.sess.run(
                fetches=[self.accuracy_val, self.loss, self.predictions_list],
                feed_dict={self.inputs: input_x, self.targets: input_y, self.keep_prob: 1.})

            f1_list = []
            for idx, predictions in enumerate(predictions_list):
                f1 = f1_score(input_y[idx], predictions, average='macro')
                f1_list.append(f1)
            avg_f1 = np.array(f1_list).mean()

            logging.info("Loss: [%.3F]\t Acc: [%.3f]\t f1: [%.3f]" % (loss, acc, avg_f1))


if __name__ == '__main__':
    # Hyper-parameters for the hierarchical attention model.
    hyper_params = dict(
        vocab_size=8590,
        embedding_size=128,
        word_num_hidden=64,
        word_attention_size=64,
        sentence_num_hidden=64,
        sentence_attention_size=32,
        num_classes=4,
        learning_rate=1e-3,
        epoch=3,
    )

    rnn_attention_model = RNNAttentionModel(**hyper_params)

    # Train on the "train" split, then evaluate on the "dev" split.
    rnn_attention_model.train()
    rnn_attention_model.test()













