#!/usr/bin/env python3
# encoding: utf-8

import tensorflow as tf
import numpy as np
import random
import copy
import os
import json
import random
from sklearn.metrics import f1_score
import sys

# Directory containing this script; used to resolve data-file paths (vocab, CSVs).
this_file_path = os.path.dirname(__file__)


from common.datasets import Datasets
import os
import codecs


# os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'


class DataPreClass(object):
    """Batched iterator over one split of the AI-Challenge sentiment dataset.

    Iterating (or calling ``__next__``) yields ``(x, y)`` batches where
    ``x`` is an int32 array of shape (batch, word_num) of character indices
    and ``y`` is an int32 array of shape (20, batch) of label indices
    (empty along the label axis for the unlabelled "test_a" split).
    """

    # Character -> index vocabulary file, resolved relative to this script.
    chinese_vocab_fp = os.path.join(this_file_path, 'data/chinese_vocab.txt')

    # dataset_dir = "~/Documents/ai_challenge/"
    # dataset_dir = "~/work/sentiment_analysis/AI-Challenge/"
    dataset_dir = "~/sentiment_analysis/AI-Challenge/"
    # NOTE(review): constructed at class-definition (import) time — loading the
    # dataset is a module-import side effect; kept for backward compatibility.
    dataset = Datasets(dataset_dir)

    def __init__(self, model="train"):
        """model selects the split: "train", "dev" or "test_a".

        Raises:
            ValueError: if ``model`` is not one of the three known splits.
        """
        self.run_fun_mode = model

        if model == "train":
            self.dataset = self.dataset.train_data().values
        elif model == "dev":
            self.dataset = self.dataset.dev_data().values
        elif model == "test_a":
            self.dataset = self.dataset.test_data_a().values
        else:
            raise ValueError(
                "unknown mode %r: expected 'train', 'dev' or 'test_a'" % (model,))

        self.batch_size = 256
        self.word_num = 800  # fixed document length in characters (pad/truncate)

        labels = [1, 0, -1, -2]  # raw sentiment values -> class indices 0..3

        self.labels_map = {str(v): idx for idx, v in enumerate(labels)}
        self.vocab_map = self.read_vocab_map()
        self.iterobj = self.reset()

        self.iter_data = self.iter_data_set().__iter__()

    def iter_data_set(self):
        """Yield one ``(content_idx, label_idx_list)`` example at a time."""
        for a_line in self.dataset:
            # Column 1 is the review text; columns 2.. are the 20 aspect labels
            # (assumed from the slicing here — confirm against Datasets schema).
            label, _content = a_line[2:], a_line[1]
            _content = _content[1:-1]  # strip the surrounding quote characters

            if self.run_fun_mode == "test_a":
                label = []  # the test split carries no labels

            # Pad with spaces / truncate so every document is exactly word_num chars.
            if len(_content) >= self.word_num:
                content = _content[:self.word_num]
            else:
                content = _content + " " * (self.word_num - len(_content))
            content_idx = [self.vocab_map.get(str(k), 0) for k in content]  # 0 = OOV
            label_inx = [self.labels_map[str(this_label)] for this_label in label]
            yield np.array(content_idx).reshape(-1, self.word_num), label_inx

    def reset(self):
        """Restart iteration from the beginning; shuffle unless in test_a mode."""
        if self.run_fun_mode != "test_a":
            # Deep-copy before shuffling so repeated epochs re-shuffle a fresh copy.
            dataset = list(copy.deepcopy(self.dataset))
            random.shuffle(dataset)
            self.dataset = dataset
        self.iter_data = self.iter_data_set().__iter__()
        return self.dataset

    def read_vocab_map(self):
        """Load the character vocabulary file as a {char: index} dict."""
        with codecs.open(self.chinese_vocab_fp, "r", encoding="utf-8") as fp:
            print("open read_vocab_map")
            vocab_list = [line.strip() for line in fp.readlines()]

        return {str(v): idx for idx, v in enumerate(vocab_list)}

    def __next__(self):
        """Collect up to batch_size examples into one batch.

        Raises:
            StopIteration: only when the underlying iterator is exhausted AND
                no examples were collected; a final partial batch is returned.
        """
        deal_x = []
        deal_y = []
        count = 0
        try:
            while count < self.batch_size:
                cur = next(self.iter_data)
                if not cur:
                    continue
                count += 1
                deal_x.append(cur[0][0])
                deal_y.append(cur[1])
        except StopIteration as iter_exception:
            if count == 0:
                # Nothing collected at all -> propagate end-of-data to the caller.
                raise iter_exception

        # Transpose labels to (20, batch) to match TextCNN's input_y placeholder.
        return np.array(deal_x, dtype=np.int32), np.array(deal_y, dtype=np.int32).transpose((1, 0))

    def __iter__(self):
        return self


class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by a convolutional, max-pooling and softmax layer.
    One independent conv + output head is built for each of the self.chan (20)
    label channels; all heads share the same embedding lookup.
    NOTE: op names ("input_x", "output/predictions-%s", ...) are loaded by name
    in PredictModelValidF1/PredictModelTestA — do not rename.
    """

    def __init__(
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters):
        # sequence_length: fixed number of characters per input document
        # num_classes: classes predicted by each head (4 sentiment labels)
        # vocab_size / embedding_size: embedding lookup table dimensions
        # filter_sizes: list of convolution widths; num_filters: filters per width

        self.sequence_length = sequence_length
        self.num_classes = num_classes
        self.chan = 20  # number of parallel label channels (one head each)
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.filter_sizes = filter_sizes
        self.num_filters = num_filters
        self.checkpointDir = "model/cnn/"  # where _save() writes checkpoints

        # Build the graph in dependency order: placeholders -> embedding ->
        # conv/pool -> dropout -> per-head logits -> loss/accuracy -> optimizer.
        self._define()
        self._embedding_layer()
        self._conv()
        self._dropout()
        self._prediction()
        self._cal_loss()
        self._cal_acc()
        # self._cal_f1()
        self._grad()

        self.sess = tf.Session()

    def _define(self):
        """Create the input placeholders."""
        # input_y is (chan, batch): one row of label indices per head.
        self.input_x = tf.placeholder(tf.int32, [None, self.sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.int32, [self.chan, None], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

    def _embedding_layer(self):
        """Look up character embeddings for input_x."""
        with tf.name_scope("embedding"):
            embedding_matrix = tf.get_variable(
                name="embedding_matrix", shape=[self.vocab_size, self.embedding_size], dtype=tf.float32,
                initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1)
            )
            self.embedded_chars = tf.nn.embedding_lookup(embedding_matrix, self.input_x)
            # expand_dims at axis 1 -> (batch, 1, seq, emb), the NHWC layout the
            # [1, filter_size, emb, num_filters] conv kernels below expect.
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, 1)
            print("self.embedded_chars_expanded.shape ", self.embedded_chars_expanded.shape)

    def _conv(self):
        """Build one conv + max-pool stack per head, one branch per filter size."""
        pooled_outputs_all = []
        for i in range(0, self.chan):
            pooled_outputs = []
            for filter_size in self.filter_sizes:
                with tf.name_scope("conv-maxpool-%s-%s" % (str(filter_size), str(i))):
                    # Convolution Layer
                    # filter_shape = [filter_size, self.embedding_size, 1, self.num_filters]
                    # Kernel slides along the sequence axis (width) of the
                    # (batch, 1, seq, emb) input.
                    filter_shape = [1, filter_size, self.embedding_size, self.num_filters]
                    kernel = tf.get_variable(shape=filter_shape, name="conv-maxpool-%s-%s-W" % (str(filter_size), str(i)), initializer=tf.truncated_normal_initializer(stddev=0.01))
                    bias = tf.get_variable(initializer=tf.constant_initializer(), shape=[self.num_filters], name="conv-maxpool-%s-%s-b" % (str(filter_size), str(i)))
                    conv = tf.nn.conv2d(
                        self.embedded_chars_expanded,
                        kernel,
                        strides=[1, 1, 1, 1],
                        padding="VALID",
                        name="conv")
                    # Apply nonlinearity
                    h = tf.nn.relu(tf.nn.bias_add(conv, bias), name="relu")
                    # Max-pooling over the outputs
                    # Pool window covers the whole valid sequence -> one value
                    # per filter (max-over-time pooling).
                    pooled = tf.nn.max_pool(
                        h,
                        ksize=[1, 1, self.sequence_length - filter_size + 1, 1],
                        strides=[1, 1, 1, 1],
                        padding='VALID',
                        name="pool")
                    pooled_outputs.append(pooled)

            pooled_outputs_all.append(pooled_outputs)

        # Combine all the pooled features
        self.num_filters_total = self.num_filters * len(self.filter_sizes)
        self.h_pool_flat_list = [tf.reshape(tf.concat(pooled_outputs, 3), [-1, self.num_filters_total]) for pooled_outputs in pooled_outputs_all]

    def _dropout(self):
        """Apply dropout to each head's pooled feature vector."""
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop_list = [tf.nn.dropout(h_pool_flat, self.dropout_keep_prob) for h_pool_flat in self.h_pool_flat_list]

    def _prediction(self):
        """Per-head dense layer producing logits and argmax predictions."""
        with tf.name_scope("output"):
            self.scores_list = []
            self.predictions_list = []
            for i, h_drop in enumerate(self.h_drop_list):
                W = tf.get_variable(shape=[self.num_filters_total, self.num_classes], initializer=tf.truncated_normal_initializer(stddev=0.1), name="W-%s" % i)
                # b = tf.Variable(tf.constant(0.1, shape=[self.num_classes]), name="b")
                b = tf.get_variable(name="b-%s" % i, shape=[self.num_classes], dtype=tf.float32)
                # self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
                # The "predictions-%s" names are looked up from the restored
                # meta-graph by the prediction classes below.
                scores = tf.matmul(h_drop, W, name="logits-%s" % i) + b
                predictions = tf.argmax(scores, 1, name="predictions-%s" % i)
                self.scores_list.append(scores)
                self.predictions_list.append(predictions)

    def _cal_loss(self):
        """Mean (over heads) of mean sparse softmax cross-entropy per head."""
        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            losses_list = []
            for i, scores in enumerate(self.scores_list):
                # losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
                losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=scores, labels=self.input_y[i])
                loss = tf.reduce_mean(losses)
                losses_list.append(loss)
            self.loss = tf.reduce_mean(losses_list)

    def _cal_acc(self):
        """Mean accuracy averaged over the 20 heads."""
        # Calculate Accuracy
        with tf.name_scope("accuracy"):
            accuracy_list = []
            for i, predictions in enumerate(self.predictions_list):
                correct_predictions = tf.equal(tf.cast(predictions, tf.int32), self.input_y[i])
                accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
                accuracy_list.append(accuracy)
            self.accuracy = tf.reduce_mean(accuracy_list)

    def _grad(self):
        """Adam (lr=1e-3) training op over the combined loss."""
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)
        grads_and_vars = optimizer.compute_gradients(self.loss)
        self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

    def _save(self):
        """Write a checkpoint (graph + weights) under self.checkpointDir."""
        if not tf.gfile.Exists(self.checkpointDir):
            tf.gfile.MakeDirs(self.checkpointDir)
        saver = tf.train.Saver()
        saver.save(sess=self.sess, save_path=self.checkpointDir + "model")

    def train(self):
        """Train for 3 epochs over the train split, checkpointing each epoch."""
        self.sess.run(tf.global_variables_initializer())
        print("\nbegin train ....\n")
        step = 0
        _iter = 0
        dataPreClass = DataPreClass(model="train")
        for i in range(3):
            dataPreClass.reset()  # reshuffle at each epoch boundary
            while True:
            # for input_x, input_y in dataPreClass:
                try:
                    input_x, input_y = dataPreClass.__next__()
                    # print(input_x)
                    # print(input_y)
                    _iter += 1
                    step += len(input_x)
                    _, loss, acc, predictions_list = self.sess.run(
                        fetches=[self.train_op, self.loss, self.accuracy, self.predictions_list],
                        feed_dict={self.input_x: input_x, self.input_y: input_y, self.dropout_keep_prob: 0.5})
                    # Macro-F1 per head, then averaged across the 20 heads.
                    f1_list = []
                    for idx, predictions in enumerate(predictions_list):
                        f1 = f1_score(input_y[idx], predictions, average='macro')
                        f1_list.append(f1)
                    avg_f1 = np.array(f1_list).mean()

                    print("<Train>\t Epoch: [%d] Iter: [%d] Step: [%d] Loss: [%.3F]\t Acc: [%.3f]\t f1: [%.3f]" %
                          (i + 1, _iter, step, loss, acc, avg_f1))
                except StopIteration:
                    break
            self._save()

    def test(self):
        """Evaluate on the dev split (dropout disabled), printing per-batch metrics."""
        print("\nbegin test ....\n")
        testset = DataPreClass("dev")
        while True:
        # for input_x, input_y in testset:
            try:
                input_x, input_y = testset.__next__()
                acc, loss, predictions_list = self.sess.run(
                    fetches=[self.accuracy, self.loss, self.predictions_list],
                    feed_dict={self.input_x: input_x, self.input_y: input_y, self.dropout_keep_prob: 1.})
                f1_list = []
                for idx, predictions in enumerate(predictions_list):
                    f1 = f1_score(input_y[idx], predictions, average='macro')
                    f1_list.append(f1)
                avg_f1 = np.array(f1_list).mean()

                print("Loss: [%.3F]\t Acc: [%.3f]\t f1: [%.3f]" % (loss, acc, avg_f1))
            except StopIteration:
                break


class PredictModelValidF1(object):
    """Restore the saved TextCNN checkpoint and score labelled batches with macro-F1.

    Fix over the original: the meta-graph/checkpoint is now restored once and
    cached, instead of being reloaded on every cnn_predict_by_meta_graph call
    (the caller invokes it once per batch).
    """

    def __init__(self):
        self.checkpointDir = "model/cnn/"
        # self.chinese_vocab_fp = os.path.join(this_file_path, '../../data/all_data/chinese_vocab.txt')
        self.chinese_vocab_fp = os.path.join(this_file_path, 'data/chinese_vocab.txt')

        self.labels = [1, 0, -1, -2]

        # class index -> raw label value
        self.labels_map = {idx: v for idx, v in enumerate(self.labels)}
        self.vocab_map = self.read_vocab_map()

        self.sess = None  # lazily created on first prediction

        self.chan = 20       # number of aspect heads in the saved model
        self.word_num = 800  # fixed document length in characters

    def read_vocab_map(self):
        """Load the character vocabulary file as a {char: index} dict."""
        with codecs.open(self.chinese_vocab_fp, "r", encoding="utf-8") as fp:
            print("open read_vocab_map")
            vocab_list = [line.strip() for line in fp.readlines()]

        return {str(v): idx for idx, v in enumerate(vocab_list)}

    def _encode_content(self, content_list):
        """Pad/truncate each sentence to word_num chars and map chars to indices.

        Returns an int32 array of shape (len(content_list), word_num); unknown
        characters map to index 0.
        """
        encode_content_list = []
        for sentence in content_list:
            if len(sentence) >= self.word_num:
                sentence = sentence[:self.word_num]
            else:
                sentence = sentence + " " * (self.word_num - len(sentence))
            content_idx = [self.vocab_map.get(str(k), 0) for k in sentence]
            encode_content_list.append(np.array(content_idx).reshape(-1, self.word_num))

        return np.array(encode_content_list, dtype=np.int32).reshape(-1, self.word_num)

    def __cnn_by_meta_graph(self):
        """Restore the latest checkpoint (once) and bind input/prediction tensors."""
        if self.sess is not None:
            return  # already loaded — reuse the cached session and tensors
        checkpoint_file = tf.train.latest_checkpoint(self.checkpointDir)
        graph = tf.Graph()
        with graph.as_default():
            self.sess = tf.Session()
            with self.sess.as_default():
                saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
                saver.restore(self.sess, checkpoint_file)
                self.input_x = graph.get_operation_by_name("input_x").outputs[0]
                self.dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
                self.predictions_list = [
                    graph.get_operation_by_name("output/predictions-%s" % i).outputs[0]
                    for i in range(0, 20)]

    def cnn_predict_by_meta_graph(self, content_list):
        """Predict class indices for an already-encoded batch.

        Returns a (batch, 20) nested list of class indices — NOT raw label
        values; cal_f1 compares them against index-encoded ground truth.
        """
        input_batch = content_list
        self.__cnn_by_meta_graph()
        batch_predictions_list = self.sess.run(
            self.predictions_list,
            {self.input_x: input_batch, self.dropout_keep_prob: 1.0})
        return np.array(batch_predictions_list).transpose((1, 0)).tolist()

    def cal_f1(self, y_true, y_pred):
        """Mean macro-F1 over the 20 aspect channels; inputs are (batch, 20)."""
        y_true_arr = np.array(y_true).transpose((1, 0))
        y_pred_arr = np.array(y_pred).transpose((1, 0))
        f1_list = [
            f1_score(y_true_arr[i], y_pred_arr[i], average='macro')
            for i in range(0, self.chan)]
        return np.array(f1_list).mean()


class PredictModelTestA(object):
    """Run the saved TextCNN checkpoint over the test_a split.

    Restores the latest checkpoint from ``model/cnn/`` once, then feeds every
    batch from DataPreClass("test_a") through the 20 prediction heads, mapping
    class indices back to the raw label values [1, 0, -1, -2].
    """

    def __init__(self):
        self.checkpointDir = "model/cnn/"
        self.chinese_vocab_fp = os.path.join(this_file_path, 'data/chinese_vocab.txt')

        self.labels = [1, 0, -1, -2]

        # class index -> raw label value
        self.labels_map = {idx: value for idx, value in enumerate(self.labels)}
        self.vocab_map = self.read_vocab_map()

        self.chan = 20
        self.word_num = 800

    def read_vocab_map(self):
        """Load the character vocabulary file as a {char: index} dict."""
        with codecs.open(self.chinese_vocab_fp, "r", encoding="utf-8") as fp:
            print("open read_vocab_map")
            tokens = [line.strip() for line in fp.readlines()]
        return {str(token): idx for idx, token in enumerate(tokens)}

    def __cnn_by_meta_graph(self):
        """Restore the latest checkpoint and bind the input/prediction tensors."""
        checkpoint_file = tf.train.latest_checkpoint(self.checkpointDir)
        graph = tf.Graph()
        with graph.as_default():
            self.sess = tf.Session()
            with self.sess.as_default():
                saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
                saver.restore(self.sess, checkpoint_file)
                self.input_x = graph.get_operation_by_name("input_x").outputs[0]
                self.dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
                self.predictions_list = [
                    graph.get_operation_by_name("output/predictions-%s" % head).outputs[0]
                    for head in range(0, 20)]

    def cnn_predict_by_meta_graph(self):
        """Predict every test_a batch; returns rows of 20 raw label values each."""
        self.__cnn_by_meta_graph()
        testset = DataPreClass("test_a")
        all_rst = []
        for input_x, _unused_y in testset:
            feed = {self.input_x: input_x, self.dropout_keep_prob: 1.0}
            per_head_predictions = self.sess.run(self.predictions_list, feed)
            labelled = [
                [self.labels_map[p] for p in head_predictions]
                for head_predictions in per_head_predictions]
            # transpose (20, batch) -> (batch, 20) rows
            all_rst.extend(np.array(labelled).transpose((1, 0)).tolist())

        return all_rst



def train_model():
    """Build the TextCNN, fit it on the train split, then evaluate on dev."""
    model = TextCNN(
        sequence_length=800,
        num_classes=4,
        vocab_size=8590,
        embedding_size=128,
        filter_sizes=[3, 6, 9],
        num_filters=256,
    )
    model.train()
    model.test()


def xx():
    """Score the dev split with the restored checkpoint; print per-batch and mean F1."""
    devset = DataPreClass("dev")
    predictor = PredictModelValidF1()
    batch_scores = []
    for batch_x, batch_y in devset:
        predicted = predictor.cnn_predict_by_meta_graph(batch_x)
        score = predictor.cal_f1(batch_y.transpose((1, 0)), predicted)
        print(score)
        batch_scores.append(score)

    mean_score = sum(batch_scores) / (1.0 * len(batch_scores))
    print(mean_score)



def xxxx():
    """Write test_a predictions to ./data/test_a_rst.csv, one space-joined row per line."""
    predictor = PredictModelTestA()
    rows = predictor.cnn_predict_by_meta_graph()
    rendered = [" ".join(map(str, row)) for row in rows]
    with open("./data/test_a_rst.csv", "w+") as fp:
        fp.write("\n".join(rendered))


def save_test_a_data():
    """Merge the predicted labels back into the test_a dataframe and save as CSV.

    Reads ./data/test_a_rst.csv (one space-joined row of 20 labels per line),
    writes the labelled dataframe to ./data/test_a_hy.csv.
    """
    # dataset_dir = "~/Documents/ai_challenge/"
    dataset_dir = "~/sentiment_analysis/AI-Challenge/"

    dataset = Datasets(dataset_dir)
    test_data_a = dataset.test_data_a()

    with open("./data/test_a_rst.csv", "r") as fp:
        all_rst = fp.readlines()

    all_rst_num = [a_line.strip().split(" ") for a_line in all_rst]

    # `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int is the documented, behaviorally-identical replacement.
    test_data_a.iloc[0:, 2:] = np.array(all_rst_num, dtype=int)

    print(test_data_a.iloc[1][0])

    test_data_a.to_csv("./data/test_a_hy.csv", index=False)


if __name__ == '__main__':
    # Train + dev-evaluate by default; the commented calls below run the
    # checkpoint-based scoring / test_a prediction / CSV-export steps.
    train_model()
    # xx()
    # xxxx()
    #
    # save_test_a_data()


