# coding=utf-8

import os
import tensorflow as tf
import pickle
import time
import datetime

from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
stemmer = PorterStemmer()
lemmatizer = WordNetLemmatizer()

# import tensorflow.contrib.eager as tfe
tf.enable_eager_execution()

import numpy as np
import itertools

tf.reset_default_graph()

load_pickle = False  # True: skip training and restore a finished model from textcnn_model.pkl
dump_pickle = True  # dump the trained model to textcnn_model.pkl once training finishes
enable_training_breakpoint = True  # checkpoint to _temp_textcnn_model.pkl every 10 epochs and resume from it on restart

# Text-CNN Parameter
embedding_size = 64  # dimensionality of each word embedding vector
num_classes = 2
filter_sizes = [3,4,5] # n-gram window sizes (convolution filter heights)
dropout_keep_prob_during_training = 0.7
num_filters = 64

# TODO:
# 1. make_batch is not well designed for large numbers of samples
# 2. the 4-word sentence "i sorry hate you" produced an odd model shape -> fixed, cause was hard-coded sequence_length=3
# 3. the 2-word sentence "love me" crashed outright -> fixed, same cause as above
# 4. support variable-length training sentences; clarify what max_pool's ksize means and whether it can be dropped



# Model-Predict
# hypothesis = tf.nn.softmax(model)
# predictions = tf.argmax(hypothesis, 1)
def build_word_dict(texts):
    """Build a word -> integer-id vocabulary from a list of sentences.

    Ids start at 1; id 0 is reserved as the padding / unknown-word placeholder.

    Returns:
        (longest, word_dict): the token count of the longest sentence, and the
        vocabulary mapping.
    """
    vocab = set()
    longest = 0
    for sentence in texts:
        tokens = sentence.split()
        if len(tokens) > longest:
            longest = len(tokens)
        if len(tokens) >= 199:
            # Debug aid: surface unusually long sentences.
            print(sentence)
        vocab |= set(tokens)
    word_dict = {word: index + 1 for index, word in enumerate(vocab)}
    return longest, word_dict

def process_sentence(text):
    """Normalize a raw sentence: strip punctuation, lowercase, Porter-stem each token.

    Bug fixed: the inner helpers each declared a `text` parameter but actually
    read the enclosing `sent` variable by closure, and `stemming` was invoked
    with the raw `text` while (accidentally) stemming the punctuation-stripped
    `sent`. The helpers now use their own argument explicitly; the observable
    pipeline (remove punctuation, then stem) is unchanged.

    Args:
        text: raw sentence string.

    Returns:
        The normalized sentence as a single space-joined string.
    """
    def stemming(sent):
        # Porter-stem each whitespace-separated token.
        return " ".join(stemmer.stem(w) for w in sent.split())

    def lemmatization(sent):
        # Currently unused alternative normalization; kept for parity with the original.
        return " ".join(lemmatizer.lemmatize(w) for w in sent.split())

    def remove_punctuation(sent):
        import re
        import string
        # Drop every ASCII punctuation character, then trim and lowercase.
        sent = re.sub(r'[{}]+'.format(string.punctuation), '', sent)
        return sent.strip().lower()

    sent = remove_punctuation(text)
    return stemming(sent)

def read_corpus(corpus_folder, file_pattern):
    """Read fastText-style labelled review files found under `corpus_folder`.

    Every file whose name ends with `file_pattern` (skipping "~$" Office temp
    files) is read line by line. Lines look like "__label__1 some review text";
    label "1" maps to class 1, any other digit maps to class 0, and the
    remaining text is normalized via process_sentence(). Lines without a label
    prefix are silently dropped.

    Args:
        corpus_folder: root directory, walked recursively.
        file_pattern: filename suffix to select, e.g. ".1k.txt".

    Returns:
        (x_fit_text, y_fit_label): parallel lists of normalized sentences and
        integer class labels.
    """
    import os
    import re
    import codecs

    corpus_filepath_lst = []
    for root, dirs, files in os.walk(corpus_folder):
        for f in files:
            if f.endswith(file_pattern) and not f.startswith("~$"):
                # os.path.join instead of manual "/" concatenation (portable
                # across separators and trailing-slash inputs).
                corpus_filepath_lst.append(os.path.join(root, f))

    lines = []
    for corpus_filepath in corpus_filepath_lst:
        print("reading %s " % corpus_filepath)
        with codecs.open(corpus_filepath, mode='r', encoding="utf-8") as opened_file:
            lines += opened_file.readlines()

    pattern = r"__label__(\d)"
    x_fit_text = []
    y_fit_label = []
    for line in lines:
        matcher = re.match(pattern, line, flags=re.IGNORECASE)
        if matcher:
            # NOTE(review): "__label__1" is mapped to class 1 — confirm which
            # sentiment class 1 denotes in this corpus before relying on it.
            sentiment = 1 if matcher.group(1) == "1" else 0
            y_fit_label.append(sentiment)
            sent = re.sub(pattern, '', line, flags=re.IGNORECASE)
            sent = process_sentence(sent)
            x_fit_text.append(sent)
    return x_fit_text, y_fit_label

def word_embedding(Wx, texts):
    """Map sentences to padded word-id sequences and look up their embeddings.

    Relies on module-level `word_dict` and `max_sentence_length`. Unknown
    words and padding positions both map to id 0; sentences longer than
    `max_sentence_length` are truncated.

    Args:
        Wx: the embedding table variable, [vocab_size + 1, embedding_size].
        texts: iterable of whitespace-tokenized sentence strings.

    Returns:
        Tensor of shape [batch_size, max_sentence_length, embedding_size, 1]
        (trailing channel dim added for conv2d).
    """
    inputs = []
    for sentence in texts:
        tokens = sentence.split()
        ids = [word_dict.get(tok, 0) for tok in tokens[:max_sentence_length]]
        ids += [0] * (max_sentence_length - len(ids))
        inputs.append(np.asarray(ids))

    embedded = tf.nn.embedding_lookup(Wx, inputs)  # [batch_size, sequence_length, embedding_size]
    return tf.expand_dims(embedded, -1)  # add channel(=1): [batch_size, sequence_length, embedding_size, 1]

# def conv_layer(input, conv_w, conv_b):
#     conv = tf.nn.conv2d(input,  # [batch_size, sequence_length, embedding_size, 1] # 指需要做卷积的输入图像（tensor），具有[batch,in_height,in_width,in_channels]这样的4维shape，分别是图片数量、图片高度、图片宽度、图片通道数，数据类型为float32或float64。
#                             conv_w,  # [filter_size(n-gram window), embedding_size, 1, num_filters(=3)] # 相当于CNN中的卷积核，它是一个tensor，shape是[filter_height,filter_width,in_channels,out_channels]：滤波器高度、宽度、图像通道数、滤波器个数，数据类型和input相同。
#                             strides=[1, 1, 1, 1],  # strides：步长，是一个四维的张量[1, 1, 1, 1]，第一位与第四位固定为1，第二第三为长宽上的步长这里都是设为1
#                             padding='VALID')
#     h = tf.nn.relu(tf.nn.bias_add(conv, conv_b))
#     return h
# def make_batch(index, X, Y, batch_size=100):
#     start_index = index * batch_size
#     x_batch = X[start_index: batch_size]
#     y_batch = Y[start_index: batch_size]
#
#     return x_batch, y_batch
    # input_batch = []
    # target_batch = []
    #
    # for line in lines:
    #     target = int(line.split("->")[0])
    #     target_batch.append(np.eye(num_classes)[target])
    #
    #     sentence = line.split("->")[1]
    #     input_batch.append(sentence)
    #
    # X = word_embedding(Wx, input_batch)
    #
    # return input_batch, target_batch

def cnn_model(embedded_chars, sequence_length, dropout_keep_prob=1.0):
    """Forward pass of the TextCNN: per-filter-size conv + max-pool, concat, dropout, dense.

    Args:
        embedded_chars: embedded input, [batch_size, sequence_length, embedding_size, 1].
        sequence_length: fixed (padded) token count per sentence.
        dropout_keep_prob: keep probability for dropout; default 1.0 means no
            dropout (inference mode).

    Returns:
        Unnormalized class logits of shape [batch_size, num_classes].

    NOTE(review): reads module-level globals `filter_sizes`, `num_filters`,
    `conv_weight_list`, `conv_bias_list`, `dense_w`, `dense_b`.
    """
    pooled_outputs = []
    for i, filter_size in enumerate(filter_sizes):
        conv_w = conv_weight_list[i]
        conv_b = conv_bias_list[i]

        # conv2d input has 4-D shape [batch, in_height, in_width, in_channels]
        # (here: batch, sequence_length, embedding_size, 1), dtype float32/float64.
        conv = tf.nn.conv2d(embedded_chars,  # [batch_size, sequence_length, embedding_size, 1]
                            conv_w,  # kernel [filter_height, filter_width, in_channels, out_channels] = [filter_size (n-gram window), embedding_size, 1, num_filters]
                            strides=[1, 1, 1, 1],  # 4-D stride; first and last entries must be 1, middle two are the height/width steps
                            padding='VALID')  # 'VALID' = no edge padding ('SAME' would pad the borders)
        h = tf.nn.relu(tf.nn.bias_add(conv, conv_b))
        # Max-pool over the entire conv output height (sequence_length - filter_size + 1),
        # collapsing each filter's feature map to one value per filter.
        pooled = tf.nn.max_pool(h,
                                ksize=[1, sequence_length - filter_size + 1, 1, 1],
                                # ksize=[1, h.shape[1], h.shape[2], 1],
                                # [batch_size, filter_height, filter_width, channel]
                                strides=[1, 1, 1, 1],
                                padding='VALID')
        pooled_outputs.append(pooled)

    # Combine all the pooled features: one num_filters-wide slab per filter size.
    num_filters_total = num_filters * len(filter_sizes)
    h_pool = tf.concat(pooled_outputs, axis=3)  # h_pool : [batch_size, 1, 1, num_filters * len(filter_sizes)]
    # -1 tells tf.reshape to flatten the remaining dims into the dense-layer input.
    h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])  # [batch_size, num_filters_total]
    # Dropout: each unit is dropped with probability 1 - dropout_keep_prob.
    h_drop = tf.nn.dropout(h_pool_flat, dropout_keep_prob)
    model = tf.nn.xw_plus_b(h_drop, dense_w, dense_b)  # tf.nn.xw_plus_b is a convenience form of the Wx + b matmul

    return model


def calc_cost(model, labels):
    """Mean softmax cross-entropy over the batch.

    softmax_cross_entropy_with_logits_v2 yields one loss per example
    (a length-N vector for a batch of N); reduce_mean averages them.
    """
    per_example_loss = tf.nn.softmax_cross_entropy_with_logits_v2(logits=model, labels=labels)
    return tf.reduce_mean(per_example_loss)

if not load_pickle:  # start to training
    print("reading corpus...")
    reading_corpus_start_time = time.time()
    sentences, labels = read_corpus(u"E:/___机器学习数据集/amazonreviews", ".1k.txt")
    # sentences, labels = read_corpus(u"E:/___机器学习数据集/amazonreviews", ".100k.txt")
    print("finished reading corpus, take %ss" % (time.time() - reading_corpus_start_time))

    # Resume from the epoch checkpoint if one exists.
    # NOTE(review): pickle.load of an on-disk file is unsafe if the file could
    # be tampered with — acceptable only for local, trusted checkpoints.
    if enable_training_breakpoint and os.path.exists('_temp_textcnn_model.pkl'):
        [epoch_breakpoint, Wx, word_dict, max_sentence_length, conv_weight_list, conv_bias_list, dense_w, dense_b] = pickle.load(open("_temp_textcnn_model.pkl", "rb"))
    else:  # re-initialize every trainable variable from scratch
        epoch_breakpoint = 0
        max_sentence_length, word_dict = build_word_dict(sentences)
        if max_sentence_length >= 150:
            max_sentence_length = 150  # cap the padded length; longer sentences are truncated in word_embedding
        vocab_size = len(word_dict)

        Wx = tf.Variable(tf.random_uniform([vocab_size + 1, embedding_size], -1.0, 1.0))  # randomly initialized embedding table (row 0 = padding/unknown id)

        # Initialize one convolution kernel + bias pair per filter size.
        conv_weight_list = list()
        conv_bias_list = list()
        for i, filter_size in enumerate(filter_sizes):
            filter_shape = [filter_size, embedding_size, 1, num_filters]
            conv_w = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1))
            conv_b = tf.Variable(tf.constant(0.1, shape=[num_filters]))
            conv_weight_list.append(conv_w)
            conv_bias_list.append(conv_b)

        # Fully-connected output layer weights and bias.
        dense_w = tf.get_variable('W', shape=[num_filters * len(filter_sizes), num_classes],
                                  initializer=tf.contrib.layers.xavier_initializer())  # Xavier initialization keeps per-layer gradient magnitudes roughly comparable
        dense_b = tf.Variable(tf.constant(0.1, shape=[num_classes]))

    X = word_embedding(Wx, sentences)

    # One-hot encode the integer labels for softmax cross-entropy.
    outputs = []
    for out in labels:
        outputs.append(np.eye(num_classes)[out])
    Y = outputs

    all_trainable_var = conv_weight_list + conv_bias_list + [dense_w] + [dense_b]
    optimizer = tf.train.AdamOptimizer(1e-3)  # .minimize(cost)

    batch_size = 5000
    batch_num = int(((len(X) - 1)/batch_size) + 1)  # ceil(len(X) / batch_size)

    epoch_start_tick = time.time()
    for epoch in range(epoch_breakpoint, 1000):
        for batch_index in range(batch_num):
            start_index = batch_index * batch_size
            end_index = min((batch_index + 1) * batch_size, len(X))
            x_batch = X[start_index:end_index]
            labels_batch = labels[start_index:end_index]
            y_batch = Y[start_index:end_index]

            # GradientTape records the forward pass so eager mode can differentiate it.
            with tf.GradientTape() as tape:
                model = cnn_model(x_batch, max_sentence_length, dropout_keep_prob_during_training)
                cost = calc_cost(model, y_batch)

            # gradients = tape.gradient(cost, [conv_w, conv_b, dense_w, dense_b])
            gradients = tape.gradient(cost, all_trainable_var)
            # print(gradients)

            optimizer.apply_gradients(zip(gradients, all_trainable_var))

        # Periodic progress report + checkpoint.
        # NOTE(review): model / cost / labels_batch here come from the LAST
        # batch of the epoch only, so the printed accuracy is batch-local.
        if epoch % 10 == 0:
            hypothesis = tf.nn.softmax(model)
            predictions = tf.argmax(hypothesis, 1)
            correct_predictions = tf.equal(predictions, labels_batch)
            accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_predictions, "float"), name="accuracy")
            time_str = datetime.datetime.now().isoformat()
            print("take %ss finish %s, epoch: %s, batch: %s, Train loss is : %f, Train accuracy is : %f" % ((time.time() - epoch_start_tick), time_str,
            epoch, batch_index, cost, accuracy))
            epoch_start_tick = time.time()
            print("dense_b is: %s" % dense_b)
            if enable_training_breakpoint:
                # Persist epoch + all trainable state so a restart resumes here.
                with open("_temp_textcnn_model.pkl", 'wb') as fw:
                    pickle.dump(
                        [epoch + 1, Wx, word_dict, max_sentence_length, conv_weight_list, conv_bias_list, dense_w, dense_b], fw)


    print("training finished...")
    print("last epoch: %s, Train loss is : %f, Train accuracy is : unknown" % (epoch, cost))
    print("dense_b is: %s" % dense_b)

    if dump_pickle:
        with open("textcnn_model.pkl", 'wb') as fw:
            pickle.dump([Wx, word_dict, max_sentence_length, conv_weight_list, conv_bias_list, dense_w, dense_b], fw)
else:
    # Inference-only path: restore a previously trained model from disk.
    [Wx, word_dict, max_sentence_length, conv_weight_list, conv_bias_list, dense_w, dense_b] = pickle.load(open("textcnn_model.pkl", "rb"))
    sequence_length = max_sentence_length

def predict(embedded_chars_pred):
    """Run the trained TextCNN on pre-embedded inputs and return class ids.

    Args:
        embedded_chars_pred: output of word_embedding(), shape
            [batch_size, max_sentence_length, embedding_size, 1].

    Returns:
        List of predicted class ids (argmax over the softmax), one per input.
    """
    logits = cnn_model(embedded_chars_pred, max_sentence_length)  # dropout defaults to keep_prob=1.0 (inference)
    probabilities = tf.nn.softmax(logits)
    predicted_classes = tf.argmax(probabilities, 1)
    return predicted_classes.numpy().tolist()


# pooled_outputs = []
# predict("you hate you")

# sentences, labels = read_corpus(u"E:/___机器学习数据集/amazonreviews", ".85k-100k.txt")
# Evaluate the model on a held-out slice of the corpus.
sentences, labels = read_corpus(u"E:/___机器学习数据集/amazonreviews", ".100k-105k.txt")

embedded_chars_pred = word_embedding(Wx, sentences)
predictions = predict(embedded_chars_pred)
# predictions = [predict(sent) for sent in sentences]

# Element-wise compare predicted class ids against the true labels, then average.
correct_predictions = tf.equal(predictions, labels)
accuracy = tf.reduce_mean(input_tensor=tf.cast(correct_predictions, "float"), name="accuracy")
print("test accuracy is : %f" % accuracy)
# result = predict("sizes recomended in the size chart are not real: sizes are much smaller than what is recomended in the chart. I tried to put it and sheer it!. I guess you should not buy this item in the internet..it is better to go to the store and check it")
# result = predict("great buy...: good price, fast shipping! what more could you want? it serves it's purpose for simple around the house/shop measurements.")
#
# if result == 0:
#     print(text, " is positive...")
# elif result == 1:
#     print(text, " is negative...")
# else:
#     print(text, " unown...")

    # predict('she like baseball')
# predict('sorry for that')
# pooled_outputs = []
# predict("that is awful you hate me he hate you")
# tf.saved_model.save(to_export, '/tmp/adder')


    # optimizer.minimize(cost)
    # with tf.GradientTape() as tape:
    #     current_cost = cost(X, Y)
    #     print(current_cost)

    # grads = tape.gradient(cost, weights)
    #
    # grads_and_vars = optimizer.compute_gradients(cnn.loss)
    # optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))