from python_ai.common.xcommon import *
import tensorflow as tsf
import tensorflow.compat.v1 as tf
import numpy as np
from python_ai.DL.tensorflow.common.text_proc import *
import os
import sys  # explicit import (previously relied on the star imports above)
import pandas as pd

# Reproducibility: fix both NumPy's and TF's graph-level RNG seeds.
np.random.seed(777)
tf.set_random_seed(777)

is_use_cache = False           # restore a previously saved session if one exists
ver = 'v1.0'

# Training hyper-parameters.
learning_rate = 0.001          # optimizer learning rate
training_epochs = 10           # full passes over the training set
batch_size = 64                # samples per mini-batch

# Network dimensions.
n_neuron = 128                 # LSTM hidden units per layer
EMBEDDING_SIZE = n_neuron      # word-embedding width, tied to the LSTM size
n_layer = 2                    # stacked LSTM layers
n_cls = 2                      # output classes (positive / negative)
n_fc_neuron = 64               # width of the hidden FC layer (unused while it is commented out below)

# Output locations.
path_save_session = './_save/'
file_name = os.path.basename(__file__)
output_dir = './_investigation/'

sep('Load data')
dev_sample_percentage = 0.1  # fraction of samples held out for the dev/test split
path = r'..\..\..\..\..\large_data\DL1\cnn_text\\'
is_test = False
xname = '2' if is_test else ''
positive_data_file = path + 'rt-polarity' + xname + '.pos'  # positive reviews
negative_data_file = path + 'rt-polarity' + xname + '.neg'  # negative reviews


def _read_review_lines(file_path):
    # One review per line; undecodable bytes are silently dropped.
    with open(file_path, 'r', encoding='gbk', errors='ignore') as fh:
        lines = fh.readlines()
        sep(type(lines))
        print(lines[:5])
    return lines


pos_ex = _read_review_lines(positive_data_file)
neg_ex = _read_review_lines(negative_data_file)

# splice pos and neg
sep('splice pos and neg')
x_sentences = pos_ex + neg_ex
# Normalize every sentence before building the vocabulary.
x_sentences = [format_and_filter_string(sent) for sent in x_sentences]
print(x_sentences[:5])
sep(f'data length = {len(x_sentences)}')
# One-hot labels: positive -> [0, 1], negative -> [1, 0].
pos_oh = [[0, 1] for _ in pos_ex]
neg_oh = [[1, 0] for _ in neg_ex]
y = np.r_[pos_oh, neg_oh]

# Dump the cleaned sentences for offline inspection.
with open(output_dir + 'x_sentences_' + file_name + '.txt', 'w') as f:
    f.writelines(sent + '\n' for sent in x_sentences)
# np.savetxt('y.np.txt', y)

sep('dictionary')
# Longest document in whitespace-separated tokens; every sentence is padded /
# truncated to this length by the vocabulary processor.
max_doc_len = max(len(sent.split(' ')) for sent in x_sentences)
sep(f'max doc len: {max_doc_len}')
# NOTE(review): contrib's VocabularyProcessor is deprecated in TF >= 1.x;
# kept here because the whole file targets the TF1 compat API.
vocab_proc = tsf.contrib.learn.preprocessing.VocabularyProcessor(max_doc_len)
x_idxs = vocab_proc.fit_transform(x_sentences)  # generator of id arrays, one per sentence
vocab_len = len(vocab_proc.vocabulary_)

# Extract the word -> id mapping from the processor.
# Observed layout: {'<UNK>': 0, 'the': 1, 'rock': 2, ...} (id 0 is '<UNK>').
vocab_dict = vocab_proc.vocabulary_._mapping

# Order the words by ascending id so that the word with id i sits at index i.
sorted_vocab = sorted(vocab_dict.items(), key=lambda item: item[1])
vocabulary = list(zip(*sorted_vocab))[0]  # tuple of words, index == id

# Dump the vocabulary (alphabetically sorted) for offline inspection.
# pandas is now imported at the top of the file instead of mid-script.
df = pd.DataFrame({0: vocabulary})
df.sort_values(0).to_csv(output_dir + 'vocab_' + file_name + '.txt', index=False)

# NOTE(review): a debugging `sys.exit(0)` used to sit here, right after the
# vocabulary dump; it made everything below (training and evaluation)
# unreachable. Removed so the full pipeline runs.

sep(f'vocab len: {vocab_len}')
x = np.array(list(x_idxs))  # materialize the id generator: (m, max_doc_len)
sep('x')
print(x[:2])

sep('shuffle data')
np.random.seed(10)  # re-seeds NumPy's RNG (overrides the 777 seed set at import time)
m = len(y)
rand_idx = np.random.permutation(m)
x = x[rand_idx]
y = y[rand_idx]

sep('split')
# Negative offset: the last `dev_sample_percentage` of the shuffled rows
# become the test set; vsplit at a negative index keeps the rest for training.
border = -1 * int(m * dev_sample_percentage)
m_train = m + border
m_test = -border
x_train, x_test = np.vsplit(x, [border])
y_train, y_test = np.vsplit(y, [border])
print(f'm_train: {m_train}')
print(f'm_test: {m_test}')
flush()
check_shape(x_train, 'x_train')  # (?, max_doc_len)
check_shape(x_test, 'x_test')
check_shape(y_train, 'y_train')  # (?, n_cls)
check_shape(y_test, 'y_test')
flush()

sep('func: next batch')
g_b = 0  # cursor into the (already shuffled) training arrays


def next_batch(size):
    """Return the next mini-batch of up to `size` rows from x_train / y_train.

    Walks sequentially through the training arrays via the module-level
    cursor g_b and rewinds to the start once the cursor reaches m_train.
    """
    global g_b, x_train, y_train
    start = g_b
    stop = start + size
    batch_x = x_train[start:stop]
    batch_y = y_train[start:stop]
    # Advance the cursor; wrap around when the training set is consumed.
    g_b = 0 if stop >= m_train else stop
    return batch_x, batch_y


sep('FP')
# ---- Forward pass: TF1 graph construction ----
with tf.variable_scope('Embedding'):
    # Inputs: token-id matrices padded to max_doc_len; labels: one-hot rows.
    ph_x = tf.placeholder(tf.int32, [None, max_doc_len], 'ph_x')
    ph_y = tf.placeholder(tf.int32, [None, n_cls], 'ph_y')
    # Trainable embedding table, initialized uniformly in [-1, 1).
    params = tf.Variable(tf.random.uniform([vocab_len, EMBEDDING_SIZE], -1., 1.))  # (vocab_len, emb_size)
    print(params)  # (vocab_len, emb_size)
    # https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/nn/embedding_lookup
    # https://stackoverflow.com/questions/34870614/what-does-tf-nn-embedding-lookup-function-do
    # Row-gather: each token id selects its embedding row.
    x_embedded = tf.nn.embedding_lookup(params, ph_x)  # ? (vocab_len, emb_size), (?, max_doc_len) -> (?, max_doc_len, emb_size)
    print(x_embedded, 'x_embedded')  # (?, max_doc_len, embedding_size)

with tf.variable_scope('RNN'):
    # n_layer stacked LSTM cells, unrolled over the token dimension.
    cell_arr = [tf.nn.rnn_cell.LSTMCell(n_neuron) for i in range(n_layer)]
    cells = tf.nn.rnn_cell.MultiRNNCell(cell_arr)
    outputs, states = tf.nn.dynamic_rnn(cells, x_embedded, dtype=tf.float32)
    print(outputs, 'outputs')  # (?, max_doc_len, n_neuron)

with tf.variable_scope('FC'):
    # Classify from the output at the last time step only.
    outputs_last = outputs[:, -1]
    print(outputs_last, 'outputs_last')
    # fc1 = tsf.contrib.layers.fully_connected(outputs_last, n_fc_neuron, activation_fn=tf.nn.sigmoid)
    # logits = tsf.contrib.layers.fully_connected(fc1, n_cls, activation_fn=None)

    logits = tsf.contrib.layers.fully_connected(outputs_last, n_cls, activation_fn=None)

with tf.variable_scope('Cost_acc_train'):
    # NOTE(review): ph_y is int32 while softmax_cross_entropy_with_logits_v2
    # generally expects float labels — confirm this runs under this TF version.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=ph_y))
    predict = tf.argmax(logits, axis=1)
    # Accuracy: fraction of samples whose argmax matches the label's argmax.
    acc = tf.reduce_mean(tf.cast(
        tf.equal(predict, tf.argmax(ph_y, axis=1)),
        dtype=tf.float32
    ))
    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

with tf.variable_scope('summary'):
    # Scalar summaries for TensorBoard, merged into a single fetchable op.
    tf.summary.scalar('cost', cost)
    tf.summary.scalar('acc', acc)
    summary = tf.summary.merge_all()

with tf.Session() as sess:
    # FileWriter logs the graph plus per-step summaries for TensorBoard.
    with tf.summary.FileWriter('./_log/' + file_name, sess.graph) as fw:
        sess.run(tf.global_variables_initializer())

        # Checkpoint path. NOTE(review): this rebinds the module-level `path`
        # that earlier held the data directory.
        path = path_save_session + file_name + '_' + ver
        saver = tf.train.Saver()
        if is_use_cache and os.path.exists(path + '.meta'):
            saver.restore(sess, path)
        g_step = 0  # global step counter used as the summary x-axis
        for epoch in range(training_epochs):
            total_batch = int(np.ceil(m_train / batch_size))
            g_b = 0  # reset next_batch's cursor at the start of each epoch
            group = int(np.ceil(total_batch / 10))  # print ~10 progress lines per epoch
            for i in range(total_batch):
                bx, by = next_batch(batch_size)
                _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                fw.add_summary(sv, g_step)
                g_step += 1
                if 0 == i % group:
                    print(f'g# {g_step}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
            # Print the final batch of the epoch if the loop above didn't.
            # (Uses `i`/`costv`/`accv` left over from the last loop iteration.)
            if 0 != i % group:
                print(f'g# {g_step}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
            saver.save(sess, path)

        # Final evaluation on the held-out split in a single feed.
        accv_test = sess.run(acc, feed_dict={ph_x: x_test, ph_y: y_test})
        print(f'Accuracy on test data: {accv_test}')
