from python_ai.common.xcommon import *
import tensorflow as tsf
import tensorflow.compat.v1 as tf
import numpy as np
from python_ai.DL.tensorflow.common.text_proc import *
import os

# Reproducibility and run configuration for the text-CNN sentiment classifier.
file_name = os.path.basename(__file__)  # used to name checkpoint and log artifacts
np.random.seed(777)       # fix numpy RNG so the shuffle below is deterministic
tf.set_random_seed(777)   # fix TF graph-level RNG (weight initialization)

ver = 'v1.0'      # checkpoint version tag
n_epoch = 10      # number of epochs the training loop actually runs
is_test = False   # True -> load the small 'rt-polarity2' sample files instead
learning_rate = 0.001  # learning rate (Adam)
training_epochs = 10  # total training epochs -- NOTE(review): unused, the loop uses n_epoch
batch_size = 200  # samples per mini-batch
EMBEDDING_SIZE = 128  # word-embedding dimensionality
n_fc1_hidden = 1024   # units in the first fully-connected layer
n_onehot = 2          # number of classes (positive / negative)
dev_sample_percentage = 0.1  # fraction of the data held out as the test set
path = r'..\..\..\..\large_data\DL1\cnn_text\\'  # Windows-style relative data directory

sep('Load data')
# The '*2' files are small samples used for smoke-testing.
xname = '2' if is_test else ''
positive_data_file = path + 'rt-polarity' + xname + '.pos'  # positive reviews, one per line
negative_data_file = path + 'rt-polarity' + xname + '.neg'  # negative reviews, one per line
with open(positive_data_file, 'r', encoding='utf-8') as f:
    pos_ex = f.readlines()
    print(type(pos_ex))
    print(pos_ex[:5])
with open(negative_data_file, 'r', encoding='utf8') as f:
    neg_ex = f.readlines()
    print(type(neg_ex))
    print(neg_ex[:5])

# Positives first, then negatives; the labels below follow the same order.
x_sentences = [format_and_filter_string(sentence) for sentence in pos_ex + neg_ex]
print(x_sentences[:5])
# One-hot labels: [0, 1] = positive, [1, 0] = negative.
pos_oh = [[0, 1]] * len(pos_ex)
neg_oh = [[1, 0]] * len(neg_ex)
y_oh = np.r_[pos_oh, neg_oh]

sep('dictionary')
# Every sentence will be padded/truncated to the longest sentence length (in tokens).
max_doc_len = max([len(sent.split(' ')) for sent in x_sentences])
print(f'max doc len: {max_doc_len}')
# Deprecated tf.contrib VocabularyProcessor: maps each word to an integer id
# and pads shorter sentences up to max_doc_len.
vocab_proc = tsf.contrib.learn.preprocessing.VocabularyProcessor(max_doc_len)
x_idxs = vocab_proc.fit_transform(x_sentences)  # generator of id sequences
vocab_len = len(vocab_proc.vocabulary_)  # vocabulary size (incl. padding token)
print(f'vocab len: {vocab_len}')
# Materialize into a dense (num_sentences, max_doc_len) int array of word ids.
x = np.array(list(x_idxs))
print('x')
print(x[:2])

sep('shuffle data')
# One shared permutation keeps each sentence aligned with its label.
m = len(y_oh)
rand_idx = np.random.permutation(m)
x = x[rand_idx]
y_oh = y_oh[rand_idx]

sep('split')
# Negative offset from the end: the last dev_sample_percentage of rows is the test set.
border = -int(m * dev_sample_percentage)
m_train = m + border  # number of training rows
m_test = -border      # number of test rows
x_train, x_test = x[:border], x[border:]
y_train, y_test = y_oh[:border], y_oh[border:]
check_shape(x_train, 'x_train')
check_shape(x_test, 'x_test')
check_shape(y_train, 'y_train')
check_shape(y_test, 'y_test')

sep('func: next batch')
g_b = 0
# Cursor-based mini-batch iterator over the (already shuffled) training set.


def next_batch(size):
    """Return the next (x, y) mini-batch of at most `size` rows.

    Advances the module-level cursor `g_b` and resets it to 0 once the
    training set is exhausted, so the final batch of an epoch may be short.
    """
    global g_b
    start, stop = g_b, g_b + size
    batch = (x_train[start:stop], y_train[start:stop])
    g_b = 0 if stop >= m_train else stop
    return batch


# ---- Graph construction (TF1 graph mode) ----
with tf.variable_scope('Input'):
    # Padded word-id sequences, shape (batch, max_doc_len).
    ph_x = tf.placeholder(tf.int32, [None, max_doc_len], 'ph_x')
    # One-hot labels, shape (batch, n_onehot).
    # NOTE(review): int32 labels are later fed to softmax_cross_entropy_with_logits,
    # which generally expects float labels -- confirm on the targeted TF version.
    ph_y = tf.placeholder(tf.int32, [None, n_onehot], 'ph_y')

with tf.variable_scope('Embedding'):
    # Trainable embedding table, uniform in [-1, 1).
    params = tf.Variable(tf.random.uniform([vocab_len, EMBEDDING_SIZE], -1., 1.))  # vocab_len x emb_size
    print(params)
    # https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/nn/embedding_lookup
    # https://stackoverflow.com/questions/34870614/what-does-tf-nn-embedding-lookup-function-do
    # Row lookup: each word id is replaced by its embedding vector.
    embedded = tf.nn.embedding_lookup(params, ph_x)  # ? x max_doc_len x emb_size
    sep('embedded')
    print(embedded)

with tf.variable_scope('C1'):
    # Add a trailing channel axis -> (batch, max_doc_len, EMBEDDING_SIZE, 1),
    # so the sentence "image" can be fed to conv2d.
    c1_input = tf.expand_dims(embedded, -1)
    sep('c1_input')
    print(c1_input)
    # 3x3 convolution, 1 -> 32 channels.
    # NOTE(review): unscaled random_normal init (stddev 1.0) is unusually large.
    filter1 = tf.Variable(tf.random.normal([3, 3, 1, 32]), dtype=tf.float32, name='filter1')
    conv1 = tf.nn.conv2d(c1_input, filter1, strides=[1, 1, 1, 1], padding='SAME', name='conv1')
    relu1 = tf.nn.relu(conv1, 'relu1')
    # 2x2 max-pool halves both spatial dimensions.
    # (The original "==>8*8*32" note was stale copy-paste from an MNIST example.)
    pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool1')

# Second convolutional layer: 32 -> 64 channels, then another 2x2 max-pool
# (halves both spatial dimensions again).
with tf.variable_scope('C2'):
    filter2 = tf.Variable(tf.random.normal([3, 3, 32, 64]), dtype=tf.float32, name='filter2')
    conv2 = tf.nn.conv2d(pool1, filter2, strides=[1, 1, 1, 1], padding='SAME', name='conv2')
    # Fix: this op was mislabeled 'relu1' (copy-paste from C1). The label only
    # affects graph/TensorBoard op names, not the math or saved variables.
    relu2 = tf.nn.relu(conv2, 'relu2')
    pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='pool2')

# Flatten the pooled feature map and classify through two dense layers.
# (The original "4*4*64==>10" note was stale copy-paste from an MNIST example.)
with tf.variable_scope('FC'):
    # Static product of the feature-map dimensions after the two 2x2 poolings.
    fc_dim = pool2.shape[1] * pool2.shape[2] * pool2.shape[3]
    fc_in = tf.reshape(pool2, [-1, fc_dim], name='fc_in')
    fc1 = tsf.contrib.layers.fully_connected(fc_in, n_fc1_hidden, activation_fn=tf.nn.relu)  # hidden ReLU layer
    logits = tsf.contrib.layers.fully_connected(fc1, n_onehot, activation_fn=None)  # raw class scores

# Loss, accuracy and TensorBoard summaries.
with tf.variable_scope('Cost_acc_summary'):
    # Mean softmax cross-entropy over the batch.
    # NOTE(review): ph_y is int32 while this op generally expects float labels
    # of the same dtype as logits -- confirm this runs on the targeted TF version.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=ph_y))
    predict = tf.argmax(logits, axis=1)  # predicted class index per example
    print(predict)
    # Fraction of examples whose predicted class matches the one-hot label.
    acc = tf.reduce_mean(
        tf.cast(
            tf.equal(
                predict,
                tf.argmax(ph_y, axis=1)
            ),
            dtype=tf.float32
        )
    )
    tf.summary.scalar('cost', cost)
    tf.summary.scalar('acc', acc)
    summary = tf.summary.merge_all()

# Adam optimizer on the cross-entropy loss.
with tf.variable_scope('Train'):
    train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Resume from an existing checkpoint when one is found; otherwise train.
    save_path = './save/' + file_name + '_' + ver
    saver = tf.train.Saver()
    if os.path.exists(save_path + '.meta'):
        saver.restore(sess, save_path)
        print('SESSION LOADED!')
    else:
        with tf.summary.FileWriter('./log/' + file_name, sess.graph) as fw:
            print('TRAINING STARTED!')
            g_step = -1  # global step for summaries; first write is step 0
            # NOTE(review): the original comment claimed "15 epochs over the
            # first 80%"; the code actually runs n_epoch (10) epochs over the
            # 90% training split.
            for epoch in range(n_epoch):
                total_batch = int(np.ceil(m_train / batch_size))
                g_batch_i = 0  # NOTE(review): unused
                group = int(np.ceil(total_batch / 10))  # log roughly 10 times per epoch
                for i in range(total_batch):
                    g_step += 1
                    # One mini-batch step (batch_size=200, despite the original
                    # "100 per batch" note).
                    bx, by = next_batch(batch_size)
                    _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                    fw.add_summary(sv, g_step)
                    if i % group == 0:
                        print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc= {accv}')
                        fw.flush()
                    # Early exit once a batch hits 100% accuracy.
                    # NOTE(review): a single perfect batch is a weak convergence signal.
                    if np.isclose(1.0, accv):
                        break
                # Report the epoch's final batch unless it was just logged above.
                if i % group != 0:
                    print(f'g_step#{g_step + 1}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc= {accv}')
                    fw.flush()
                if np.isclose(1.0, accv):
                    print('TRAINING CONVERGED!')
                    break
            print('TRAINING OVER!')
            saver.save(sess, save_path)
            print('SESSION SAVED!')

    # Evaluate on the held-out split (10% here, despite the original "20%" note).
    accv = sess.run(acc, feed_dict={ph_x: x_test, ph_y: y_test})
    print('测试集，计算准确率')
    print(accv)
