from python_ai.common.xcommon import *
import tensorflow.compat.v1 as tf
import tensorflow as tsf
import numpy as np
import os
import pandas as pd
import re

# Seed both NumPy and the TF1 graph-level RNG so runs are reproducible.
np.random.seed(777)
tf.set_random_seed(777)

# --- Hyperparameters / configuration ---
is_test = False          # True -> load the small '...2' sample data files instead
train_set_rate = 0.9     # fraction of examples used for training (rest is test)
n_cls = 2                # binary sentiment classes: positive / negative
alpha = 0.001            # Adam learning rate
n_embedding = 200        # word-embedding dimensionality
n_hidden = 220           # LSTM hidden-state size
n_fc = 128               # units in the fully-connected layer after the LSTM
batch_size = 100
n_epoch = 20

sep('load data')
# Sentiment corpus: one sentence per line, one file per polarity
# (.pos / .neg).  is_test switches to the small '...2' sample files.
data_path = r'../../../../../large_data/DL1/cnn_text'
prefix = 'rt-polarity'
if is_test:
    infix = '2'
else:
    infix = ''
pos_path = data_path + '/' + prefix + infix + '.pos'
neg_path = data_path + '/' + prefix + infix + '.neg'
with open(pos_path, 'r', encoding='utf8') as f:
    pos_sentences = f.readlines()
with open(neg_path, 'r', encoding='utf8') as f:
    neg_sentences = f.readlines()
len_pos = len(pos_sentences)
len_neg = len(neg_sentences)
m = len_pos + len_neg  # total number of examples
print(f'len pos: {len_pos}, len neg: {len_neg}')
# Labels: 1 = positive, 0 = negative.
y_pos = [1] * len_pos
y_neg = [0] * len_neg
print('pos_sentences', type(pos_sentences))
print('neg_sentences', type(neg_sentences))
sentences = pos_sentences + neg_sentences
print('sentences', type(sentences))
y = y_pos + y_neg

# Shuffle sentences and labels with the SAME permutation so pairs stay aligned.
a = np.random.permutation(m)
sentences = np.array(sentences)  # ATTENTION The flex indexing is only available for ndarrays not lists!
y = np.array(y)
sentences = sentences[a]
y = y[a]
y = np.eye(n_cls)[y]  # one-hot encode labels -> shape (m, n_cls)
check_shape(sentences, 'sentences')
check_shape(y, 'y')

print(sentences[:3])

sep('process data')


# Pre-compiled patterns, hoisted so they are built once instead of once per
# sentence (this function is applied to the whole corpus in a loop).
# 1) Strip every character outside the allowed set.
_STRIP_RE = re.compile(r'[^-a-zA-Z0-9\.,\'"`/_~ ]')
# 2) Put a space before apostrophe tokens ("it's" -> "it 's") so contractions
#    become separate tokens for the whitespace split used downstream.
_APOS_RE = re.compile(r'(\'[a-zA-Z0-9-_~]+)')
# 3) Turn remaining punctuation into spaces.
_PUNCT_RE = re.compile(r'[\.,/"`]')


def process_and_filter_string(text):
    """Clean one raw sentence for whitespace tokenization.

    Removes disallowed characters, splits apostrophe contractions off into
    their own tokens, and replaces punctuation with spaces.

    Note: the parameter was previously named ``str``, shadowing the builtin;
    renamed to ``text`` (the only call site passes it positionally).
    """
    text = _STRIP_RE.sub('', text)
    text = _APOS_RE.sub(r' \1', text)
    text = _PUNCT_RE.sub(' ', text)
    return text


sentences = [process_and_filter_string(s) for s in sentences]
print(sentences[:3])

# ATTENTION Need not split
# sentences = [s.split() for s in sentences]
# print(sentences[:3])

sep('vocabulary')
# The longest sentence (in whitespace tokens) sets the fixed sequence length.
max_doc_len = max([len(s.split()) for s in sentences])
print(f'max_doc_len: {max_doc_len}')
# VocabularyProcessor (TF1 contrib API) maps each sentence to a fixed-length
# sequence of integer word ids, zero-padded up to max_doc_len.
vocab_proc = tsf.contrib.learn.preprocessing.VocabularyProcessor(max_doc_len)
x_idx = list(vocab_proc.fit_transform(sentences))
check_shape(x_idx, 'x_idx')
print(x_idx[:3])
vocab_len = len(vocab_proc.vocabulary_)
sep('vocub_len')
print(vocab_len)

sep('split')
# Train/test split: data was already shuffled, so the first m_train rows train.
m_train = int(np.ceil(m * train_set_rate))
x_idx_train, x_idx_test = np.split(x_idx, [m_train])
y_train, y_test = np.split(y, [m_train])
g_batch_i = 0  # global cursor into the training set, advanced by netx_batch()


def netx_batch(batch_size):
    """Return the next (bx, by) training mini-batch and advance the cursor.

    Reads the module-level arrays ``x_idx_train`` / ``y_train`` and the global
    cursor ``g_batch_i``.  The final batch of an epoch may be shorter than
    ``batch_size``; the training loop resets ``g_batch_i`` each epoch.
    """
    global g_batch_i
    start = g_batch_i
    stop = start + batch_size
    g_batch_i = stop
    return x_idx_train[start:stop], y_train[start:stop]


sep('placeholder')
with tf.variable_scope('Input'):
    # ph_x: padded word-id sequences; ph_y: one-hot labels.
    ph_x = tf.placeholder(tf.int32, [None, max_doc_len], 'ph_x')
    ph_y = tf.placeholder(tf.int32, [None, n_cls], 'ph_y')

sep('embedding')
with tf.variable_scope('Embedding'):
    # Trainable embedding matrix, initialized uniformly in [-1, 1).
    W = tf.Variable(tf.random.uniform([vocab_len, n_embedding], -1., 1., dtype=tf.float32), dtype=tf.float32, name='W')
    print(W)  # (vocab_len, n_embedding)
    # Look up an embedding vector for every word id in the batch.
    x_embed = tf.nn.embedding_lookup(W, ph_x)
    print(x_embed)  # (?, max_doc_len, n_embedding)

sep('RNN')
with tf.variable_scope('RNN'):
    cell = tf.nn.rnn_cell.LSTMCell(n_hidden)
    # outputs holds the LSTM hidden state at every time step.
    outputs, states = tf.nn.dynamic_rnn(cell, x_embed, dtype=tf.float32)
    print(outputs)  # (?, max_doc_len, n_hidden)

sep('FC')
with tf.variable_scope('FC'):
    # Use only the last time step's output as the sentence representation.
    # NOTE(review): sequences are zero-padded and dynamic_rnn's
    # sequence_length argument is not used, so for short sentences this is
    # the state after running over padding, not after the last real token —
    # confirm this is intended.
    outputs = outputs[:, -1]
    print(outputs)  # (?, n_hidden)
    fc = tsf.contrib.layers.fully_connected(outputs, n_fc, activation_fn=tf.nn.relu)  # (?, n_fc)
    logits = tsf.contrib.layers.fully_connected(fc, n_cls, activation_fn=None)  # (?, n_cls)

sep('cost')
with tf.variable_scope('Cost'):
    # Softmax cross-entropy against one-hot labels, averaged over the batch.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=ph_y))
    tf.summary.scalar('cost', cost)

sep('train')
with tf.variable_scope('Train'):
    train = tf.train.AdamOptimizer(learning_rate=alpha).minimize(cost)

sep('metrics')
with tf.variable_scope('Metrics'):
    # Accuracy: fraction of rows where the argmax prediction matches the label.
    acc = tf.reduce_mean(
        tf.cast(
            tf.equal(
                tf.argmax(logits, axis=1),
                tf.argmax(ph_y, axis=1)
            ),
            dtype=tf.float32
        )
    )
    tf.summary.scalar('acc', acc)

sep('summary')
with tf.variable_scope('Summary'):
    summary = tf.summary.merge_all()

with tf.Session() as sess:
    # Write the graph plus scalar summaries under ./_log/<script filename>.
    with tf.summary.FileWriter('./_log/' + os.path.basename(__file__), sess.graph) as fw:
        sess.run(tf.global_variables_initializer())

        g_step = 0  # global step counter across all epochs (summary x-axis)
        for epoch in range(n_epoch):
            g_batch_i = 0  # rewind the mini-batch cursor at the start of each epoch
            total_batch = int(np.ceil(m_train / batch_size))
            group = int(np.ceil(total_batch / 10))  # print ~10 progress lines per epoch
            for i in range(total_batch):
                bx, by = netx_batch(batch_size)
                _, costv, accv, sv = sess.run([train, cost, acc, summary], feed_dict={ph_x: bx, ph_y: by})
                fw.add_summary(sv, g_step)
                g_step += 1
                if i % group == 0:
                    print(f'g_step#{g_step}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
                    fw.flush()
            # Also report the epoch's final batch if the loop above didn't just print it
            # (relies on `i` keeping its last value after the for-loop).
            if i % group != 0:
                print(f'g_step#{g_step}: epoch#{epoch + 1}: batch#{i + 1}: cost = {costv}, acc = {accv}')
                fw.flush()

    # Evaluate once on the held-out split, still inside the session but after
    # the summary writer has been closed.
    accv = sess.run(acc, feed_dict={ph_x: x_idx_test, ph_y: y_test})
    print(f'Testing acc = {accv}')