import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import python_ai.CV_2.dl.rnn.my.hihello_data as data

# Hyper-parameters for the hand-rolled LSTM below.
ALPHA = 0.01   # Adam learning rate
ITERS = 200    # number of full-batch training iterations
EPS = 1e-20    # numerical floor inside log() so log(0) never occurs

# Pin both RNGs so repeated runs are reproducible.
np.random.seed(777)
tf.random.set_random_seed(777)

# Integer-encoded input and target sequences, shape (batch, nb_steps).
ph_x = tf.placeholder(dtype=tf.int32, shape=[None, data.nb_steps], name='ph_x')
ph_y = tf.placeholder(dtype=tf.int32, shape=[None, data.nb_steps], name='ph_y')

# One-hot encode both to (batch, nb_steps, dict_len) for the matmuls below.
x_oh = tf.one_hot(ph_x, depth=data.dict_len, dtype=tf.float32)
y_oh = tf.one_hot(ph_y, depth=data.dict_len, dtype=tf.float32)

def _var(shape, name):
    """Create a trainable float32 variable initialized from N(0, 1)."""
    return tf.Variable(tf.random.normal(shape), dtype=tf.float32, name=name)

# Input-to-hidden weights for the candidate (a) and the f/i/o gates.
# NOTE: creation order is kept identical to preserve the seeded RNG draws.
wxa = _var([data.dict_len, data.nb_neuron], 'wxa')
wxf = _var([data.dict_len, data.nb_neuron], 'wxf')
wxi = _var([data.dict_len, data.nb_neuron], 'wxi')
wxo = _var([data.dict_len, data.nb_neuron], 'wxo')
# Hidden-to-hidden and hidden-to-output weights.
waa = _var([data.nb_neuron, data.nb_neuron], 'waa')
way = _var([data.nb_neuron, data.dict_len], 'way')
# Biases for the candidate, the three gates, and the output layer.
ba = _var([1, data.nb_neuron], 'ba')
bf = _var([1, data.nb_neuron], 'bf')
bi = _var([1, data.nb_neuron], 'bi')
bo = _var([1, data.nb_neuron], 'bo')
by = _var([1, data.dict_len], 'by')

# Manually unrolled LSTM over nb_steps time steps.
# at_1 / ct_1 hold the previous hidden and cell state, zeroed for t = 0.
at_1 = tf.zeros([tf.shape(ph_x)[0], data.nb_neuron], dtype=tf.float32)
ct_1 = tf.zeros([tf.shape(ph_x)[0], data.nb_neuron], dtype=tf.float32)
h = []  # per-step softmax outputs, each (batch, dict_len)
for t in range(data.nb_steps):
    xt = x_oh[:, t, :]  # one-hot input at step t: (batch, dict_len)
    # Candidate cell value g_t = tanh(x_t @ wxa + a_{t-1} @ waa + ba).
    zt1 = tf.matmul(xt, wxa)
    zt2 = tf.matmul(at_1, waa)
    zt = zt1 + zt2 + ba
    gt = tf.tanh(zt)

    # NOTE(review): every gate below reuses zt2 (a_{t-1} @ waa) as its
    # recurrent term, i.e. the candidate and all three gates share one
    # recurrent weight matrix. A standard LSTM has a separate recurrent
    # matrix per gate — confirm this sharing is intentional.
    # Forget gate.
    ft1 = tf.matmul(xt, wxf)
    ft = ft1 + zt2 + bf
    ft = tf.sigmoid(ft)

    # Input gate.
    it1 = tf.matmul(xt, wxi)
    it = it1 + zt2 + bi
    it = tf.sigmoid(it)

    # Output gate.
    ot1 = tf.matmul(xt, wxo)
    ot = ot1 + zt2 + bo
    ot = tf.sigmoid(ot)

    # Cell state: forget part of the old cell, add the gated candidate.
    ct = ct_1 * ft + gt * it

    # Hidden state, then per-step class probabilities.
    at = tf.tanh(ct) * ot
    ht = tf.nn.softmax(tf.matmul(at, way) + by)
    h.append(ht)
    at_1 = at
    ct_1 = ct
# Stack the step outputs to (nb_steps, batch, dict_len), then transpose
# to (batch, nb_steps, dict_len) to align with ph_y / y_oh.
h = tf.convert_to_tensor(h, dtype=tf.float32)
h = tf.transpose(h, [1, 0, 2])

# Cross-entropy loss, averaged over every element of the one-hot target
# tensor; EPS keeps log() finite when a softmax output underflows to 0.
log_h = tf.log(h + EPS)
j = tf.negative(tf.reduce_mean(y_oh * log_h), name='j')

# Adam minimizes the loss.
optim = tf.train.AdamOptimizer(learning_rate=ALPHA).minimize(j)

# metric: fraction of time steps whose argmax prediction matches the target.
pred_idx = tf.cast(tf.argmax(h, axis=2), dtype=tf.int32)
match = tf.equal(pred_idx, tf.cast(ph_y, dtype=tf.int32))
acc = tf.reduce_mean(tf.cast(match, tf.float32), name='acc')

# train
# train
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    # Full-batch training: the entire dataset is fed on every iteration,
    # so the feed dict can be built once and reused everywhere below.
    feed = {ph_x: data.x_data, ph_y: data.y_data}
    cost_his = np.zeros(ITERS)
    GROUP = int(np.ceil(ITERS / 20))  # print ~20 progress lines in total
    for i in range(ITERS):
        _, cost, accv = sess.run([optim, j, acc], feed_dict=feed)

        cost_his[i] = cost
        if i % GROUP == 0:
            print(f'#{i + 1}: cost = {cost}, acc = {accv}')
    # Report the final iteration even when it does not fall on a GROUP
    # boundary, so the last cost/accuracy always appears.
    if i % GROUP != 0:
        print(f'#{i + 1}: cost = {cost}, acc = {accv}')

    # evaluation: decode each predicted/target index row back to characters.
    # (renamed loop variables — the originals shadowed the training index i)
    print('Predictions:')
    pred = sess.run(h, feed_dict=feed)
    for row, (p_softmax_row, y_idx_row) in enumerate(zip(pred, data.y_data)):
        p_idx_row = np.argmax(p_softmax_row, axis=1)
        p_label = "".join(data.idx2char[k] for k in p_idx_row)
        y_label = "".join(data.idx2char[k] for k in y_idx_row)
        print(row, y_label, p_label)
    # accuracy
    accv = sess.run(acc, feed_dict=feed)
    print(f'Accuracy: {accv}')

    # cost function curve ("clost" typo fixed in the prompt below)
    print('Please check and close the plotting window to go on ...')
    plt.plot(cost_his)
    plt.show()

    print('Over')
