# -*- coding:utf-8 -*-

# @Time    : 2019-01-09 17:14

# @Author  : Swing


import tensorflow as tf
import time
import numpy as np

# Training corpus: the Heart Sutra, repeated 5x to give the tiny RNN
# a bit more data to chew on.
data = u'''观自在菩萨，行深般若波罗蜜多时，照见五蕴皆空，度一切苦厄。
舍利子，色不异空，空不异色，色即是空，空即是色，受想行识， 亦复如是。
舍利子，是诸法空相，不生不灭， 不垢不净，不增不减。
是故空中无色，无受想行识，无眼耳鼻舌身意，无色声香味触法，无眼界，乃至无意识界，无无明，亦无无明尽，乃至无老死，亦无老死尽。
无苦集灭道， 无智亦无得，以无所得故。菩提萨埵，依般若波罗蜜多故，心无挂碍，无挂碍故，无有恐怖，远离颠倒梦想，究竟涅盘。
三世诸佛， 依般若波罗蜜多故，得阿耨多罗三藐三菩提。故知般若波罗蜜多是大神咒，是大明咒，是无上咒，是无等等咒，能除一切苦，真实不虚。
故说般若波罗蜜多咒，即说咒曰：揭谛揭谛，波罗揭谛，波罗僧揭谛，菩提娑婆诃。''' * 5

# Vocabulary: one class per distinct character.
chars = list(set(data))
data_size = len(data)
num_classes = len(chars)
print('Data has %d characters. %d unique' % (data_size, num_classes))

# Forward and reverse lookup tables between characters and class indices.
ix_to_char = dict(enumerate(chars))
char_to_ix = {ch: ix for ix, ch in ix_to_char.items()}

# NOTE(review): '<EOF>' is registered at index num_classes + 1, but the
# sentinel appended to raw_y below is num_classes - 1 (an ordinary char
# index); this entry is never produced by argmax over num_classes logits.
# Looks inconsistent — confirm intent before relying on it.
ix_to_char[num_classes + 1] = '<EOF>'

# Model / training hyper-parameters.
state_size = 32
seq_length = 25
learning_rate = 1e-3
batch_size = 1

# Inputs are the corpus as indices; targets are the same sequence shifted
# left by one character, padded with a final sentinel label.
raw_x = [char_to_ix[ch] for ch in data]
raw_y = raw_x[1:] + [num_classes - 1]


def get_batch():
    """Yield consecutive (inputs, targets) minibatches over the corpus.

    The flat corpus is split into ``batch_size`` contiguous rows; each
    yield returns the next ``seq_length`` columns of every row as a pair
    of int32 arrays shaped (batch_size, seq_length). Any tail shorter
    than one full window is dropped.
    """
    row_len = data_size // batch_size

    # Reshape the flat index lists into (batch_size, row_len) grids.
    grid_x = np.asarray(raw_x[:row_len * batch_size], dtype=np.int32).reshape(batch_size, row_len)
    grid_y = np.asarray(raw_y[:row_len * batch_size], dtype=np.int32).reshape(batch_size, row_len)

    # Slide a non-overlapping window of seq_length columns across the grid.
    n_windows = row_len // seq_length
    for start in range(0, n_windows * seq_length, seq_length):
        stop = start + seq_length
        yield (grid_x[:, start:stop], grid_y[:, start:stop])


# --- TF1 computation graph: single-layer vanilla RNN character model ---
tf.reset_default_graph()

# Character-index sequences; both dims are dynamic (batch, time).
x = tf.placeholder(tf.int32, [None, None], name='input_placeholder')
y = tf.placeholder(tf.int32, [None, None], name='labels_placeholder')

# Initial RNN state. Being a plain tensor (not a placeholder) it defaults
# to zeros, but it can still be overridden via feed_dict — the inference
# code below feeds it to carry state between single-character steps.
init_state = tf.zeros([batch_size, state_size])

# One-hot encode indices into (batch, time, num_classes) RNN inputs.
rnn_inputs = tf.one_hot(x, num_classes)

# One vanilla RNN cell unrolled over the time dimension.
cell = tf.nn.rnn_cell.BasicRNNCell(state_size)
rnn_outputs, final_state = tf.nn.dynamic_rnn(cell, rnn_inputs, initial_state=init_state)

# Output projection: state_size -> vocabulary logits.
with tf.variable_scope('softmax'):
    w_o = tf.get_variable('w_o', [state_size, num_classes])
    b_o = tf.get_variable('b_o', [num_classes], initializer=tf.constant_initializer(0.0))

# Apply the projection per timestep (flatten time into the batch dim for
# the matmul), then restore (batch, time, num_classes).
logits = tf.reshape(
    tf.matmul(tf.reshape(rnn_outputs, [-1, state_size]), w_o) + b_o,
    [batch_size, -1, num_classes]
)

# Per-timestep class probabilities, used for sampling/greedy decoding.
predictions = tf.nn.softmax(logits)

# Per-timestep cross-entropy against the integer labels.
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits)

# Summed (not averaged) over batch and time.
total_loss = tf.reduce_sum(losses)

train_step = tf.train.AdamOptimizer(learning_rate).minimize(total_loss)


def train_network(sess, num_steps, state_size=4, verbose=True, training_state=None):
    """Train the RNN for one sweep of get_batch(), then sample 500 chars.

    Parameters
    ----------
    sess : tf.Session running the module-level graph.
    num_steps : unused; kept for backward compatibility with callers.
    state_size : width used to build a zero initial state when
        ``training_state`` is None (callers pass the module-level value).
    verbose : when True, print progress diagnostics every 100 steps.
    training_state : (batch_size, state_size) ndarray carried over from a
        previous call, or None for a fresh zero state.

    Returns
    -------
    (smooth_loss, training_state) : the exponentially smoothed loss and
        the final RNN state, suitable to pass back in on the next call.
    """
    if training_state is None:
        training_state = np.zeros([batch_size, state_size])

    start_time = time.time()
    # Seed the smoothed loss with the loss of a uniform predictor over
    # the vocabulary, accumulated over one sequence window.
    smooth_loss = -np.log(1.0 / num_classes) * seq_length

    for step, (x_v, y_v) in enumerate(get_batch()):
        # Feed the carried-over state so truncated BPTT resumes where the
        # previous window ended. (Bug fix: the original never fed
        # init_state during training, so every window restarted from
        # zeros and the tracked state was ignored.)
        training_loss_, training_state, _, predictions_value = sess.run(
            [total_loss, final_state, train_step, predictions],
            feed_dict={
                x: x_v,
                y: y_v,
                init_state: training_state
            })

        # Exponential moving average of the per-window total loss.
        smooth_loss = smooth_loss * 0.999 + training_loss_ * 0.001

        if step % 100 == 0 and step > 0 and verbose:
            print("Average loss at step", step,
                  "for last 100 steps:", smooth_loss, training_loss_)

            # Bug fix: was `end_time = start_time`, which always printed 0.
            end_time = time.time()
            # Bug fix: parentheses before `* 1000` — the original applied
            # `* 1000` to the already-formatted *string*, printing it
            # 1000 times. Elapsed time is reported in milliseconds.
            print('Time cost :%f' % ((end_time - start_time) * 1000))
            start_time = time.time()
            print(u'input:data:%s' % ''.join(ix_to_char[ix] for ix in x_v[0, :]))

            # Greedy per-timestep readout for a qualitative check.
            char_results = np.argmax(predictions_value, axis=-1)
            print(u'predict data:%s' % ''.join(ix_to_char[ix] for ix in char_results.ravel()))
            print(u'ground truth:%s' % ''.join(ix_to_char[ix] for ix in y_v[0, :]))

    # Greedy sampling: seed with the last training character and roll the
    # network forward 500 steps, feeding its own argmax back in.
    init_char = ix_to_char[x_v[0, -1]]
    choice_input = [[char_to_ix[init_char]]]
    result_list = [init_char]
    inference_state_value = training_state.copy()
    for _ in range(500):
        p, inference_state_value = sess.run(
            [predictions, final_state],
            feed_dict={
                x: choice_input,
                init_state: inference_state_value
            })
        c = np.argmax(p.ravel())
        choice_input = [[c]]
        result_list.append(ix_to_char[c])
    print('-----predict some text-----\n%s\n++++++++++++++' % ''.join(result_list))
    return smooth_loss, training_state


# --- Script entry: create a session, train, then sample one character ---
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# RNN state carried from epoch to epoch.
training_state = np.zeros((batch_size, state_size))

train_epochs = 2000
for epoch in range(train_epochs):
    print('epoch : %d' % epoch)
    training_losses, training_state = train_network(
        sess, seq_length,
        state_size=state_size,
        training_state=training_state)

# Seed the generator with a fixed starting character and the final
# training state, then take a single forward step.
init_char = u'观'
choice_input = [[char_to_ix[init_char]]]
result_list = [init_char]
inference_state_value = training_state.copy()

p, inference_state_value = sess.run(
    [predictions, final_state],
    feed_dict={
        x: choice_input,
        init_state: inference_state_value
    })

# Sample the next character from the softmax distribution (stochastic,
# unlike the greedy argmax used inside train_network).
c = np.random.choice(range(num_classes), p=p.ravel())
choice_input = [[c]]
result_list.append(ix_to_char[c])
print('-----predict some text-----\n%s\n++++++++++++++' % ''.join(result_list))
