# -*- coding:utf-8 -*-

# @Time    : 19-1-4 下午11:19

# @Author  : Swing


import numpy as np


def range_matrix(r, c):
    """Deterministic (r, c) float matrix with values 0.1, 0.2, ... in row-major order.

    Used to seed the weight matrices reproducibly (instead of random init).
    """
    flat = np.arange(r * c, dtype=float)
    scaled = flat * 0.1 + 0.1
    return scaled.reshape(r, c)


input_len = 3  # width of the sliding window of raw values fed to the net each step
num_classes = 3  # output classes: (x[i-1] + x[i-2]) % 3 is in {0, 1, 2}
n, p = 0, 0  # n: step counter, p: data pointer — both reassigned by the training loop below
hidden_size = 2  # size of hidden layer of neurons
seq_length = 3  # number of steps to unroll the RNN for
learning_rate = 1  # NOTE(review): 1 is very aggressive for plain SGD — confirm intended

data_len = 50000
x = np.arange(data_len) + 1  # raw integer sequence 1..data_len

# Label for position i is (x[i-1] + x[i-2]) % 3.  For i = 0 and 1 the negative
# indices wrap around to the end of x, but the training loop starts at p = 2,
# so those two entries are never actually used as targets.
ground_truth = [(x[i - 1] + x[i - 2]) % 3 for i in range(data_len)]

# model parameters (deterministic init via range_matrix)
U = range_matrix(hidden_size, input_len)  # input to hidden
W = range_matrix(hidden_size, hidden_size)  # hidden to hidden
V = range_matrix(num_classes, hidden_size)  # hidden to output
bs = np.zeros((hidden_size, 1))  # hidden bias
bo = np.zeros((num_classes, 1))  # output bias


def forward_and_backprop(inputs, targets, hprev, params=None):
    """Run the RNN forward over one window and backpropagate through time.

    inputs:  1-D array of len(targets) + window - 1 raw values; each timestep t
             sees the sliding window inputs[t : t + window].
    targets: sequence of integer class labels, one per timestep.
    hprev:   (hidden_size, 1) hidden state carried over from the previous call.
    params:  optional (U, W, V, bs, bo) tuple; defaults to the module-level
             parameters, so existing callers are unaffected.

    Returns (loss, dU, dW, dV, dbs, dbo, h_last) where h_last is the final
    hidden state to feed into the next call.
    """
    pU, pW, pV, pbs, pbo = params if params is not None else (U, W, V, bs, bo)
    steps = len(targets)
    # Window width derived from the arguments instead of the hard-coded 3
    # the original used (equals input_len for the caller in this file).
    window = len(inputs) - steps + 1

    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0.0

    # forward pass
    for t in range(steps):
        xs[t] = np.asarray(inputs[t:t + window], dtype=float).reshape(window, 1)
        hs[t] = np.tanh(np.dot(pU, xs[t]) + np.dot(pW, hs[t - 1]) + pbs)  # hidden state (tanh activation)
        ys[t] = np.dot(pV, hs[t]) + pbo  # unnormalized output logits
        exp_y = np.exp(ys[t] - np.max(ys[t]))  # shift by max for numerical stability
        ps[t] = exp_y / np.sum(exp_y)  # softmax
        # BUGFIX: cross-entropy for timestep t is -log p(target class).
        # The original computed np.mean(targets * np.log(ps[t])), which
        # broadcasts the whole integer target list against the log-probs
        # and is not a cross-entropy at all.
        loss += -np.log(ps[t][targets[t], 0])

    # backward pass: backpropagation through time
    dU, dW, dV = np.zeros_like(pU), np.zeros_like(pW), np.zeros_like(pV)
    dbs, dbo = np.zeros_like(pbs), np.zeros_like(pbo)
    dhnext = np.zeros_like(hs[0])

    for t in reversed(range(steps)):
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1  # softmax + cross-entropy gradient: p - onehot(target)
        dV += np.outer(dy, hs[t])
        dbo += dy
        dh = np.dot(pW.T, dhnext) + np.dot(pV.T, dy)  # gradient flowing into the hidden state
        dhraw = (1 - hs[t] * hs[t]) * dh  # tanh' = 1 - tanh^2
        dbs += dhraw
        dU += np.outer(dhraw, xs[t])
        # BUGFIX: include t == 0 as well — hs[-1] is hprev, which is nonzero
        # after the first window, so its dW contribution must not be dropped
        # (the original guarded this with `if t > 0`).
        dW += np.outer(dhraw, hs[t - 1])
        dhnext = dhraw

    return loss, dU, dW, dV, dbs, dbo, hs[steps - 1]


# Training loop: a handful of SGD steps sweeping windows across the sequence.
for n in range(5):
    # prepare inputs (we're sweeping from left to right in steps seq_length long)
    if p + seq_length + 1 >= len(x) or n == 0:
        hprev = np.zeros((hidden_size, 1))  # reset RNN memory
        p = 2  # go from start of data (p - 2 must be >= 0 for the input slice below)
    inputs = x[p - 2:p + seq_length]  # seq_length + 2 raw values; each step sees a 3-wide window
    targets = ground_truth[p:p + seq_length]
    loss, dU, dW, dV, dbs, dbo, hprev = forward_and_backprop(inputs, targets, hprev)
    # parameter update: plain SGD.  (An earlier comment said Adagrad, but no
    # squared-gradient accumulator is kept anywhere — this is vanilla descent.)
    for param, dparam in zip([U, W, V, bs, bo],
                             [dU, dW, dV, dbs, dbo]):
        param += -learning_rate * dparam  # in-place update keeps the global arrays bound

    p += seq_length  # move data pointer

print('U:')
print(U)
print('W:')
print(W)
print('V:')
print(V)