import sys

import numpy as np 
import matplotlib.pyplot as plt 
sys.path.append('..')

from common.optimizer import SGD
from ch05.simple_rnnlm import SimpleRnnlm
from dataset import ptb


# --- Hyperparameters ---
batch_size = 10
wordvec_size = 100      # dimensionality of the word-embedding vectors
hidden_size = 100 # number of elements in the RNN's hidden-state vector
time_size = 5 # time span (number of unrolled steps) for Truncated BPTT
lr = 0.1
max_epoch = 100

# --- Load the PTB training corpus and truncate it to a small slice ---
# (only the first 1000 tokens are used, to keep this demo fast)
corpus,word_to_id,id_to_word = ptb.load_data('train')
corpus_size = 1000
corpus = corpus[:corpus_size]
# Token ids are contiguous from 0, so max id + 1 = vocabulary size of the slice.
vocab_size = int(max(corpus)+1)

# Language-model targets are the input shifted by one position:
xs = corpus[:-1] # inputs
ts = corpus[1:] # outputs (supervision labels)
data_size = len(xs)

print(f"corpus size: {corpus_size}, vocabulary size: {vocab_size} ")

# Number of mini-batch iterations that make up one epoch.
max_iters = data_size // (batch_size*time_size)
time_idx = 0        # global time position; advances across iterations AND epochs
total_loss = 0      # accumulated loss within the current epoch
loss_count = 0      # number of loss values accumulated (for averaging)
ppl_list = []       # per-epoch perplexity, for plotting

model = SimpleRnnlm(vocab_size,wordvec_size,hidden_size)
optimizer = SGD(lr)

# Each batch row reads the corpus starting at its own offset, `jump` tokens
# apart, so the batch covers batch_size evenly spaced positions in the data.
jump = (corpus_size-1)//batch_size
offsets = [i*jump for i in range(batch_size)]

# Train for max_epoch epochs with Truncated BPTT: each mini-batch is a
# (batch_size, time_size) window of token ids, and the RNN's hidden state is
# carried across iterations (never reset here), so consecutive windows form
# one continuous sequence per batch row.
for epoch in range(max_epoch):
    # NOTE: renamed from `iter` — that name shadows the `iter` builtin.
    for iteration in range(max_iters):
        batch_x = np.empty((batch_size,time_size),dtype='i')
        batch_t = np.empty((batch_size,time_size),dtype='i')
        for t in range(time_size):
            for i,offset in enumerate(offsets):
                # Modulo wrap lets time_idx keep advancing past the end of
                # the data across epochs without ever going out of range.
                batch_x[i,t] = xs[(offset+time_idx) % data_size]
                batch_t[i,t] = ts[(offset+time_idx) % data_size]
            time_idx += 1

        # Forward/backward over the truncated window, then one SGD step.
        loss = model.forward(batch_x,batch_t)
        model.backward()
        optimizer.update(model.params,model.grads)
        total_loss += loss
        loss_count += 1

    # Perplexity = exp(mean cross-entropy loss) over the epoch.
    ppl = np.exp(total_loss/loss_count)
    print('| epoch %d | perplexity %.2f' % (epoch+1, ppl))
    ppl_list.append(float(ppl))
    total_loss, loss_count = 0, 0

# Visualize how training perplexity evolved, one point per epoch.
plt.plot(np.arange(len(ppl_list)), ppl_list, label='train')
plt.xlabel('epochs')
plt.ylabel('perplexity')
plt.show()
