import sys
sys.path.append('..')
import matplotlib.pyplot as plt
import numpy as np
from common.optimizer import SGD
from dataset import ptb
from simple_rnnlm import SimpleRnnlm

batch_size = 10  # mini-batch size: number of sequences processed in parallel
wordvec_size = 100  # dimensionality of the word-embedding vectors
hidden_size = 100  # number of units in the RNN hidden state

# each training sample spans 5 consecutive words (truncated-BPTT length)
time_size = 5
lr = 0.1  # SGD learning rate
max_epoch = 200

corpus, word_to_id, id_to_word = ptb.load_data('train')

# use only the first 1000 words for a quick test; corpus is a list of word IDs
corpus_size = 1000
corpus = corpus[:corpus_size]

# vocabulary size: the largest word ID + 1 (IDs start at 0)
vocab_size = int(max(corpus)+1)

# inputs: every word except the last (the last word has no next word to predict)
xs = corpus[:-1]
# targets: the input sequence shifted one position ahead
ts = corpus[1:]

# number of input/target pairs; 999 for this truncated corpus
data_size = len(xs)
print('corpus size: %d, vocabulary size: %d' %(corpus_size, vocab_size))

# iterations per epoch: 999 // (10 * 5) = 19 here
max_iters = data_size // (batch_size * time_size)
print('max_iters: ', max_iters)
# global read position within xs/ts; advances across iterations and epochs
time_idx = 0
# loss accumulated over the current epoch
total_loss = 0

loss_count = 0  # number of iterations folded into total_loss
# per-epoch perplexity history
ppl_list = []

# model: a simple RNN language model
model = SimpleRnnlm(vocab_size, wordvec_size, hidden_size)
# plain stochastic gradient descent
optimizer = SGD(lr)

# gap between the starting positions of consecutive batch rows, so each
# row reads its own contiguous slice of the corpus
jump = (corpus_size - 1) // batch_size
# per-row starting offsets into xs/ts
offsets = [i * jump for i in range(batch_size)]

# 训练100轮
# train for max_epoch (200) epochs, max_iters iterations each
for epoch in range(max_epoch):
    for iteration in range(max_iters):  # renamed from `iter`: don't shadow the builtin
        # assemble one mini-batch of word IDs, shape (batch_size, time_size)
        batch_x = np.empty((batch_size, time_size), dtype='i')
        batch_t = np.empty((batch_size, time_size), dtype='i')
        for t in range(time_size):
            for i, offset in enumerate(offsets):
                # wrap around with % so every row keeps yielding data
                batch_x[i, t] = xs[(offset + time_idx) % data_size]
                batch_t[i, t] = ts[(offset + time_idx) % data_size]
            time_idx += 1

        # forward/backward pass and SGD parameter update
        loss = model.forward(batch_x, batch_t)
        model.backward()
        optimizer.update(model.params, model.grads)
        total_loss += loss
        loss_count += 1

    # perplexity = exp(mean cross-entropy loss over the epoch)
    ppl = np.exp(total_loss / loss_count)
    print('| epoch %d | perplexity %.2f'
            %(epoch+1, ppl))
    ppl_list.append(float(ppl))
    # reset the epoch accumulators
    total_loss, loss_count = 0, 0