from tools import *

# Build the list of name categories and a mapping from
# category -> list of names by scanning the data directory.
data_path = "../data/names/"
all_categories = []
category_lines = {}
# glob.glob expands the wildcard so every .txt file is visited.
for file_path in glob.glob(data_path + "*.txt"):
    # The file's base name (without its extension) is the category label.
    category = os.path.splitext(os.path.basename(file_path))[0]
    all_categories.append(category)
    # Each file holds one name per line; read them into a list keyed
    # by the category.
    category_lines[category] = read_lines(file_path)
n_categories = len(all_categories)

# Character vocabulary: ASCII letters plus a few punctuation marks.
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)

# Model dimensions: one-hot characters in, one score per category out.
input_size = n_letters
n_hidden = 128
output_size = n_categories

# One instance of each recurrent architecture, trained independently
# so their loss curves and training times can be compared.
rnn = MyRNN(input_size, n_hidden, output_size)
lstm = MyLSTM(input_size, n_hidden, output_size)
gru = MyGRU(input_size, n_hidden, output_size)

# The models emit log-probabilities, so use negative log-likelihood loss.
criterion = nn.NLLLoss()

# Learning rate for the manual SGD updates in the train_* functions.
lr = 0.005


def train_rnn(category_tensor, line_tensor):
    """Run one training step of the plain RNN on a single name.

    :param category_tensor: tensor holding the target category index.
    :param line_tensor: per-character input tensors for one name; the
        first dimension iterates over characters.
    :return: (output, loss) — the final-step output and the loss value.
    """
    rnn.to(device)
    # Fresh hidden state for every training example.
    hidden = rnn.init_hidden()
    # Clear gradients accumulated by the previous step.
    rnn.zero_grad()
    # Feed the characters one at a time, carrying the hidden state.
    for i in range(line_tensor.size()[0]):
        output, hidden = rnn(line_tensor[i], hidden)
    # Drop the leading batch dimension before comparing with the target.
    loss = criterion(output.squeeze(0), category_tensor)

    loss.backward()

    # Manual SGD update: p <- p - lr * grad. The positional-alpha form
    # add_(-lr, grad) is deprecated in PyTorch; use the explicit alpha
    # keyword (consistent with train_lstm and train_gru).
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-lr)

    return output, loss.item()


def train_lstm(category_tensor, line_tensor):
    """Run one LSTM training step on a single name.

    :param category_tensor: tensor holding the target category index.
    :param line_tensor: per-character input tensors for one name.
    :return: (output, loss) — the final-step output and the loss value.
    """
    lstm.to(device)
    # Fresh hidden and cell states for every training example.
    hidden, c = lstm.init_hidden_c()
    lstm.zero_grad()
    # Step through the characters, threading both recurrent states.
    seq_len = line_tensor.size()[0]
    for step in range(seq_len):
        output, hidden, c = lstm(line_tensor[step], hidden, c)
    # Remove the batch dimension before computing the NLL loss.
    loss = criterion(output.squeeze(0), category_tensor)
    loss.backward()
    # Manual SGD update: p <- p - lr * grad.
    for param in lstm.parameters():
        param.data.add_(param.grad.data, alpha=-lr)
    return output, loss.item()


def train_gru(category_tensor, line_tensor):
    """Run one GRU training step on a single name.

    :param category_tensor: tensor holding the target category index.
    :param line_tensor: per-character input tensors for one name.
    :return: (output, loss) — the final-step output and the loss value.
    """
    gru.to(device)
    # A new hidden state is created for every training example.
    hidden = gru.init_hidden()
    # Reset gradients before backpropagation.
    gru.zero_grad()
    # Consume the name character by character.
    for step in range(line_tensor.size()[0]):
        output, hidden = gru(line_tensor[step], hidden)
    # Squeeze away the batch dimension for the loss computation.
    loss = criterion(output.squeeze(0), category_tensor)
    loss.backward()
    # Manual SGD update: p <- p - lr * grad.
    for param in gru.parameters():
        param.data.add_(param.grad.data, alpha=-lr)
    return output, loss.item()


# Total number of training iterations.
n_iters = 2000
# Print a progress line every this many iterations.
print_every = 20
# Record an averaged loss point for plotting every this many iterations.
plot_every = 10

# Train on GPU when available; the train_* functions read this global.
device = 'cuda' if torch.cuda.is_available() else 'cpu'


def train(train_type_fn):
    """Run the full training loop using the given per-step function.

    :param train_type_fn: one of train_rnn / train_lstm / train_gru,
        called as train_type_fn(category_tensor, line_tensor).
    :return: (all_losses, seconds) — losses averaged over each
        plot_every window, and the elapsed wall-clock time.
    """
    start = time.time()
    all_losses = []
    current_loss = 0
    all_categories, category_lines = init_data()

    for ite in range(1, n_iters + 1):
        # Sample one random (category, name) training example.
        category, line, category_tensor, line_tensor = random_train_data(all_categories, category_lines)
        output, loss = train_type_fn(category_tensor, line_tensor)
        current_loss += loss

        # Periodically report the model's guess for the current name.
        if ite % print_every == 0:
            guess, guess_i = category_from_output(all_categories, output)
            if guess == category:
                correct = '✓'
            else:
                correct = '✕ (%s)' % category
            print('{:5d} {:3d}% ({}) {:.4f} {} / {} {}'.format(ite, int(ite / n_iters * 100), time_since(start), loss, line, guess, correct))

        # Periodically record the averaged loss for plotting.
        if ite % plot_every == 0:
            all_losses.append(current_loss / plot_every)
            current_loss = 0
    return all_losses, int(time.time() - start)


if __name__ == '__main__':
    # Train each architecture with the same schedule and collect its
    # loss curve plus the wall-clock time it took.
    all_losses1, period1 = train(train_rnn)
    all_losses2, period2 = train(train_lstm)
    all_losses3, period3 = train(train_gru)

    plt.figure(figsize=(16, 8))

    # Left panel: loss curves of the three models.
    plt.subplot(121)
    plt.plot(all_losses1, label='RNN')
    plt.plot(all_losses2, label='LSTM', color='red')
    plt.plot(all_losses3, label='GRU', color='orange')
    # The label= arguments above are ignored unless a legend is drawn.
    plt.legend()

    # Right panel: training time (seconds) per model.
    plt.subplot(122)
    x_data = ['RNN', 'LSTM', 'GRU']
    y_data = [period1, period2, period3]
    print(y_data)
    plt.bar(range(len(x_data)), y_data, tick_label=x_data)
    plt.show()
