"""
Name/Country 训练,本例中的batch_size只能为1,因为name不等长
补齐等长的见
"""
import torch
import torch.nn as nn
from torch.utils.data import DataLoader

from Name2Country.Name2CountryDataset import Name2CountryDataset

# Batch size. Because input names differ in length, batch_size=1 is used here;
# a later padded-sequence version could use a larger, custom batch size.
batch_size = 1

# Datasets (True -> training split, False -> test split).
trainDataset = Name2CountryDataset(True)
testDataset = Name2CountryDataset(False)
# Data loaders over the two splits.
trainDataLoader = DataLoader(trainDataset, batch_size=batch_size, shuffle=True, num_workers=2)
testDataLoader = DataLoader(testDataset, batch_size=batch_size, shuffle=False, num_workers=2)

# Input dimension: the number of ASCII codes, used as the feature vocabulary.
input_size = 128
# Output dimension: the number of countries.
output_size = trainDataset.countrySize()
# Embedding-layer dimension.
embedding_size = 20
# Hidden-state dimension.
hidden_size = 36
# The RNN sequence length varies with each name's length; define a fixed one
# once sequences are padded to equal length.
# Number of stacked RNN layers (vertical direction); the last layer's final
# hidden state hn is the output we need.
num_layers = 2


class NeuralNet(nn.Module):
    """GRU classifier mapping an ASCII-encoded name to country logits."""

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        # Map each ASCII code point (0..input_size-1) to a dense embedding.
        self.embedding = nn.Embedding(input_size, embedding_size)
        # GRU over the character sequence; input is (batch, seq, embedding).
        self.rnn = nn.GRU(embedding_size, hidden_size, num_layers, batch_first=True)
        # Linear projection from the final hidden state to country logits.
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Compute country logits for a batch of names.

        x: LongTensor of shape (batch, seq) holding ASCII codes.
        Returns: FloatTensor of shape (batch, output_size) with raw logits.
        """
        # Look up character embeddings: (batch, seq) -> (batch, seq, embedding).
        x = self.embedding(x)
        # Omit h0: the GRU then creates a zero initial state with the correct
        # batch size, dtype and device. (The previous code hard-coded the
        # global batch_size and always allocated h0 on the CPU, which broke
        # for any other batch size or for a model moved to GPU.)
        _, hn = self.rnn(x)
        # hn[-1] is the last layer's final hidden state: (batch, hidden_size).
        return self.fc(hn[-1])


# Instantiate the model.
model = NeuralNet()
# Cross-entropy loss over the country logits.
criterion = nn.CrossEntropyLoss()
# Adam optimizer over all model parameters.
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)


# 定义训练循环
def myTrain(epoch):
    """Run one training epoch over trainDataLoader and print the last batch's loss.

    epoch: integer epoch index, used only for the progress printout.
    """
    # Loss of the most recent batch (-1 if the loader yielded nothing).
    curLoss = -1
    for idx, batch in enumerate(trainDataLoader):
        inputs, labels = batch
        # Clear gradients for EVERY batch. The original code called
        # zero_grad() only once per epoch, so gradients accumulated across
        # all batches and each step used a sum of stale gradients.
        optimizer.zero_grad()
        # Forward pass.
        y_pred = model(inputs)
        loss = criterion(y_pred, labels)

        curLoss = loss.item()

        # Backward pass.
        loss.backward()
        # Update weights.
        optimizer.step()
    print(f"Epoch: {epoch}, Loss: {curLoss}", end=" ")


# 定义测试循环
def myTest():
    """Evaluate the model on testDataLoader and print the accuracy."""
    # Total number of test samples seen.
    totalCount = 0
    # Number of correct predictions.
    correct = 0
    # NOTE(review): consider bracketing with model.eval()/model.train() if the
    # model ever gains dropout or batch-norm layers.
    with torch.no_grad():
        # The enumerate index was unused, and its name was shadowed by the
        # argmax result below — iterate directly and use a distinct name.
        for inputs, labels in testDataLoader:
            totalCount += len(labels)
            y_pred = model(inputs)
            # Predicted class = index of the maximum logit per sample.
            _, predicted = y_pred.max(dim=1)
            correct += (predicted == labels).sum().item()
    # Report the fraction of correct predictions.
    print(f"correct predictions: {correct * 1.0 / totalCount}")


if __name__ == '__main__':
    # Train for a fixed number of epochs, evaluating after each one.
    total_epochs = 100
    for current_epoch in range(total_epochs):
        myTrain(current_epoch)
        myTest()
