import torch
from torch import nn
from torch.nn.utils.rnn import pack_padded_sequence
from torch.utils.data import DataLoader

from Name2Country.Name2CountryDatasetWithComplement import Name2CountryDatasetWithComplement

# Batch size for both loaders
batch_size = 256
# Hidden state size; also reused as the embedding dimension
hidden_size = 100
# Number of stacked GRU layers
num_layers = 1
# Embedding vocabulary size (names are encoded as ASCII code points, so 128)
input_size = 128

# Initialize the datasets.
# NOTE(review): the bool presumably selects train (True) vs test (False) split —
# confirm against Name2CountryDatasetWithComplement.
trainDataset = Name2CountryDatasetWithComplement(True)
testDataset = Name2CountryDatasetWithComplement(False)
# Data loaders: shuffle only the training data
trainLoader = DataLoader(trainDataset, batch_size=batch_size, shuffle=True, num_workers=2)
testLoader = DataLoader(testDataset, batch_size=batch_size, shuffle=False, num_workers=2)

# Number of country classes (size of the classifier output)
N_COUNTRY = trainDataset.countries_num


def name2Num(name):
    """
    Encode a name as its per-character ASCII code points.

    :param name: input name string
    :return: tuple of (list of ASCII codes, length of the name)
    """
    return list(map(ord, name)), len(name)


def makeTensor(names, countries):
    """
    Turn a batch of names into padded, length-sorted tensors.

    Names are encoded character-by-character as ASCII code points, zero-padded
    to the longest name in the batch, then sorted by descending length so the
    result can be fed to pack_padded_sequence (which requires sorted input).

    :param names: batch of name strings
    :param countries: country labels aligned with ``names``
    :return: (padded name matrix, sorted lengths, labels reordered to match)
    """
    # Encode each name as ASCII code points and record its length.
    ascii_rows = [[ord(ch) for ch in name] for name in names]
    lengths = torch.tensor([len(name) for name in names], dtype=torch.long)
    labels = torch.tensor(countries, dtype=torch.long)
    # Zero-filled target matrix of shape (batch, longest name).
    padded = torch.zeros(len(ascii_rows), lengths.max(), dtype=torch.long)
    for row, codes in enumerate(ascii_rows):
        # Copy this name's codes into the row; the tail stays zero-padded.
        padded[row, :len(codes)] = torch.tensor(codes, dtype=torch.long)
    # Sort descending by length, as pack_padded_sequence expects.
    sorted_lengths, order = lengths.sort(dim=0, descending=True)
    # Reorder names and labels with the same permutation.
    return padded[order], sorted_lengths, labels[order]


class NeuralNetwork(nn.Module):
    """GRU classifier: embeds ASCII-encoded names and maps the final hidden
    state to country-class logits."""

    def __init__(self, hidden_dim=None, rnn_layers=None, vocab_size=None,
                 n_country=None, bidirectional=False):
        """
        All sizes default to the module-level configuration, so the original
        no-argument construction ``NeuralNetwork()`` still works unchanged.

        :param hidden_dim: embedding / GRU hidden size (defaults to hidden_size)
        :param rnn_layers: number of stacked GRU layers (defaults to num_layers)
        :param vocab_size: embedding vocabulary size (defaults to input_size)
        :param n_country: number of output classes (defaults to N_COUNTRY)
        :param bidirectional: whether the GRU runs in both directions
        """
        super().__init__()
        # Resolve per-instance configuration (previously hard-coded globals).
        self.hidden_size = hidden_size if hidden_dim is None else hidden_dim
        self.num_layers = num_layers if rnn_layers is None else rnn_layers
        vocab = input_size if vocab_size is None else vocab_size
        classes = N_COUNTRY if n_country is None else n_country
        self.bidirectional = bidirectional
        # 2 for a bidirectional GRU, 1 for unidirectional.
        self.n_directional = 2 if self.bidirectional else 1
        # Embedding layer: ASCII code -> dense vector.
        self.embedding = nn.Embedding(vocab, self.hidden_size)
        # GRU layer.
        self.gru = nn.GRU(self.hidden_size, self.hidden_size,
                          num_layers=self.num_layers,
                          bidirectional=self.bidirectional)
        # Fully connected output layer (both directions concatenated if bidirectional).
        self.fc = nn.Linear(self.hidden_size * self.n_directional, classes)

    def init_hidden(self, batch_size):
        """Return a zero hidden state of shape
        (num_layers * directions, batch, hidden) using this instance's sizes."""
        return torch.zeros(self.num_layers * self.n_directional,
                           batch_size, self.hidden_size)

    def forward(self, input, seq_lengths):
        """
        Forward pass.

        :param input: LongTensor of shape (batch_size, seq_len), zero-padded,
                      rows sorted by descending length
        :param seq_lengths: lengths of each row, sorted descending
        :return: logits of shape (batch_size, n_country)
        """
        # Transpose to (seq_len, batch_size) as the GRU expects.
        input = input.t()
        # Embed: (seq_len, batch_size, hidden_size).
        out = self.embedding(input)
        # Pack the padded batch so the GRU skips the zero padding
        # (input must already be sorted by descending length).
        gruInput = pack_padded_sequence(out, seq_lengths)
        # Run the GRU; only the final hidden state is used.
        out, hidden = self.gru(gruInput, self.init_hidden(input.size(1)))
        # For a bidirectional GRU, concatenate the last forward and backward
        # hidden states; otherwise just take the last layer's hidden state.
        if self.bidirectional:
            hidden = torch.cat((hidden[-1], hidden[-2]), dim=1)
        else:
            hidden = hidden[-1]
        # Project to class logits.
        output = self.fc(hidden)
        return output


# Instantiate the model
model = NeuralNetwork()
# Loss: cross-entropy over country classes
criterion = nn.CrossEntropyLoss()
# Optimizer: Adam over all model parameters
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)


def myTrain(epoch):
    """
    Run one training epoch over trainLoader and print the mean batch loss.

    :param epoch: current epoch index (used only for logging)
    """
    totalLoss = 0.0
    for inputs, labels in trainLoader:
        seqName, seqLength, labels = makeTensor(inputs, labels)
        # Zero accumulated gradients
        optimizer.zero_grad()
        # Forward pass
        outputs = model(seqName, seqLength)
        loss = criterion(outputs, labels)
        # Backpropagate
        loss.backward()
        # Update weights
        optimizer.step()

        # Accumulate the per-batch mean loss
        totalLoss += loss.item()
    # Report the mean batch loss. CrossEntropyLoss already averages within a
    # batch, so the correct denominator is the number of batches — the
    # original divided by len(trainDataset), which understates the loss.
    print(f"Epoch: {epoch}, Loss: {totalLoss / len(trainLoader)}", end=" ")


def myTest():
    """Evaluate the model on the test set and print overall accuracy."""
    correct = 0
    # No gradients needed for evaluation
    with torch.no_grad():
        for inputs, labels in testLoader:
            seqName, seqLength, labels = makeTensor(inputs, labels)
            # Forward pass
            outputs = model(seqName, seqLength)
            # Predicted class = index of the largest logit
            predictions = outputs.argmax(dim=1)
            # Count correct predictions in this batch
            correct += (predictions == labels).sum().item()

        print(f"Accuracy: {correct * 1.0 / len(testDataset)}")


if __name__ == '__main__':
    # Train for 100 epochs, printing test accuracy after each epoch.
    for epoch in range(100):
        myTrain(epoch)
        myTest()
