import json
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import string
import time

# Character vocabulary: all ASCII letters plus a few punctuation marks.
all_letters = string.ascii_letters + " ,.;'"
# Size of the one-hot character encoding.
n_letter = len(all_letters)

# Classification labels: the nationality/language categories.
categories = ['German', 'Greek', 'Spanish', 'Korean', 'Arabic',
              'Polish', 'Russian', 'Portuguese', 'Japanese', 'French',
              'Chinese', 'Italian', 'Irish', 'Scottish', 'Dutch', 'English', 'Czech', 'Vietnamese']
# Number of output classes.
category_num = len(categories)

# Read the name/label data file.
def read_data(filename):
    """Read a UTF-8 file of tab-separated ``name<TAB>category`` lines.

    Args:
        filename: path to the data file.

    Returns:
        A tuple ``(names, labels)`` of two parallel lists of strings.
    """
    # 1. Accumulators for names (x) and category labels (y).
    my_list_x, my_list_y = [], []
    # 2. Iterate the file lazily instead of materializing it with readlines().
    with open(filename, 'r', encoding='utf-8') as fr:
        for line in fr:
            # Skip blank / too-short lines (a valid line needs name + tab + label).
            if len(line) <= 5:
                continue
            # strip() removes the trailing newline and surrounding whitespace.
            x, y = line.strip().split('\t')
            my_list_x.append(x)
            my_list_y.append(y)

    return my_list_x, my_list_y

# Dataset wrapping the parallel name/label lists.
class NameClassDataset(Dataset):
    def __init__(self, mylist_x, mylist_y):
        """Store parallel lists of names (``mylist_x``) and labels (``mylist_y``)."""
        self.mylist_x = mylist_x
        self.mylist_y = mylist_y
        # Number of samples.
        self.sample_len = len(mylist_x)

    def __len__(self):
        """Return the number of samples."""
        return self.sample_len

    def __getitem__(self, index):
        """Return ``(tensor_x, tensor_y)`` for the sample at ``index``.

        tensor_x: float tensor of shape (len(name), n_letter) — one-hot rows.
        tensor_y: scalar long tensor holding the category index.
        """
        # 1. Clamp out-of-range indices into [0, sample_len - 1].
        index = min(max(index, 0), self.sample_len - 1)
        # 2. Look up the name and its category label.
        x = self.mylist_x[index]
        y = self.mylist_y[index]

        # 3. One-hot encode the name, one row per character.
        tensor_x = torch.zeros(len(x), n_letter)
        for li, letter in enumerate(x):
            pos = all_letters.find(letter)
            # Bug fix: find() returns -1 for out-of-vocabulary characters
            # (e.g. accented letters in the data set); negative indexing used
            # to silently flip the LAST column. Leave such rows all-zero.
            if pos != -1:
                tensor_x[li][pos] = 1
        # 4. Encode the label as its index in `categories`.
        tensor_y = torch.tensor(categories.index(y), dtype=torch.long)
        return tensor_x, tensor_y

# Vanilla RNN classifier.
class MyRNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        """RNN + linear head + LogSoftmax classifier.

        Args:
            input_size: embedding dimension (one-hot width, e.g. 57).
            hidden_size: RNN hidden dimension.
            output_size: number of categories.
            num_layers: number of stacked RNN layers.
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        # batch_first=True: input is [batch_size, seq_len, input_size].
        self.rnn = nn.RNN(self.input_size, self.hidden_size,
                          num_layers=self.num_layers, batch_first=True)
        # Projects the last hidden state onto the category scores.
        self.linear = nn.Linear(self.hidden_size, self.output_size)
        # LogSoftmax pairs with NLLLoss during training.
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input, hidden):
        """Classify a batch of sequences.

        input: [batch_size, seq_len, input_size]
        hidden: [num_layers, batch_size, hidden_size]
        Returns (log_probs [batch_size, output_size], final hidden state).
        """
        rnn_output, rnn_hn = self.rnn(input, hidden)
        # Generalized from rnn_output[0][-1].unsqueeze(0): take the last
        # timestep for EVERY batch element (identical result for batch 1).
        last_step = rnn_output[:, -1, :]
        output = self.linear(last_step)
        return self.softmax(output), rnn_hn

    def inithidden(self, batch_size=1):
        """Return a zero initial hidden state for `batch_size` sequences."""
        return torch.zeros(self.num_layers, batch_size, self.hidden_size)
# LSTM classifier.
class MyLSTM(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        """LSTM + linear head + LogSoftmax classifier.

        Args:
            input_size: embedding dimension (one-hot width, e.g. 57).
            hidden_size: LSTM hidden dimension.
            output_size: number of categories.
            num_layers: number of stacked LSTM layers.
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        # batch_first=True: input is [batch_size, seq_len, input_size].
        self.lstm = nn.LSTM(self.input_size, self.hidden_size,
                            num_layers=self.num_layers, batch_first=True)
        # Projects the last hidden state onto the category scores.
        self.linear = nn.Linear(self.hidden_size, self.output_size)
        # LogSoftmax pairs with NLLLoss during training.
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input, hidden, c0):
        """Classify a batch of sequences.

        input: [batch_size, seq_len, input_size]
        hidden, c0: [num_layers, batch_size, hidden_size]
        Returns (log_probs [batch_size, output_size], final h, final c).
        """
        lstm_output, (lstm_hn, lstm_cn) = self.lstm(input, (hidden, c0))
        # Generalized from lstm_output[0][-1].unsqueeze(0): take the last
        # timestep for EVERY batch element (identical result for batch 1).
        last_step = lstm_output[:, -1, :]
        output = self.linear(last_step)
        return self.softmax(output), lstm_hn, lstm_cn

    def inithidden(self, batch_size=1):
        """Return zero initial (h0, c0) states for `batch_size` sequences."""
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size)
        return h0, c0

# GRU classifier.
class MyGRU(nn.Module):
    def __init__(self, input_size, hidden_size, output_size, num_layers=1):
        """GRU + linear head + LogSoftmax classifier.

        Args:
            input_size: embedding dimension (one-hot width, e.g. 57).
            hidden_size: GRU hidden dimension.
            output_size: number of categories.
            num_layers: number of stacked GRU layers.
        """
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.num_layers = num_layers
        # batch_first=True: input is [batch_size, seq_len, input_size].
        self.gru = nn.GRU(self.input_size, self.hidden_size,
                          num_layers=self.num_layers, batch_first=True)
        # Projects the last hidden state onto the category scores.
        self.linear = nn.Linear(self.hidden_size, self.output_size)
        # LogSoftmax pairs with NLLLoss during training.
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, input, hidden):
        """Classify a batch of sequences.

        input: [batch_size, seq_len, input_size]
        hidden: [num_layers, batch_size, hidden_size]
        Returns (log_probs [batch_size, output_size], final hidden state).
        """
        gru_output, gru_hn = self.gru(input, hidden)
        # Generalized from gru_output[0][-1].unsqueeze(0): take the last
        # timestep for EVERY batch element (identical result for batch 1).
        last_step = gru_output[:, -1, :]
        output = self.linear(last_step)
        return self.softmax(output), gru_hn

    def inithidden(self, batch_size=1):
        """Return a zero initial hidden state for `batch_size` sequences."""
        return torch.zeros(self.num_layers, batch_size, self.hidden_size)


# Learning rate for the Adam optimizer.
my_lr = 1e-3
# Number of full passes over the training set.
epochs = 1
# Train the RNN model.
def train_rnn(filename=r'D:\Project\AIStudent\NLPBase\02-RNN\00-人名分类案例\00-data\name_classfication.txt'):
    """Train MyRNN on the name-classification data, one sample at a time.

    Args:
        filename: path to the tab-separated name/category data file
            (default kept for backward compatibility; now a raw string so
            sequences like ``\\P`` are no longer invalid escapes).

    Returns:
        Tuple ``(total_loss_list, total_time, total_acc_list)`` — average
        losses/accuracies sampled every 100 iterations, and elapsed seconds.

    Side effects: saves model weights per epoch and writes ./ai_rnn.json.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # Load the raw data.
    my_list_x, my_list_y = read_data(filename=filename)
    # Wrap it in the Dataset.
    my_dataset = NameClassDataset(my_list_x, my_list_y)
    # Instantiate the model: n_letters=57, hidden_size=128, output_size=18.
    my_rnn = MyRNN(input_size=57, hidden_size=128, output_size=18).to(device)
    # Negative log-likelihood loss (model outputs log-probabilities).
    my_nll_loss = nn.NLLLoss()
    # Adam optimizer over all model parameters.
    my_optim = optim.Adam(my_rnn.parameters(), lr=my_lr)
    # Logging state.
    start_time = time.time()
    total_iter_num = 0    # samples trained so far
    total_loss = 0        # cumulative loss
    total_loss_list = []  # average loss sampled every 100 iterations
    total_acc_num = 0     # correctly predicted samples so far
    total_acc_list = []   # average accuracy sampled every 100 iterations
    # Training loop.
    for epoch_idx in range(epochs):
        # batch_size=1: one name per step (names have variable length).
        my_dataloader = DataLoader(dataset=my_dataset, batch_size=1, shuffle=True)
        for i, (x, y) in enumerate(my_dataloader):
            x, y = x.to(device), y.to(device)
            # x already has shape [1, seq_len, 57] and lives on `device`;
            # the old x[0].unsqueeze(0).to(device) round-trip was redundant.
            hidden = my_rnn.inithidden().to(device)
            output, hn = my_rnn(input=x, hidden=hidden)
            # Compute the loss.
            my_loss = my_nll_loss(output, y)

            # Standard step: zero grads, backprop, update.
            my_optim.zero_grad()
            my_loss.backward()
            my_optim.step()

            # Update running statistics.
            total_iter_num = total_iter_num + 1
            total_loss = total_loss + my_loss.item()
            i_predict_num = 1 if torch.argmax(output).item() == y.item() else 0
            total_acc_num = total_acc_num + i_predict_num

            # Record average loss/accuracy every 100 iterations.
            if total_iter_num % 100 == 0:
                avg_loss = total_loss / total_iter_num
                total_loss_list.append(avg_loss)
                avg_acc = total_acc_num / total_iter_num
                total_acc_list.append(avg_acc)
            # Print a progress line every 2000 iterations.
            if total_iter_num % 2000 == 0:
                temp_loss = total_loss / total_iter_num
                temp_acc = total_acc_num / total_iter_num
                temp_time = time.time() - start_time
                print('轮次:%d, 损失:%.6f, 时间:%d，准确率:%.3f' % (epoch_idx + 1, temp_loss, temp_time, temp_acc))
        # Checkpoint the weights after each epoch.
        torch.save(my_rnn.state_dict(), './ai20_rnn_%d.bin' % (epoch_idx + 1))
    # Total elapsed training time in seconds.
    total_time = int(time.time() - start_time)
    print('训练总耗时：', total_time)
    # Persist the metrics for later plotting/comparison.
    dict1 = {"avg_loss": total_loss_list,
             "all_time": total_time,
             "avg_acc": total_acc_list}
    with open('./ai_rnn.json', 'w') as fw:
        fw.write(json.dumps(dict1))

    return total_loss_list, total_time, total_acc_list

# Script entry point: run RNN training and keep the returned metrics.
if __name__ == '__main__':
    results = train_rnn()
    total_loss_list, total_time, total_acc_list = results



