import torch 
import os 
import torch.nn as nn
from tqdm import tqdm
import random

## 1. Data processing: build the character vocabulary
def cons_dict(path):
    """Build a character-to-index vocabulary from every file under *path*.

    Index 0 is reserved for the padding token "[pad]"; every other
    character encountered in the data files is assigned the next free
    index in first-seen order. Plain spaces and non-breaking spaces are
    skipped.

    Args:
        path: Directory containing the training text files.

    Returns:
        dict mapping each character (str) to its integer index.
    """
    vocab = {"[pad]": 0}
    index = 1
    for each_file in os.listdir(path):
        # os.path.join works with or without a trailing slash on `path`
        # (the original `path + each_file` required the trailing slash).
        with open(os.path.join(path, each_file), "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip("\n").strip()
                for c in line:
                    # skip regular and non-breaking spaces
                    if c == "\xa0" or c == " ":
                        continue
                    if c not in vocab:
                        vocab[c] = index
                        index += 1

    return vocab

# 2. Build the input/output training data
def get_data(path, vocab):
    """Load the classification dataset from *path*.

    Each file under *path* represents one class; the class name is the
    file name without its extension. Any line that contains a character
    missing from *vocab* is dropped entirely.

    Args:
        path: Directory containing one text file per class.
        vocab: Character-to-index mapping (e.g. from cons_dict()).

    Returns:
        Tuple (input_data, output_data, label):
          input_data  - list of raw line strings,
          output_data - matching list of integer class indices,
          label       - list of class names (index == class id).
    """
    # List the directory once so label order and file order agree
    # (the original listed it twice, which is not guaranteed stable).
    file_names = os.listdir(path)
    label = [name.split(".")[0] for name in file_names]

    input_data = []
    output_data = []
    for each_file in file_names:
        # numeric class id for this file
        label_index = label.index(each_file.split(".")[0])
        with open(os.path.join(path, each_file), "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip("\n").strip()
                # keep the line only if every character is in-vocabulary
                if all(c in vocab for c in line):
                    input_data.append(line)
                    output_data.append(label_index)

    return input_data, output_data, label

class RnnModel(nn.Module):
    """Character-level RNN classifier.

    Embeds each character id, runs a single-layer vanilla RNN over the
    sequence, and produces class logits from the final hidden state.
    """

    def __init__(self, vocab_size, embed_size, hidden_size, output_size):
        super(RnnModel, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.RNN(embed_size, hidden_size)
        self.output = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map a batch of id sequences to class logits.

        Args:
            x: LongTensor of shape (batch, time_step).

        Returns:
            FloatTensor of shape (batch, output_size) with raw logits.
        """
        embedded = self.embedding(x)            # (batch, time, embed)
        # nn.RNN expects (time, batch, embed) by default
        rnn_input = embedded.permute(1, 0, 2)
        _, final_hidden = self.rnn(rnn_input)   # (1, batch, hidden)
        # classify from the last hidden state: (batch, hidden) -> logits
        return self.output(final_hidden.squeeze(0))
        
if __name__ == "__main__":
    # Build the vocabulary and the (line, class-index) dataset.
    vocab = cons_dict("./data/names/")
    src_data, target_data, label = get_data("./data/names/", vocab)

    print(vocab)
    print(label)
    print(src_data[:10])
    print(target_data[:10])

    rnn_model = RnnModel(len(vocab), 128, 64, len(label))
    rnn_model.train()
    optimizer = torch.optim.Adam(rnn_model.parameters(), lr=0.001)
    loss_func = nn.CrossEntropyLoss()
    report_loss = 0
    step = 0
    epochs = 5

    for epoch in range(epochs):
        # One "epoch" here is len(src_data) uniformly random draws.
        # (The original iterated zip(src_data, target_data) but discarded
        # those loop variables in favor of a random index each step; this
        # loop states that sampling scheme directly.)
        for _ in range(len(src_data)):
            idx = random.randrange(len(src_data))
            input_data = src_data[idx]
            output_data = target_data[idx]
            step += 1
            optimizer.zero_grad()

            # Encode characters to ids and add a batch dimension of 1.
            input_ids = [vocab[c] for c in input_data]
            input_tensor = torch.tensor(input_ids, dtype=torch.long).view((1, -1))
            target_tensor = torch.tensor([output_data], dtype=torch.long)

            out = rnn_model(input_tensor)
            loss = loss_func(out, target_tensor)

            loss.backward()
            report_loss += loss.item()
            optimizer.step()

            # Report the running mean loss every 2000 steps.
            if step % 2000 == 0:
                print("report loss is : " + str(report_loss / 2000))
                report_loss = 0

    torch.save(rnn_model.state_dict(), "./rnn_model.pkl")

    
# # Inference example (commented out)
    # vocab = cons_dict("./data/names/")
    # src_data, target_data, label = get_data("./data/names/", vocab)

    # rnn_model = RnnModel(len(vocab), 128, 64, len(label))
    # rnn_model.eval()
    # rnn_model.load_state_dict(torch.load("./rnn_model.pkl", map_location="cpu"))

    # test_data = "Antyuganov"

    # input_data = [vocab[c] for c in test_data]
    # input_tensor = torch.tensor(input_data, dtype=torch.long)
    # input_tensor = input_tensor.view((1, -1))
  
    # out = rnn_model(input_tensor)

    # print(label[out.argmax(dim=1)])


