import torch 
import os 
import torch.nn as nn
from tqdm import tqdm
import random
from torch.utils.tensorboard import SummaryWriter

## 1. Data processing: build the character vocabulary
def cons_dict(path):
    """Build a char->index vocabulary from every file under *path*.

    Index 0 is reserved for the "[pad]" token; every other distinct
    character (except NBSP and plain space) gets the next free index in
    order of first appearance.

    Args:
        path: directory containing the data files (one class per file).

    Returns:
        dict mapping each character to its integer index.
    """
    vocab = {"[pad]": 0}
    index = 1
    for each_file in os.listdir(path):
        # os.path.join works whether or not *path* ends with a separator
        # (the original `path + each_file` required a trailing slash);
        # explicit utf-8 so non-ASCII names decode the same on any locale
        with open(os.path.join(path, each_file), "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip("\n").strip()
                for c in line:
                    # skip non-breaking spaces and regular spaces
                    if c == "\xa0" or c == " ":
                        continue
                    if c not in vocab:
                        vocab[c] = index
                        index += 1

    return vocab

# 2. Build (text, label) pairs and split off a test set
def get_data(path, vocab):
    """Load every file under *path* into (line, label-index) pairs.

    Each file-name stem (text before the first ".") is one class label.
    Lines containing any character missing from *vocab* are dropped.
    Roughly 10% of the pairs are then moved into a random test split.

    Args:
        path: directory of data files, one class per file.
        vocab: char->index dict (from cons_dict); used to filter lines.

    Returns:
        (train_src, train_tgt, test_src, test_tgt, label) where *label* is
        the ordered list of class names and the *_tgt lists hold indices
        into it.
    """
    # Single listdir() call so the label order and the iteration order are
    # guaranteed to agree (the original listed the directory twice).
    files = os.listdir(path)
    label = [name.split(".")[0] for name in files]

    input_data = []
    output_data = []
    for each_file in files:
        # numeric class id for this file
        label_index = label.index(each_file.split(".")[0])
        with open(os.path.join(path, each_file), "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip("\n").strip()
                # keep only lines fully covered by the vocabulary
                # (lines with unknown characters are skipped)
                if all(c in vocab for c in line):
                    input_data.append(line)
                    output_data.append(label_index)

    # Move ~10% of the pairs into a random test split.
    test_src = []
    test_tgt = []
    total = len(input_data)
    for _ in range(total // 10):
        # randrange is exclusive of len(); the original
        # randint(0, len(input_data)) could pick len() itself and raise
        # IndexError.  pop() removes in place instead of rebuilding lists.
        idx = random.randrange(len(input_data))
        test_src.append(input_data.pop(idx))
        test_tgt.append(output_data.pop(idx))

    return input_data, output_data, test_src, test_tgt, label

class RnnModel(nn.Module):
    """Character-level classifier: embedding -> bidirectional LSTM -> linear head."""

    def __init__(self, vocab_size, embed_size, hidden_size, output_size):
        super(RnnModel, self).__init__()
        self.embed_size = embed_size
        self.hidden_size = hidden_size
        self.output_size = output_size

        # Submodules are created in the same order as before so seeded
        # parameter initialisation stays reproducible.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.LSTM(embed_size, hidden_size, bidirectional=True)
        self.output = nn.Linear(2 * hidden_size, output_size)

    def forward(self, x):
        """Map a (batch, time_step) long tensor of char indices to (batch, output_size) logits."""
        # LSTM expects (time_step, batch, embed_size), hence the permute
        embedded = self.embedding(x).permute(1, 0, 2)
        _, (hidden, _) = self.rnn(embedded)
        # hidden: (2, batch, hidden_size) — one final state per direction;
        # concatenate backward then forward state into one feature vector
        features = torch.cat((hidden[1], hidden[0]), dim=1)
        # classify from the concatenated final states
        return self.output(features)
        

if __name__ == "__main__":

    # TensorBoard writer for the training-loss / accuracy curves
    writer = SummaryWriter("./log_bilstm/")

    # Build the char vocabulary and the train/test split from the data dir.
    vocab = cons_dict("./data/names/")

    src_data, target_data, test_src, test_tgt, label = get_data("./data/names/", vocab)

    print(vocab)
    print(label)

    print(src_data[:10])
    print(target_data[:10])

    # embed_size=128, hidden_size=64; one output logit per class
    rnn_model = RnnModel(len(vocab), 128, 64, len(label))
    rnn_model.train()
    optimizer = torch.optim.Adam(rnn_model.parameters(), lr=0.001)
    loss_func = nn.CrossEntropyLoss()
    report_loss = 0
    step = 0
    epochs = 15
    big_step = 0
    for epoch in range(epochs):
        # NOTE(review): the zip values are immediately overwritten — every
        # step draws a random sample (with replacement) instead, so zip only
        # fixes the number of steps per epoch at len(src_data).
        for input_data, output_data in zip(src_data, target_data):
            idx = random.randint(0, len(src_data) - 1)
            input_data = src_data[idx]
            output_data = target_data[idx]
            step += 1
            optimizer.zero_grad()
            
            # encode the name as char indices; effective batch size is 1
            input_data = [vocab[c] for c in input_data]
            input_tensor = torch.tensor(input_data, dtype=torch.long)
            input_tensor = input_tensor.view((1, -1))
            target_tensor = torch.tensor([output_data], dtype=torch.long)

            out = rnn_model(input_tensor)

            # print(out.shape)
            # print(target_tensor.shape)
            loss = loss_func(out, target_tensor)

            loss.backward()
            report_loss += loss.item()
           
            optimizer.step()

            # log the mean loss over the last 2000 steps
            if step % 2000 == 0:
                print("report loss is : " + str(report_loss / 2000))
                writer.add_scalar('Loss/train', report_loss / 2000, big_step)
                report_loss = 0
                big_step += 1
                
    
        # NOTE(review): checkpoint is saved as ./lstm_model.pkl, but the
        # commented-out inference code below loads ./rnn_model.pkl — confirm
        # which filename is intended.
        torch.save(rnn_model.state_dict(), "./lstm_model.pkl")
        # evaluate accuracy on the held-out test split
        right = 0
        test_num = len(test_src)
        rnn_model.eval()
        for src, tgt in zip(test_src, test_tgt):
            
            input_data = [vocab[c] for c in src]
            input_tensor = torch.tensor(input_data, dtype=torch.long)
            input_tensor = input_tensor.view((1, -1))
        
            out = rnn_model(input_tensor)

            # argmax yields a one-element tensor; comparing to the int label
            # gives a one-element boolean tensor, which `if` truth-tests
            if out.argmax(dim=1) == tgt:
                right += 1
        acc = right / test_num
        print("epoch is : " + str(epoch))
        print("test acc is : " + str(acc))
        writer.add_scalar('acc/test', acc, epoch)
        
        # accuracy on the training set (model still in eval mode)
        train_right = 0
        total_train = len(src_data)
        for input_data, output_data in zip(src_data, target_data):
            input_data = [vocab[c] for c in input_data]
            input_tensor = torch.tensor(input_data, dtype=torch.long)
            input_tensor = input_tensor.view((1, -1))
        
            out = rnn_model(input_tensor)

            if out.argmax(dim=1) == output_data:
                train_right += 1
        
        train_acc = train_right / total_train
        print("train acc is : " + str(train_acc))
        writer.add_scalar('acc/train', train_acc, epoch)

        # back to training mode for the next epoch
        rnn_model.train()


    
    # # Inference example (standalone test of a saved checkpoint)
    # vocab = cons_dict("./data/names/")
    # src_data, target_data, test_src, test_tgt, label = get_data("./data/names/", vocab)

    # rnn_model = RnnModel(len(vocab), 128, 64, len(label))
    # rnn_model.eval()
    # rnn_model.load_state_dict(torch.load("./rnn_model.pkl", map_location="cpu"))

    # test_data = "Biganska"

    # input_data = [vocab[c] for c in test_data]
    # input_tensor = torch.tensor(input_data, dtype=torch.long)
    # input_tensor = input_tensor.view((1, -1))
  
    # out = rnn_model(input_tensor)

    # print(label[out.argmax(dim=1)])
