import ast
import os

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.loss import CrossEntropyLoss
from torch.nn.utils.rnn import pad_sequence
from torch.utils.data import DataLoader, Dataset, RandomSampler, Sampler


class TextDataset(Dataset):
    """Character-level next-character-prediction dataset.

    Each line of ``file_name`` becomes one sample: the input is the line
    minus its final character and the label is the line shifted left by one
    character, so position ``t`` of the label is the character that follows
    position ``t`` of the input.  The character-to-id vocabulary is cached
    in ``ckpt/word2id.mad`` so repeated runs keep a consistent mapping.
    """

    def __init__(self, file_name="data/mad.txt"):
        vocab_path = "ckpt/word2id.mad"
        if os.path.exists(vocab_path):
            with open(vocab_path, "r", encoding="utf-8") as f:
                # literal_eval only accepts Python literals: safe drop-in
                # replacement for eval(), which would execute arbitrary
                # code from a tampered cache file.
                self.word2id = ast.literal_eval(f.read())
        else:
            with open(file_name, "r", encoding="utf-8") as f:
                # sorted() makes id assignment deterministic across runs
                # (raw set iteration order varies with hash randomization).
                chars = sorted(set(f.read()))
            self.word2id = {ch: i for i, ch in enumerate(chars)}
            # Make sure the cache directory exists before writing.
            os.makedirs(os.path.dirname(vocab_path), exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as f:
                f.write(str(self.word2id))

        inputs, labels = [], []
        # `with` closes the corpus file (the original leaked the handle).
        with open(file_name, "r", encoding="utf-8") as f:
            for line in f:
                labels.append(line[1:])   # target: next character at each step
                inputs.append(line[:-1])  # input: all but the final character

        # Characters missing from the cached vocabulary map to id 0.
        self.labels = [[self.word2id.get(ch, 0) for ch in doc] for doc in labels]
        self.inputs = [[self.word2id.get(ch, 0) for ch in doc] for doc in inputs]
        self.n_word = len(self.word2id)

    def __len__(self):
        """Number of samples (= lines in the corpus)."""
        return len(self.labels)

    def __getitem__(self, idx):
        """Return ``(input_ids, label_ids)`` as 1-D LongTensors."""
        return (
            torch.tensor(self.inputs[idx], dtype=torch.long),
            torch.tensor(self.labels[idx], dtype=torch.long),
        )

def collate_batch(batch):
    """Collate (input, label) pairs into padded, time-major batch tensors.

    Returns ``(inputs, labels, mask)``, each of shape ``(T_max, B)``; the
    float mask is 1.0 on real tokens and 0.0 on padding.
    """
    inputs, labels = zip(*batch)
    masks = [torch.ones_like(seq) for seq in inputs]
    padded_inputs = pad_sequence(list(inputs)).long()
    padded_labels = pad_sequence(list(labels)).long()
    padded_masks = pad_sequence(masks).float()
    return padded_inputs, padded_labels, padded_masks

class Model(nn.Module):
    """Character-level GRU language model.

    Embeds character ids, runs them through a stacked GRU, and projects
    each hidden state to per-character logits.
    """

    def __init__(self, n_word):
        super().__init__()
        self.n_word = n_word
        self.n_hidden = 128  # embedding size and GRU hidden size
        self.n_layer = 2     # stacked GRU layers
        # Character embedding lookup (id -> dense vector).
        self.emb = nn.Embedding(self.n_word, self.n_hidden)
        # Recurrent core; expects time-major input (T, B, H).
        self.rnn = nn.GRU(self.n_hidden, self.n_hidden, self.n_layer)
        # Project hidden states to per-character logits.
        self.out = nn.Linear(self.n_hidden, self.n_word)

    def forward(self, x, h0):
        """Compute next-character logits.

        Args:
            x: LongTensor of shape (T, B) — time-major character ids.
               (The original unpacked this as ``B, T``, which was both
               unused and mislabeled; the permute comment below confirms
               the time-major layout.)
            h0: initial hidden state, shape (n_layer, B, n_hidden).

        Returns:
            Logits of shape (B, n_word, T), ready for CrossEntropyLoss.
        """
        emb = self.emb(x)              # (T, B, H)
        hidden, _ = self.rnn(emb, h0)  # final hidden state is discarded
        logits = self.out(hidden)      # (T, B, C)
        return logits.permute(1, 2, 0)  # (T, B, C) -> (B, C, T)



def main():
    """Train the character-level GRU language model on data/mad2.txt.

    Periodically prints the loss and checkpoints weights to ckpt/mad.pt.
    """
    train_dataset = TextDataset("data/mad2.txt")
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=20,
        shuffle=True,
        collate_fn=collate_batch,
        num_workers=3,
    )

    # Fall back to CPU automatically instead of crashing when CUDA is
    # absent (the original hard-coded gpu = True).
    gpu = torch.cuda.is_available()

    model = Model(train_dataset.n_word)
    model.train()
    if gpu:
        model.cuda()
    # Resume only when a checkpoint exists; the original unconditional
    # load crashed on a fresh run.
    ckpt_path = "ckpt/mad.pt"
    if os.path.exists(ckpt_path):
        model.load_state_dict(torch.load(ckpt_path))
    optim = torch.optim.Adam(model.parameters(), 1e-4)
    lossfn = CrossEntropyLoss(reduction="none")
    n_epoch = 1000
    count = 0
    for epoch in range(n_epoch):
        for x, d, m in train_dataloader:
            _, nbatch = x.shape  # x is time-major: (T, B)
            # Use the model's own layer count instead of a hard-coded 2.
            h0 = torch.zeros([model.n_layer, nbatch, model.n_hidden])
            if gpu:
                x, d, m, h0 = x.cuda(), d.cuda(), m.cuda(), h0.cuda()
            y = model(x, h0)  # logits, (B, C, T)
            # Per-token loss with padding masked out; normalize by the
            # number of real tokens — the original .mean() divided by the
            # padded tensor size, diluting the loss on ragged batches.
            loss = lossfn(y, d.permute(1, 0)) * m.permute(1, 0)
            loss = loss.sum() / m.sum()
            loss.backward()
            optim.step()
            optim.zero_grad()
            count += 1
            if count % 50 == 0:
                print(epoch, count, loss.detach().cpu().numpy())
                torch.save(model.state_dict(), ckpt_path)

# Script entry point: run training only when executed directly, not on import.
if __name__ == "__main__":
    main()