import random

import torch
import numpy as np
import torch.nn as nn
from torch.utils import data

# Build the training vocabulary
def gen_words(word1):
    """Build a char-to-index vocabulary from the characters of *word1*.

    Index 0 is reserved for "[PAD]"; each distinct character gets the next
    free index, and "[UNK]" gets the final index.

    Args:
        word1: iterable of characters (typically a string).

    Returns:
        dict mapping each token (char or special token) to a unique int id.
    """
    words = {"[PAD]": 0}
    for ch in word1:
        # Bug fix: skip characters already seen. The original unconditional
        # assignment could give two different characters the same id when
        # the input contained duplicates (e.g. "aab" -> 'a' and 'b' both 2).
        if ch not in words:
            words[ch] = len(words)
    words["[UNK]"] = len(words)
    return words
def dataset_builder_base(words, word_length, target):
    """Build one sample: token ids of a random sequence containing *target*.

    Args:
        words: vocabulary dict (token -> int id) from gen_words.
        word_length: length of the generated sequence.
        target: token guaranteed to appear in the sequence.

    Returns:
        (x, y) where x is a list of word_length token ids and y is the index
        of the first occurrence of *target* in the sequence.
    """
    vocab = list(words)
    # word_length-1 random tokens plus the guaranteed target, shuffled.
    x = [random.choice(vocab) for _ in range(word_length - 1)]
    x.append(target)
    random.shuffle(x)  # randomize the target's position
    y = x.index(target)
    # Bug fix: the fallback must be the *id* of "[UNK]", not the string
    # '[UNK]' itself — a string here would crash torch.LongTensor later.
    unk = words["[UNK]"]
    x = [words.get(ch, unk) for ch in x]
    return x, y
def dataset_builder(dataset_num, words, target, word_length):
    """Generate *dataset_num* samples and stack them into two LongTensors.

    Returns:
        (X, Y): X of shape (dataset_num, word_length) with token ids,
        Y of shape (dataset_num,) with target positions.
    """
    samples = [dataset_builder_base(words, word_length, target)
               for _ in range(dataset_num)]
    features = [pair[0] for pair in samples]
    labels = [pair[1] for pair in samples]
    return torch.LongTensor(features), torch.LongTensor(labels)

class MyDataset(data.Dataset):
    """Map-style dataset of pre-generated (sequence, target-position) pairs."""

    def __init__(self, dataset_num, words, word_length, target):
        # Materialize the whole dataset up front as two tensors.
        features, labels = dataset_builder(dataset_num, words, target, word_length)
        self.data_tensorm = features
        self.target_tensor = labels

    def __len__(self):
        # Number of samples = first dimension of the feature tensor.
        return len(self.data_tensorm)

    def __getitem__(self, idx):
        return self.data_tensorm[idx], self.target_tensor[idx]

# Model definition
class MyModel(nn.Module):
    """Embedding + single-layer RNN predicting the target's position.

    The RNN's final hidden state is used directly as per-position logits,
    so hidden_size must equal the sequence length (word_length).
    """

    def __init__(self, words, word_length, vector_num, hidden_size):
        # word_length is unused here but kept for interface compatibility.
        super(MyModel, self).__init__()
        self.emb = nn.Embedding(len(words), vector_num, padding_idx=0)
        self.rnn = nn.RNN(vector_num, hidden_size, batch_first=True)
        self.loss = nn.functional.cross_entropy

    def forward(self, x, y_true=None):
        """Return the loss if y_true is given, else logits (batch, hidden_size)."""
        embedded = self.emb(x)          # (batch, seq, vector_num)
        _, hidden = self.rnn(embedded)  # hidden: (num_layers=1, batch, hidden_size)
        # Bug fix: squeeze only the layer dimension. A bare .squeeze() also
        # drops the batch dimension when batch_size == 1, producing a 1-D
        # tensor that breaks cross_entropy / per-sample argmax.
        y_pred = hidden.squeeze(0)      # (batch, hidden_size)
        if y_true is not None:
            return self.loss(y_pred, y_true)
        return y_pred
def evaluate(model, words, target="a", word_length=5, dataset_num=100):
    """Measure position-prediction accuracy on a freshly generated batch.

    Args:
        model: trained MyModel; must carry a .device attribute.
        words: vocabulary dict from gen_words.
        target: token whose position is predicted (default "a" — the
            original hard-coded value, kept for backward compatibility).
        word_length: sequence length (default 5, the original value).
        dataset_num: number of evaluation samples (default 100).
    """
    X, Y = dataset_builder(dataset_num, words, target, word_length)
    X = X.to(model.device)
    Y = Y.to(model.device)
    model.eval()
    with torch.no_grad():
        logits = model(X)
        # Vectorized: argmax over positions, compared against the true
        # positions in one shot instead of a Python loop.
        success = (logits.argmax(dim=-1) == Y).sum().item()
    print("本轮预测准确率:%.5f" % (success / dataset_num))


def main():
    """Train the position-prediction model and report accuracy each epoch."""
    # Hyperparameters
    words = gen_words("hncjrzspda我们真的很好弟")
    batch_size = 500
    data_set_num = 100000
    vector_num = 24
    hidden_size = 5
    word_length = 5
    epochs = 20
    lr = 1e-3

    model = MyModel(words, word_length, vector_num, hidden_size)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    # Pick a device and remember it on the model for later .to() calls.
    if torch.cuda.is_available():
        model.cuda()
        model.device = torch.device('cuda:0')
    else:
        model.device = torch.device('cpu')

    loader = data.DataLoader(MyDataset(data_set_num, words, word_length, 'a'), batch_size)
    for epoch in range(epochs):
        model.train()
        epoch_losses = []
        for feature, label in loader:
            feature = feature.to(model.device)  # move batch to model device
            label = label.to(model.device)
            loss = model(feature, label)
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()
            epoch_losses.append(loss.item())
        print("epoch:%d,loss:%.5f" % (epoch, np.mean(epoch_losses)))
        # Validate accuracy after every epoch.
        evaluate(model, words)

# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()


