import numpy as np
import torch.nn.functional as F
from torch import nn
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
# Define the network
class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5):
        super(lstm_model, self).__init__()
        self.vocab = vocab  # 字符数据集,也可以理解为输出的类别
        # 索引 : 字符 => 方便数字编码转为对应字符
        self.int_char = {i: char for i, char in enumerate(vocab)}  # 另一种写法：self.int_char = dict(enumerate(vocab))
        # 字符 : 索引 => 方便字符转为数字编码
        self.char_int = {char: i for i, char in self.int_char.items()}
        # 对字符进行one-hot encoding
        self.encoder = OneHotEncoder(sparse=False).fit(vocab.reshape(-1, 1))  # 这里需要对vocab(字符表)进行shape转换
        # 隐藏层大小
        self.hidden_size = hidden_size
        # lstm层数
        self.num_layers = num_layers

        # lstm层
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)

        # 全连接层
        self.linear = nn.Linear(hidden_size, len(vocab))
        # 这里的输出是每个字符的得分,因此文本生成的本质其实就是分类,哪个字符得分最高,下一个字符就是得分高的字符


    def forward(self, sequence, hs=None):
        out, hs = self.lstm(sequence, hs)  # lstm的输出格式：(batch_size, sequence_length, hidden_size)
        out = out.reshape(-1, self.hidden_size)  # 这里需要将out转换为linear的输入格式，即(batch_size*sequence_length, hidden_size)
        output = self.linear(out)  # linear的输出格式：(batch_size*sequence_length, vocab_size)
        return output, hs

    def onehot_encode(self, data):
        return self.encoder.transform(data)

    def onehot_decode(self, data):
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):
        # print(np.array([self.char_int[ch] for ch in data]))
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):
        return np.array([self.int_char[ch] for ch in data])

