import torch
import torch.nn as nn
from torch.autograd import Variable

# Multi-layer LSTM on a long sentence: build a sliding-window dataset.

torch.manual_seed(777)  # reproducibility

sentence = ("if you want to build a ship, don't drum up people together to "
            "collect wood and don't assign them tasks and work, but rather "
            "teach them to long for the endless immensity of the sea.")

char_set = list(set(sentence))  # distinct characters (the vocabulary)
char_dic = {ch: idx for idx, ch in enumerate(char_set)}  # char -> index

# Hyperparameters
learning_rate = 0.1
num_epochs = 500
input_size = len(char_set)   # width of each one-hot input vector
hidden_size = len(char_set)  # LSTM hidden units (same as vocab size here)
num_classes = len(char_set)  # output classes, one per character
sequence_length = 10         # window length of each training sample
num_layers = 2               # stacked LSTM layers

# Slide a window over the sentence: each input window is paired with the
# same window shifted one character to the right (next-char prediction).
dataX = []
dataY = []
for start in range(len(sentence) - sequence_length):
    window = sentence[start:start + sequence_length]
    target = sentence[start + 1:start + sequence_length + 1]
    print(start, window, '->', target)
    dataX.append([char_dic[ch] for ch in window])
    dataY.append([char_dic[ch] for ch in target])

batch_size = len(dataX)  # number of training samples

# Convert to torch tensors (targets must be Long for CrossEntropyLoss).
x_data = torch.Tensor(dataX)
y_data = torch.LongTensor(dataY)

# one hot encoding

# 独热，维度调整成  样本数，时间步，特征
def one_hot(x, num_classes):
    """Return a float one-hot encoding of an index tensor.

    x: (batch, seq_len) tensor of class indices (any numeric dtype).
    num_classes: number of classes, i.e. the width of each one-hot vector.
    Returns: (batch, seq_len, num_classes) float32 tensor.
    """
    # Row i of the identity matrix is exactly the one-hot vector for
    # class i, so fancy-indexing the identity with the (long) index
    # tensor encodes every position in one step.
    return torch.eye(num_classes)[x.long()]


# One-hot encode the inputs: (batch, seq_len, num_classes).
x_one_hot = one_hot(x_data, num_classes)

# torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
# plain tensors carry autograd state themselves, so use them directly.
inputs = x_one_hot
labels = y_data


class LSTM(nn.Module):
    """Stacked LSTM followed by a linear readout layer.

    forward() takes a (batch, seq_len, input_size) tensor and returns
    (batch * seq_len, num_classes) logits, flattened so they can be fed
    directly to CrossEntropyLoss against flattened targets.
    """

    def __init__(self, num_classes, input_size, hidden_size, num_layers,
                 sequence_length=10):
        """
        num_classes: size of the output vocabulary.
        input_size: feature dimension of each time step (one-hot width).
        hidden_size: LSTM hidden units per layer.
        num_layers: number of stacked LSTM layers.
        sequence_length: expected window length. Kept for reference only —
            forward() works with any sequence length. (Previously this was
            silently read from a module-level global; it is now an explicit
            parameter with the same default value.)
        """
        super(LSTM, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.sequence_length = sequence_length
        # batch_first=True: inputs/outputs are (batch, seq_len, feature)
        # instead of the default (seq_len, batch, feature).
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        # Maps each per-step hidden state to per-class logits.
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        """Run the LSTM over x and return flattened per-step logits."""
        # Zero initial hidden/cell states, shape (num_layers, batch, hidden).
        # new_zeros() matches x's device and dtype — the original always
        # allocated CPU float32 tensors, which breaks GPU/half runs.
        h_0 = x.new_zeros(self.num_layers, x.size(0), self.hidden_size)
        c_0 = x.new_zeros(self.num_layers, x.size(0), self.hidden_size)

        # out: (batch, seq_len, hidden_size) — the output of every time step;
        # the final-step states returned by lstm() are not needed here.
        out, _ = self.lstm(x, (h_0, c_0))

        # Flatten time into the batch dimension for the linear layer.
        # reshape() copes with the non-contiguous LSTM output (equivalent
        # to .contiguous().view()).
        out = out.reshape(-1, self.hidden_size)
        return self.fc(out)


# Instantiate the RNN model.
lstm = LSTM(num_classes, input_size, hidden_size, num_layers)

# CrossEntropyLoss applies log-softmax internally, so the model emits raw logits.
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)

# Full-batch training: every window in the sentence is one sample.
for epoch in range(num_epochs):
    # Clear stale gradients before the forward/backward pass.
    optimizer.zero_grad()
    outputs = lstm(inputs)
    # Outputs are (batch * seq_len, num_classes); flatten the targets to match.
    loss = criterion(outputs, labels.view(-1))
    loss.backward()
    optimizer.step()

    # Greedy decode: index of the max logit at every position.
    # .detach() replaces the deprecated .data attribute before .numpy().
    idx = outputs.detach().argmax(1).numpy()
    idx = idx.reshape(-1, sequence_length)  # (batch, seq_len)
    # Display the prediction for the last window only.
    result_str = [char_set[c] for c in idx[-1]]
    print("epoch: %d, loss: %1.3f" % (epoch + 1, loss.item()))
    print("Predicted string: ", ''.join(result_str))

print("Learning finished!")



######################
# 2 pytorch contiguous的使用
#
# contiguous一般与transpose，permute，view搭配使用：使用transpose或permute进行维度变换后，调用contiguous，然后方可使用view对维度进行变形（如：tensor_var.contiguous().view() ），示例如下：
#
# x = torch.Tensor(2,3)
# y = x.permute(1,0)         # permute：二维tensor的维度变换，此处功能相当于转置transpose
# y.view(-1)                 # 报错，view使用前需调用contiguous()函数
# y = x.permute(1,0).contiguous()
# y.view(-1)                 # OK
# 具体原因有两种说法：
#
# 1 transpose、permute等维度变换操作后，tensor在内存中不再是连续存储的，而view操作要求tensor的内存连续存储，所以需要contiguous来返回一个contiguous copy；
#
# 2 维度变换后的变量是之前变量的浅拷贝，指向同一区域，即view操作会连带原来的变量一同变形，这是不合法的，所以也会报错；---- 这个解释有部分道理，也即contiguous返回了tensor的深拷贝contiguous copy数据；
#
# 3 contiguous函数分析，参考CSDN博客
#
# 在pytorch中，只有很少几个操作是不改变tensor的内容本身，而只是重新定义下标与元素的对应关系。换句话说，这种操作不进行数据拷贝和数据的改变，变的是元数据，这些操作是：
#
# narrow()，view()，expand()，transpose()；
# 举个栗子，在使用transpose()进行转置操作时，pytorch并不会创建新的、转置后的tensor，而是修改了tensor中的一些属性（也就是元数据），使得此时的offset和stride是与转置tensor相对应的，而转置的tensor和原tensor的内存是共享的！
#
# 为了证明这一点，我们来看下面的代码：
#
# x = torch.randn(3, 2)
# y = x.transpose(0, 1)          # 等价于 torch.transpose(x, 0, 1)
# x[0, 0] = 233
# print(y[0, 0])       # print 233
# 可以看到，改变了x的元素的值的同时，y的元素的值也发生了变化；也即，经过上述操作后得到的tensor，它内部数据的布局方式和从头开始创建一个常规的tensor的布局方式是不一样的！于是就有contiguous()的用武之地了。
#
# 在上面的例子中，x是contiguous的，但y不是（因为内部数据不是通常的布局方式）。注意：不要被contiguous的字面意思“连续的”误解，tensor中数据还是在内存中一块区域里，只是布局的问题！
#
# 当调用contiguous()时，会强制拷贝一份tensor，让它的布局和从头创建的一模一样；
#
# 一般来说这一点不用太担心，如果你没在需要调用contiguous()的地方调用contiguous()，运行时会提示你：
#
# RuntimeError: input is not contiguous
# 只要看到这个错误提示，加上contiguous()就好啦～
#
# 4 其他
#
# 4.1 is_contiguous()函数
#
# is_contiguous() → bool
#
# Returns True if self tensor is contiguous in memory in C order.
# is_contiguous() → bool
#
# 如果该tensor在内存中是连续的则返回True；
# pytorch里面的 contiguous() 是以 C 为顺序保存在内存里面，如果不是，则返回一个以 C 为顺序保存的tensor：
#
# tensor_var.is_contiguous()           # 可以用来判断tensor是否以 C 为顺序保存的
# 一些可能导致不是以 C 为顺序保存的可能为：
#
# import torch
# x = torch.ones(10, 10)
# x.is_contiguous()                                                   # True
# x.transpose(0, 1).is_contiguous()                          # False，transpose会改变tensor变量内存的布局方式
# x.transpose(0, 1).contiguous().is_contiguous()     # True
# 4.2 view()、reshape()函数的差异
#
# 在pytorch 0.4中，增加了torch.reshape()，与 numpy.reshape() 的功能类似，大致相当于 tensor.contiguous().view()，这样就省去了对tensor做view()变换前，调用contiguous()的麻烦；