import torch.nn as nn
import torch
from torch.autograd import Variable
import shutil
from pathlib import Path


class Predictor(nn.Module):
    """Encoder -> RNN -> decoder sequence predictor.

    A linear encoder projects each timestep's features to the RNN input
    size, a multi-layer recurrent core (LSTM/GRU/RNN, selected by name)
    processes the sequence, and a linear decoder projects the hidden
    states back to the output feature space.
    """

    def __init__(self, rnn_type, encoder_input_size, rnn_input_size, rnn_hidden_size, decoder_output_size, layers_num,
                 dropout=0.5):
        """
        :param rnn_type: name of the recurrent module in torch.nn, e.g. 'LSTM', 'GRU' or 'RNN'
        :param encoder_input_size: feature dimension of the raw input
        :param rnn_input_size: output size of the encoder / input size of the RNN
        :param rnn_hidden_size: hidden size of the RNN
        :param decoder_output_size: feature dimension of the decoded output
        :param layers_num: number of stacked RNN layers
        :param dropout: dropout probability used by the RNN and the embedding/output dropout
        """
        super(Predictor, self).__init__()
        self.encoder_input_size = encoder_input_size
        # BUG FIX: rnn_input_size must be remembered — forward() needs it to
        # reshape the encoder output (the original reshaped with the hidden size).
        self.rnn_input_size = rnn_input_size
        self.rnn_hidden_size = rnn_hidden_size
        self.encoder = nn.Linear(encoder_input_size, rnn_input_size)  # fully-connected encoder
        self.rnn = getattr(nn, rnn_type)(rnn_input_size, rnn_hidden_size, layers_num, dropout=dropout)
        self.decoder = nn.Linear(rnn_hidden_size, decoder_output_size)  # fully-connected decoder
        self.init_weights()  # initialize the weights/biases of the linear layers
        self.layers_num = layers_num
        self.dropout = nn.Dropout(dropout)  # Bernoulli-sampled random zeroing of activations

    def init_weights(self):
        """Initialize encoder/decoder weights from U(-0.1, 0.1) and zero the biases."""
        init_range = 0.1
        self.encoder.weight.data.uniform_(-init_range, init_range)  # fill by sampling a uniform distribution
        self.encoder.bias.data.fill_(0)
        self.decoder.weight.data.uniform_(-init_range, init_range)
        self.decoder.bias.data.fill_(0)

    def forward(self, input_data, hidden, if_return_hidden=False):
        """
        Forward pass.

        :param input_data: tensor of shape (seq_len, batch, encoder_input_size)
        :param hidden: initial hidden state as produced by init_hidden()
        :param if_return_hidden: if True, also return the dropped-out RNN output
        :return: (decoded, hidden) or (decoded, hidden, output)
        """
        # Encode every timestep, then apply dropout.
        embedding = self.dropout(self.encoder(input_data.contiguous().view(-1, self.encoder_input_size)))
        # BUG FIX: reshape with the RNN *input* size, not the hidden size — the
        # encoder emits rnn_input_size features per timestep, so the original
        # view(-1, batch, rnn_hidden_size) failed whenever the two sizes differ.
        embedding = embedding.view(-1, input_data.size(1), self.rnn_input_size)

        # Recurrent core.
        output, hidden = self.rnn(embedding, hidden)
        output = self.dropout(output)  # dropped-out hidden-state sequence

        # Decode the (dropped-out) RNN output back to feature space.
        decoded = self.decoder(output.view(output.size(0) * output.size(1), output.size(2)))
        decoded = decoded.view(output.size(0), output.size(1), decoded.size(1))

        if if_return_hidden:
            return decoded, hidden, output
        return decoded, hidden

    def init_hidden(self, batch_size):
        """
        Build a zeroed initial hidden state for a batch.

        LSTMs take an (h, c) tuple; GRU/vanilla RNN take a single tensor —
        the original unconditionally returned a tuple, which broke every
        non-LSTM rnn_type the constructor otherwise supports.

        :param batch_size: batch dimension of the state tensors
        :return: (h0, c0) for LSTM, otherwise a single h0 tensor
        """
        # new_zeros inherits dtype/device from the parameters; Variable is
        # deprecated since PyTorch 0.4 and adds nothing here.
        weight = next(self.parameters())
        shape = (self.layers_num, batch_size, self.rnn_hidden_size)
        if isinstance(self.rnn, nn.LSTM):
            return (weight.new_zeros(shape), weight.new_zeros(shape))
        return weight.new_zeros(shape)

    def repackage_hidden(self, hidden):
        """
        Detach hidden states from their history, truncating backpropagation here.

        :param hidden: a tensor or (possibly nested) tuple of tensors
        :return: the same structure with every tensor detached
        """
        if isinstance(hidden, tuple):
            return tuple(self.repackage_hidden(v) for v in hidden)
        return hidden.detach()  # cut past gradients; this becomes the BPTT start point

    def save_checkpoint(self, model_dictionary, is_best_loss):
        """
        Save the model state at a checkpoint, and mirror it as the best model on improvement.

        :param model_dictionary: dict holding at least 'args' (with .data and .filename) and the state to persist
        :param is_best_loss: if True, also copy the checkpoint to the model_best directory
        :return: None
        """
        print('=> Saving arguments of the model at checkpoint...')

        # Save the current checkpoint.
        args = model_dictionary['args']
        checkpoint_dir = Path('save', args.data, 'checkpoint')
        checkpoint_dir.mkdir(parents=True, exist_ok=True)
        checkpoint = checkpoint_dir.joinpath(args.filename).with_suffix('.pth')
        torch.save(model_dictionary, checkpoint)

        # Update the best model so far.
        if is_best_loss:
            model_best_dir = Path('save', args.data, 'model_best')
            model_best_dir.mkdir(parents=True, exist_ok=True)
            shutil.copyfile(checkpoint, model_best_dir.joinpath(args.filename).with_suffix('.pth'))

        print('=> Save successfully!')

    def load_checkpoint(self, checkpoint, feature_dim):
        """
        Rebuild this model from a checkpoint dict and load its parameters.

        :param checkpoint: dict with 'args' (model hyper-parameters) and 'state_dict'
        :param feature_dim: feature dimension used for both encoder input and decoder output
        :return: None
        """
        args = checkpoint['args']
        # BUG FIX: __init__ returns None, so the original
        # `self.__init__(...).to(args.device)` raised AttributeError on every
        # call. Re-initialize first, then move the module.
        self.__init__(rnn_type=args.model,
                      encoder_input_size=feature_dim,
                      rnn_input_size=args.input_size,
                      rnn_hidden_size=args.units_num,
                      decoder_output_size=feature_dim,
                      layers_num=args.layers_num,
                      dropout=args.dropout)  # rebuild the model
        self.to(args.device)
        self.load_state_dict(checkpoint['state_dict'])  # load the parameters
