import torch
from torch import nn
from .wargs import WorkerArguments

class LSTMEncoder(nn.Module):
    """Batch-first LSTM encoder.

    Returns the per-step outputs (with forward/backward directions summed
    when bidirectional) and the LSTM's final hidden state.
    """

    def __init__(self, rnn_num_layers, input_feature_len, sequence_len, hidden_size, bidirectional, device):
        super().__init__()
        self.sequence_len = sequence_len
        self.hidden_size = hidden_size
        self.input_feature_len = input_feature_len
        self.num_layers = rnn_num_layers
        self.rnn_directions = 2 if bidirectional else 1
        self.device = device
        self.lstm = nn.LSTM(
            num_layers=rnn_num_layers,
            input_size=input_feature_len,
            hidden_size=hidden_size,
            batch_first=True,
            bidirectional=bidirectional,
        )

    def forward(self, input_seq: torch.Tensor):
        """Encode ``input_seq`` of shape (batch, seq_len[, features]).

        A 2-D input is treated as single-feature and expanded to 3-D.
        Returns ``(lstm_out, hidden)``: ``lstm_out`` is
        (batch, seq_len, hidden_size); ``hidden`` is the final hidden state
        with its leading dim squeezed away when it is 1.
        """
        batch_size = input_seq.size(0)
        # Zero-initialized hidden and cell states, created directly on the
        # target device instead of allocating on CPU and moving.
        ht = torch.zeros(
            self.num_layers * self.rnn_directions,
            batch_size,
            self.hidden_size,
            device=self.device,
        )
        ct = ht.clone()
        if input_seq.ndim < 3:
            # BUGFIX: was `input_seq.unsqueeze_(2)`, which mutates the
            # caller's tensor in place (and raises on leaf tensors that
            # require grad). Use the out-of-place version.
            input_seq = input_seq.unsqueeze(2)
        lstm_out, (ht, ct) = self.lstm(input_seq, (ht, ct))
        if self.rnn_directions > 1:
            # Sum the two direction slices so the output stays hidden_size wide.
            lstm_out = lstm_out.view(batch_size, self.sequence_len, self.rnn_directions, self.hidden_size)
            lstm_out = torch.sum(lstm_out, dim=2)
        # squeeze(0) only removes the layer dim for a single-layer
        # unidirectional LSTM; it is a no-op otherwise.
        return lstm_out, ht.squeeze(0)
 
class AttentionDecoderCell(nn.Module):
    """Single decoding step with attention pooling over encoder outputs.

    Attention scores are produced from (prev_hidden, y), used to pool the
    encoder outputs into a context vector that feeds an LSTMCell; the new
    hidden state is projected back to the input feature space.
    """

    def __init__(self, input_feature_len, sequence_len, hidden_size):
        super().__init__()
        # attention - inputs - (decoder_inputs, prev_hidden)
        self.attention_linear = nn.Linear(hidden_size + input_feature_len, sequence_len)
        # attention_combine - inputs - (decoder_inputs, attention * encoder_outputs)
        self.decoder_rnn_cell = nn.LSTMCell(
            input_size=hidden_size,
            hidden_size=hidden_size,
        )
        self.out = nn.Linear(hidden_size, input_feature_len)

    def forward(self, encoder_output: torch.Tensor, prev_hidden: torch.Tensor, y: torch.Tensor):
        """Run one decoding step.

        encoder_output: (batch, seq_len, hidden_size) encoder outputs.
        prev_hidden:    (batch, hidden_size) previous decoder hidden state;
                        a 3-D ([layers,] batch, hidden) input keeps only the
                        last layer.
        y:              (batch, input_feature_len) previous step's output.
        Returns ``(output, new_hidden)``.
        """
        if prev_hidden.ndimension() == 3:
            prev_hidden = prev_hidden[-1]  # keep only the last layer's information
        attention_input = torch.cat((prev_hidden, y), dim=1)
        # Softmax over the seq_len encoder positions: (batch, 1, seq_len).
        attention_weights = torch.softmax(self.attention_linear(attention_input), dim=-1).unsqueeze(1)
        # Attention-weighted sum of encoder outputs: (batch, hidden_size).
        attention_combine = torch.bmm(attention_weights, encoder_output).squeeze(1)
        # BUGFIX: the original unpacked both LSTMCell outputs into the same
        # name (`rnn_hidden, rnn_hidden = ...`), silently discarding the
        # hidden state and propagating the *cell* state instead. LSTMCell
        # returns (h, c); keep and use h.
        rnn_hidden, rnn_cell = self.decoder_rnn_cell(attention_combine, (prev_hidden, prev_hidden))
        output = self.out(rnn_hidden)
        return output, rnn_hidden
 
 
class Model(nn.Module):
    """LSTM encoder paired with either a step-wise attention decoder or a
    one-shot fully-connected decoder, selected by ``opt.decoder``.
    """

    def __init__(self, opt: WorkerArguments, num_layers=2, teacher_forcing=0.3):
        super().__init__()
        self.output_size = opt.dec_in
        self.input_size = opt.enc_in
        self.pred_len = opt.predict_step
        self.device = opt.device
        # NOTE(review): `opt.input_size` is used as the encoder sequence
        # length (naming follows WorkerArguments — confirm against its
        # definition). The encoder is unidirectional.
        self.encoder = LSTMEncoder(num_layers, self.input_size, opt.input_size, opt.d_model, False, self.device)
        self.decoder_type = opt.decoder
        if self.decoder_type == "attention":
            self.decoder_cell = AttentionDecoderCell(self.input_size, opt.input_size, opt.d_model)
            self.teacher_forcing = teacher_forcing
        elif self.decoder_type == "FC":
            # Maps [last encoder output ; last-layer hidden] straight to the
            # whole prediction window in one shot.
            self.decoder_cell = nn.Linear(opt.d_model * 2, self.pred_len * self.output_size, bias=False)
        else:
            # BUGFIX: fail fast. The original left `self.decoder_cell`
            # undefined and forward() later died with UnboundLocalError.
            raise ValueError(f"unknown decoder type: {self.decoder_type!r}")

    def forward(self, xb: torch.Tensor, xb_mark: torch.Tensor, yb: torch.Tensor, yb_mark: torch.Tensor, pretrain):
        """Predict ``pred_len`` steps from ``xb`` of shape (batch, seq_len, enc_in).

        BUGFIX: this was defined as ``__call__``, which bypasses
        nn.Module's hook machinery; ``nn.Module.__call__`` dispatches to
        ``forward``, so ``model(...)`` call sites are unchanged.

        xb_mark / yb_mark / pretrain are accepted for interface
        compatibility but unused; yb would only be consumed by teacher
        forcing, which is currently disabled.
        Returns a tensor of shape (batch, pred_len, dec_in).
        """
        input_seq = xb
        encoder_output, encoder_hidden = self.encoder(input_seq)
        if self.decoder_type == 'attention':
            prev_hidden = encoder_hidden
            outputs = torch.zeros(self.pred_len, input_seq.size(0), self.output_size).to(self.device)
            # Seed autoregressive decoding with the last observed input step.
            y_prev = input_seq[:, -1, :]
            for i in range(self.pred_len):
                # NOTE(review): teacher forcing (feeding yb[:, i] with
                # probability self.teacher_forcing) is deliberately disabled:
                #if (yb is not None) and (i > 0) and (torch.rand(1) < self.teacher_forcing):
                #    y_prev = yb[:, i]
                rnn_output, prev_hidden = self.decoder_cell(encoder_output, prev_hidden, y_prev)
                y_prev = rnn_output
                outputs[i, :, :] = rnn_output[:, -self.output_size:]
            outputs = outputs.permute(1, 0, 2)  # -> (batch, pred_len, dec_in)
        elif self.decoder_type == 'FC':
            encoder_output = encoder_output[:, -1, :]   # last time step
            encoder_hidden = encoder_hidden[-1, :, :]   # last layer's hidden state
            decoder_input = torch.cat((encoder_output, encoder_hidden), dim=1)
            outputs = self.decoder_cell(decoder_input).view(xb.size(0), self.pred_len, self.output_size)
        return outputs