import torch
from torch import nn
from .attention import Attention, MyAttentionLayer


class Bigru_self_attention_model(nn.Module):
    """Three stacked bidirectional GRU blocks, each followed by a
    self-attention layer.

    The attention output of every block is concatenated along dim 1,
    flattened, and classified by a linear layer.  ``forward`` returns both
    the logits and the concatenated attention features so callers can reuse
    the features (e.g. for auxiliary losses or inspection).
    """

    def __init__(self, configs):
        super(Bigru_self_attention_model, self).__init__()

        # hidden_size=9 and bidirectional=True -> each GRU emits 18 features
        # per time step.  Blocks 2 and 3 declare input_size =
        # configs.input_channels * 2, which only lines up when
        # configs.input_channels == 9 — TODO(review): confirm against the
        # configs actually used with this model.
        # The `dropout` argument is intentionally omitted: with num_layers=1
        # it has no effect and only raises a PyTorch UserWarning.
        self.bigru_block1 = nn.GRU(configs.input_channels, 9, 1,
                                   batch_first=True, bidirectional=True)
        self.attention_layer1 = Attention(configs.features_len, heads=1, dropout=configs.dropout)

        self.bigru_block2 = nn.GRU(configs.input_channels * 2, 9, 1,
                                   batch_first=True, bidirectional=True)
        self.attention_layer2 = Attention(configs.features_len, heads=1, dropout=configs.dropout)

        self.bigru_block3 = nn.GRU(configs.input_channels * 2, 9, 1,
                                   batch_first=True, bidirectional=True)
        self.attention_layer3 = Attention(configs.features_len, heads=1, dropout=configs.dropout)

        # The factor of 3 matches the three concatenated attention outputs.
        model_output_dim = configs.features_len
        self.logits = nn.Linear(model_output_dim * configs.final_out_channels * 3,
                                configs.num_classes)

    def forward(self, x_in):
        """Run the GRU/attention stack.

        Args:
            x_in: batch-first sequence tensor — presumably shaped
                (batch, seq_len, configs.input_channels) given
                batch_first=True; confirm against the caller.

        Returns:
            (logits, final_output): class logits and the attention features
            of all three blocks concatenated along dim 1.
        """
        # Block 1: GRU, then self-attention over its full sequence output.
        output1, _ = self.bigru_block1(x_in)
        output_1 = self.attention_layer1(output1)

        # NOTE: each subsequent block consumes the previous block's raw GRU
        # output, not its attended output; the attended outputs only feed
        # the final concatenation.
        output2, _ = self.bigru_block2(output1)
        output_2 = self.attention_layer2(output2)

        output3, _ = self.bigru_block3(output2)
        output_3 = self.attention_layer3(output3)

        final_output = torch.cat((output_1, output_2, output_3), 1)

        # Flatten everything but the batch dimension for the classifier.
        x_flat = final_output.reshape(final_output.shape[0], -1)
        logits = self.logits(x_flat)
        return logits, final_output


# biblstm_self_attention_model
class biblstm_self_attention_model(nn.Module):
    """Three stacked bidirectional LSTM blocks, each followed by a
    self-attention layer.

    LSTM counterpart of the BiGRU model in this file: per-block attention
    outputs are concatenated along dim 1, flattened, and classified by a
    linear layer.  ``forward`` returns both the logits and the concatenated
    attention features.
    """

    def __init__(self, configs):
        super(biblstm_self_attention_model, self).__init__()

        # hidden_size=9 and bidirectional=True -> each LSTM emits 18 features
        # per time step.  Blocks 2 and 3 declare input_size =
        # configs.input_channels * 2, which only lines up when
        # configs.input_channels == 9 — TODO(review): confirm against the
        # configs actually used with this model.
        # The `dropout` argument is intentionally omitted: with num_layers=1
        # it has no effect and only raises a PyTorch UserWarning.
        self.bilstm_block1 = nn.LSTM(configs.input_channels, 9, 1,
                                     batch_first=True, bidirectional=True)
        self.attention_layer1 = Attention(configs.features_len, heads=1, dropout=configs.dropout)

        self.bilstm_block2 = nn.LSTM(configs.input_channels * 2, 9, 1,
                                     batch_first=True, bidirectional=True)
        self.attention_layer2 = Attention(configs.features_len, heads=1, dropout=configs.dropout)

        self.bilstm_block3 = nn.LSTM(configs.input_channels * 2, 9, 1,
                                     batch_first=True, bidirectional=True)
        self.attention_layer3 = Attention(configs.features_len, heads=1, dropout=configs.dropout)

        # The factor of 3 matches the three concatenated attention outputs.
        model_output_dim = configs.features_len
        self.logits = nn.Linear(model_output_dim * configs.final_out_channels * 3,
                                configs.num_classes)

    def forward(self, x_in):
        """Run the LSTM/attention stack.

        Args:
            x_in: batch-first sequence tensor — presumably shaped
                (batch, seq_len, configs.input_channels) given
                batch_first=True; confirm against the caller.

        Returns:
            (logits, final_output): class logits and the attention features
            of all three blocks concatenated along dim 1.
        """
        # Block 1: LSTM, then self-attention over its full sequence output.
        output1, _ = self.bilstm_block1(x_in)
        output_1 = self.attention_layer1(output1)

        # NOTE: each subsequent block consumes the previous block's raw LSTM
        # output, not its attended output; the attended outputs only feed
        # the final concatenation.
        output2, _ = self.bilstm_block2(output1)
        output_2 = self.attention_layer2(output2)

        output3, _ = self.bilstm_block3(output2)
        output_3 = self.attention_layer3(output3)

        final_output = torch.cat((output_1, output_2, output_3), 1)

        # Flatten everything but the batch dimension for the classifier.
        x_flat = final_output.reshape(final_output.shape[0], -1)
        logits = self.logits(x_flat)
        return logits, final_output





class Bigru_self_attention_backup_model(nn.Module):
    """Five stacked bidirectional GRU blocks, each with its own
    self-attention layer.

    The five attention outputs are concatenated along dim 1, flattened, and
    classified by a linear layer.  ``forward`` returns both the logits and
    the concatenated attention features.
    """

    def __init__(self, configs):
        super(Bigru_self_attention_backup_model, self).__init__()

        # hidden_size=9 and bidirectional=True -> each GRU emits 18 features
        # per time step.  Blocks 2-5 declare input_size =
        # configs.input_channels * 2, which only lines up when
        # configs.input_channels == 9 — TODO(review): confirm against the
        # configs actually used with this model.
        # The `dropout` argument is intentionally omitted: with num_layers=1
        # it has no effect and only raises a PyTorch UserWarning.
        self.bigru_block1 = nn.GRU(configs.input_channels, 9, 1, batch_first=True, bidirectional=True)
        self.attention_layer = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block2 = nn.GRU(configs.input_channels * 2, 9, 1, batch_first=True, bidirectional=True)
        self.attention_layer2 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block3 = nn.GRU(configs.input_channels * 2, 9, 1, batch_first=True, bidirectional=True)
        self.attention_layer3 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block4 = nn.GRU(configs.input_channels * 2, 9, 1, batch_first=True, bidirectional=True)
        self.attention_layer4 = Attention(configs.features_len, heads=1, dropout=0.1)

        self.bigru_block5 = nn.GRU(configs.input_channels * 2, 9, 1, batch_first=True, bidirectional=True)
        self.attention_layer5 = Attention(configs.features_len, heads=1, dropout=0.1)

        # The factor of 5 matches the five concatenated attention outputs.
        model_output_dim = configs.features_len
        self.logits = nn.Linear(model_output_dim * configs.final_out_channels * 5, configs.num_classes)

    def forward(self, x_in):
        """Run the five-block GRU/attention stack.

        Args:
            x_in: batch-first sequence tensor — presumably shaped
                (batch, seq_len, configs.input_channels) given
                batch_first=True; confirm against the caller.

        Returns:
            (logits, final_output): class logits and the attention features
            of all five blocks concatenated along dim 1.
        """
        output1, _ = self.bigru_block1(x_in)
        output_1 = self.attention_layer(output1)

        # BUGFIX: the previous forward routed blocks 3-5 through
        # self.bigru_block2 and every attention call through
        # self.attention_layer, leaving bigru_block3/4/5 and
        # attention_layer2..5 initialized but never used.  Each stage now
        # uses its own module, matching the non-backup models in this file.
        output2, _ = self.bigru_block2(output1)
        output_2 = self.attention_layer2(output2)

        output3, _ = self.bigru_block3(output2)
        output_3 = self.attention_layer3(output3)

        output4, _ = self.bigru_block4(output3)
        output_4 = self.attention_layer4(output4)

        output5, _ = self.bigru_block5(output4)
        output_5 = self.attention_layer5(output5)

        final_output = torch.cat((output_1, output_2, output_3, output_4, output_5), 1)

        # Flatten everything but the batch dimension for the classifier.
        x_flat = final_output.reshape(final_output.shape[0], -1)
        logits = self.logits(x_flat)
        return logits, final_output