import torch
import torch.nn as nn
import torch.nn.functional as f
#from torchsummary import summary

class CNN_2D_model(nn.Module):
    """Four-stage 2-D CNN feature extractor.

    Each stage is Conv2d -> ReLU -> MaxPool2d(2), so spatial height and
    width are each reduced by a factor of 16 overall. Input is expected to
    be single-channel, (N, 1, H, W); output has 16 channels.
    """

    def __init__(self):
        super(CNN_2D_model, self).__init__()
        # Stage 1: 1 -> 32 channels
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Stage 2: 32 -> 16 channels
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Stage 3: 16 -> 8 channels
        self.conv3 = nn.Conv2d(in_channels=16, out_channels=8, kernel_size=3, stride=1, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)
        # Stage 4: 8 -> 16 channels
        self.conv4 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2, padding=0)

    def forward(self, x):
        """Apply the four conv/relu/pool stages in order."""
        stages = (
            (self.conv1, self.pool1),
            (self.conv2, self.pool2),
            (self.conv3, self.pool3),
            (self.conv4, self.pool4),
        )
        for conv, pool in stages:
            x = pool(f.relu(conv(x)))
        return x

class CRNN_1D_model(nn.Module):
    """1-D CRNN: four Conv1d/ReLU/MaxPool1d stages followed by two Bi-LSTMs.

    Input: (N, 1, L). The conv stack shrinks the length by 16x and ends
    with 32 channels; Bi-LSTM1 (hidden 64, bidirectional) produces 128
    features per step, which Bi-LSTM2 (hidden 32, bidirectional) reduces
    to 64. Output: (N, 64, L // 16), channels-first.
    """

    def __init__(self):
        super(CRNN_1D_model, self).__init__()
        # Stage 1: 1 -> 64 channels
        self.conv1 = nn.Conv1d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1)
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
        # Stage 2: 64 -> 32 channels
        self.conv2 = nn.Conv1d(in_channels=64, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.pool2 = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
        # Stage 3: 32 -> 16 channels
        self.conv3 = nn.Conv1d(in_channels=32, out_channels=16, kernel_size=3, stride=1, padding=1)
        self.pool3 = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
        # Stage 4: 16 -> 32 channels
        self.conv4 = nn.Conv1d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=1)
        self.pool4 = nn.MaxPool1d(kernel_size=2, stride=2, padding=0)
        # Bi-LSTM1: 32 conv channels in, 2 * 64 = 128 features out
        self.lstm1 = nn.LSTM(input_size=32, hidden_size=64, num_layers=1, batch_first=True, bidirectional=True)
        # Bi-LSTM2: 128 features in, 2 * 32 = 64 features out
        self.lstm2 = nn.LSTM(input_size=128, hidden_size=32, num_layers=1, batch_first=True, bidirectional=True)

    def forward(self, x):
        # Conv/ReLU/pool stages 1-4
        x = self.pool1(f.relu(self.conv1(x)))
        x = self.pool2(f.relu(self.conv2(x)))
        x = self.pool3(f.relu(self.conv3(x)))
        x = self.pool4(f.relu(self.conv4(x)))
        # (N, C, L) -> (N, L, C) to match the batch_first LSTM layout
        x = x.transpose(1, 2)
        # Bug fix: the original body had a bare name `c` here (NameError at
        # runtime) instead of running the first Bi-LSTM. lstm2 expects 128
        # input features, which is exactly lstm1's bidirectional output.
        x, _ = self.lstm1(x)
        x, _ = self.lstm2(x)
        # Back to channels-first: (N, 64, L // 16)
        return x.transpose(-1, -2)

class ECA_2D_model(nn.Module):
    """Efficient-channel-attention (ECA) gate on top of CNN_2D_model features.

    Channel descriptors from global average pooling are mixed by a 1-D conv
    across the channel axis, squashed through a sigmoid, and used to rescale
    the feature maps.
    """

    def __init__(self):
        super(ECA_2D_model, self).__init__()
        self.avg2_pool = nn.AdaptiveAvgPool2d(1)
        self.sigmoid = nn.Sigmoid()
        self.CNN2d = CNN_2D_model()
        # 1-D conv over channels, kernel 5 with "same" padding, no bias
        self.conv = nn.Conv1d(1, 1, kernel_size=5, padding=(5 - 1) // 2, bias=False)

    def forward(self, x):
        feats = self.CNN2d(x)
        # Per-channel descriptor: (N, C, 1, 1)
        attn = self.avg2_pool(feats)
        # View channels as a length-C sequence so the 1-D conv can mix
        # neighbouring channels, then restore the (N, C, 1, 1) layout.
        attn = self.conv(attn.squeeze(-1).transpose(-1, -2))
        attn = attn.transpose(-1, -2).unsqueeze(-1)
        gate = self.sigmoid(attn)
        # Rescale every feature map by its channel gate.
        return feats * gate.expand_as(feats)

class ECA_1D_model(nn.Module):
    """Efficient-channel-attention (ECA) gate on top of CRNN_1D_model features.

    Same scheme as the 2-D variant: average-pool each channel to a scalar,
    mix neighbouring channels with a 1-D conv, sigmoid, then rescale.
    """

    def __init__(self):
        super(ECA_1D_model, self).__init__()
        self.avg1_pool = nn.AdaptiveAvgPool1d(1)
        self.sigmoid = nn.Sigmoid()
        self.CRNN = CRNN_1D_model()
        # 1-D conv over channels, kernel 5 with "same" padding, no bias
        self.conv = nn.Conv1d(1, 1, kernel_size=5, padding=(5 - 1) // 2, bias=False)

    def forward(self, x):
        feats = self.CRNN(x)
        # Per-channel descriptor: (N, C, 1)
        attn = self.avg1_pool(feats)
        # Channels become the sequence axis for the mixing conv.
        attn = self.conv(attn.transpose(-1, -2)).transpose(-1, -2)
        gate = self.sigmoid(attn)
        # Rescale every channel by its gate.
        return feats * gate.expand_as(feats)

class Concat_Classifier(nn.Module):
    """Fuses the 1-D and 2-D ECA branches and classifies into two classes.

    Each branch is reduced to a per-channel max, the two channel vectors are
    concatenated (64 + 16 = 80 features) and passed through a linear layer
    with a sigmoid.
    """

    def __init__(self):
        super(Concat_Classifier, self).__init__()
        self.ECA_1D = ECA_1D_model()
        self.ECA_2D = ECA_2D_model()
        self.max2_poll = nn.AdaptiveMaxPool2d(1)
        self.max1_poll = nn.AdaptiveMaxPool1d(1)
        self.sigmoid = nn.Sigmoid()
        self.flatten = nn.Flatten()
        # 64 features from the 1-D branch + 16 from the 2-D branch = 80
        self.fc = nn.Linear(in_features=80*1, out_features=2)

    def forward(self, x, y):
        branch_1d = self.ECA_1D(x)  # (N, 64, L')
        branch_2d = self.ECA_2D(y)  # (N, 16, H', W')
        pooled_1d = self.max1_poll(branch_1d)              # (N, 64, 1)
        pooled_2d = self.max2_poll(branch_2d).squeeze(-1)  # (N, 16, 1)
        fused = self.flatten(torch.cat((pooled_1d, pooled_2d), dim=1))
        return self.sigmoid(self.fc(fused))


if __name__ == '__main__':
    # Smoke-test every sub-model with dummy batches on GPU if available.
    device = torch.device('cuda' if torch.cuda.is_available() else "cpu")
    print(device)
    # (N, 1, L) waveform-style input and (N, 1, H, W) image-style input
    x1 = torch.randn(100, 1, 3000).to(device)
    x2 = torch.randn(100, 1, 149, 39).to(device)

    model1 = CRNN_1D_model().to(device)
    out1 = model1(x1)
    print('module1', out1.shape)

    model2 = CNN_2D_model().to(device)
    out2 = model2(x2)
    print('module2', out2.shape)

    model3 = ECA_2D_model().to(device)
    out3 = model3(x2)
    print('module3', out3.shape)

    model4 = ECA_1D_model().to(device)
    out4 = model4(x1)
    print('module4', out4.shape)

    model5 = Concat_Classifier().to(device)
    # Repeated forward passes (presumably for timing/throughput inspection
    # — TODO confirm the 199-iteration loop is intentional).
    for _ in range(1, 200):
        out5 = model5(x1, x2)
    print('module5', out5.shape)
    print(out5)














