import torch
import torch.nn as nn

from MyEfficientNet import cbam_EfficientNet

class Net_b5(nn.Module):
    """EfficientNet-B5 (CBAM variant) backbone followed by a 10-way linear head."""

    def __init__(self):
        super().__init__()
        # Pretrained CBAM-augmented EfficientNet-B5 backbone.
        self.model = cbam_EfficientNet.from_pretrained('efficientnet-b5', num_classes=1000)
        # NOTE(review): the head is sized from the backbone's internal fc
        # in_features, while the backbone is configured with num_classes=1000;
        # presumably cbam_EfficientNet returns pre-fc features — confirm.
        num_ftrs = self.model._fc.in_features
        self.fc = nn.Linear(num_ftrs, 10)

    def forward(self, img):
        """Run the backbone on `img`, then project to 10 class logits."""
        features = self.model(img)
        logits = self.fc(features)
        return logits

# Tried concatenating the b5 and b2 feature maps and adding an attention
# mechanism on top; the results were not great.
class Net_multi_model(nn.Module):
    """Two-backbone ensemble: EfficientNet-B5 and B2 outputs are concatenated,
    gated by an SE-style channel attention, and classified by a 10-way head.
    """

    def __init__(self):
        super(Net_multi_model, self).__init__()

        self.model1 = cbam_EfficientNet.from_pretrained('efficientnet-b5', num_classes=1000)
        self.model2 = cbam_EfficientNet.from_pretrained('efficientnet-b2', num_classes=1000)
        num_ftrs = self.model1._fc.in_features + self.model2._fc.in_features
        # self.dropout = nn.Dropout(0.5)
        # Squeeze-and-excitation style attention over the concatenated
        # features: bottleneck with reduction 16, sigmoid gate in [0, 1].
        self.attention = nn.Sequential(nn.Linear(num_ftrs, num_ftrs // 16),
                                       nn.ReLU(),
                                       nn.Linear(num_ftrs // 16, num_ftrs),
                                       nn.Sigmoid())
        self.fc = nn.Linear(num_ftrs, 10)

    def forward(self, img):
        """Concatenate both backbones' outputs, gate them with attention,
        and return the 10-way logits."""
        out1 = self.model1(img)
        out2 = self.model2(img)
        out = torch.cat((out1, out2), 1)
        atten = self.attention(out)
        # out = self.dropout(out)
        # BUG FIX: the gated features were previously computed into a local
        # (named `input`, shadowing the builtin) and then discarded — the head
        # was fed the un-gated `out`, making the attention branch dead code.
        weighted = torch.mul(atten, out)
        out = self.fc(weighted)
        return out

