import torch.nn.functional as F

from training.models.efficientnet_blocks import ConvBnAct
from training.models.gbb import GradientBoostNet
from training.models.sifdnet import _upsample_like
from training.models.sifdnetv2 import SIFDNetV2, ALAM

__all__ = ['sifdnet_v2_2']


class SIFDNetV2_2(SIFDNetV2):
    """SIFDNetV2 variant with a Sobel gradient-boost stream.

    Differences from the parent:
      * ``alam2`` is re-registered to operate at level 2 with equal in/out
        channels and a fixed (32, 32) attention size.
      * a ``GradientBoostNet`` (Sobel filters) fuses levels 0/2/3 before ASPP.
      * ``fusion_conv`` is registered but its concat-fusion path is currently
        disabled in ``forward`` (kept so checkpoints stay loadable).
    """

    def __init__(self, num_classes, map_classes, drop_rate, pretrained):
        super().__init__(num_classes, map_classes, drop_rate, pretrained)
        # Re-register level-2 ALAM (overrides the parent's module of the same name).
        self.alam2 = ALAM(in_channels=self.num_chs[2], out_channels=self.num_chs[2], size=(32, 32))
        self.sobel_stream = GradientBoostNet(self.num_chs, filter_type="sobel")
        # Registered but unused in forward() at the moment; see the disabled
        # concat-fusion path there. Kept for state_dict compatibility.
        self.fusion_conv = ConvBnAct(self.num_chs[4] * 2, self.num_chs[4], kernel_size=1, pad_type='same')

    def forward(self, inputs):
        """Return (class logits, predicted masks, [aux masks 1..3])."""
        feats = self.encoder(inputs)

        # Attention-refine selected encoder levels; each ALAM also emits an
        # auxiliary mask used for deep supervision.
        feats[3], aux1 = self.alam1(feats[3], feats[4])
        # NOTE(review): alam2 receives feats[2] as BOTH arguments (self-guided),
        # unlike alam1/alam3 which pair adjacent levels — confirm intentional.
        feats[2], aux2 = self.alam2(feats[2], feats[2])
        feats[0], aux3 = self.alam3(feats[0], feats[1])

        # Gradient-boosted fusion of shallow/mid levels, summed with the
        # deepest encoder feature. (Concat + fusion_conv variant disabled.)
        fused = self.sobel_stream(feats[0], feats[2], feats[3]) + feats[4]
        # fused = self.fusion_conv(torch.cat((fused, feats[4]), dim=1))
        aspp_feat = self.aspp(fused)

        # Classification head.
        pooled = self.global_pool(aspp_feat)
        if self.drop_rate > 0.:
            pooled = F.dropout(pooled, p=self.drop_rate, training=self.training)
        logits = self.classifier(pooled)

        # Segmentation head; auxiliary masks are resized to match its output.
        masks_pred = self.decoder(aspp_feat, feats[3], feats[2], feats[0])
        aux_masks = [_upsample_like(m, masks_pred) for m in (aux1, aux2, aux3)]

        return logits, masks_pred, aux_masks


def sifdnet_v2_2(num_classes=2, map_classes=2, drop_rate=0., pretrained=False):
    """Factory for :class:`SIFDNetV2_2`.

    Builds the model and copies the encoder's ``default_cfg`` onto it so the
    model exposes the same config attribute as a plain timm-style backbone.
    """
    net = SIFDNetV2_2(
        num_classes=num_classes,
        map_classes=map_classes,
        drop_rate=drop_rate,
        pretrained=pretrained,
    )
    net.default_cfg = net.encoder.default_cfg
    return net
