import torch
import torch.nn as nn
import time
from models.bifovnet.adcam import ADCAM
from models.bifovnet.cbam import CBAMBlock
from models.bifovnet.ffem import FFEM
from models.bifovnet.bf3m import BF3M


class Mlp(nn.Module):
    """Pointwise two-layer MLP over the channel axis of an NCHW tensor.

    The input is moved to channels-last so ``nn.Linear`` operates on the
    channel dimension, then the NCHW layout is restored on the way out
    (same scheme as the MLP in ViT / MLP-Mixer).
    """
    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        # Falsy (None/0) hidden/out sizes fall back to the input width.
        hidden_features = hidden_features or in_features
        out_features = out_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        # NCHW -> NHWC so the linear layers act on channels.
        y = x.permute(0, 2, 3, 1)
        y = self.drop(self.act(self.fc1(y)))
        y = self.drop(self.fc2(y))
        # NHWC -> NCHW.
        return y.permute(0, 3, 1, 2)


class Residual(nn.Module):
    """Additive skip connection around an arbitrary callable: ``y = fn(x) + x``."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        out = self.fn(x)
        return out + x

class BiFovNet(nn.Module):
    """Two-stream ("bi-foveal") hierarchical backbone.

    Two parallel 4-stage pipelines process two input views (x1, x2). At
    each stage the streams exchange information through a shared ADCAM
    cross-attention module (residual + BatchNorm), are refined by a stack
    of FFEM blocks, then gated per-stream by CBAM. After stage 4 the two
    streams are fused by BF3M and classified by a linear head.

    Channel widths are ``dim, 2*dim, 4*dim, 8*dim`` across stages; spatial
    resolution is ``H/patch_size`` after the stem and halves at each of
    the three stride-2 downsampling convolutions.

    Args:
        dim: base channel width of stage 1.
        depth: sequence of 4 ints — number of FFEM blocks per stage.
        kernel_size: spatial kernel size for the CBAM / BF3M attention.
        patch_size: stem patch-embedding kernel and stride.
        in_chans: channels of each input view.
        num_classes: classifier outputs; ``<= 0`` replaces the head with
            ``nn.Identity``.
        global_pool: accepted for API compatibility; pooling is always
            adaptive average pooling regardless of this value.
        drop_rate: dropout probability before the classification head.
        act_layer: activation constructor used in stems and downsamplers.
        H, W: expected input spatial size (sizes the ADCAM grids).
        p_h, p_w: per-stage ADCAM partition counts along H and W.
    """
    def __init__(
            self,
            dim,
            depth,
            kernel_size=7,
            patch_size=16,
            in_chans=3,
            num_classes=1000,
            global_pool='avg',
            drop_rate=0.,
            act_layer=nn.GELU,
            H=224,
            W=224,
            p_h=[8, 4, 2, 1],
            p_w=[8, 4, 2, 1],
            **kwargs,
    ):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = dim

        # --- Patch-embedding stems (one per stream): conv -> act -> BN ---
        self.stem_f1 = nn.Sequential(
            nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
            act_layer(),
            nn.BatchNorm2d(dim)
        )

        self.stem_f2 = nn.Sequential(
            nn.Conv2d(in_chans, dim, kernel_size=patch_size, stride=patch_size),
            act_layer(),
            nn.BatchNorm2d(dim)
        )

        # --- Stage 1: FFEM stacks + shared ADCAM + per-stream CBAM ---
        self.stage1_f1 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim, mlp_ratio=4.0),
            ) for i in range(depth[0])]
        )

        self.stage1_f2 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim, mlp_ratio=4.0),
            ) for i in range(depth[0])]
        )

        self.norm1_f1 = nn.BatchNorm2d(dim)
        self.norm1_f2 = nn.BatchNorm2d(dim)
        # ADCAM grid matches the post-stem feature map: (H/patch, W/patch).
        self.adcam1 = ADCAM(dim=dim, kernel_size=7, p_h=p_h[0], p_w=p_w[0], H=H // patch_size,
                                              W=W // patch_size)
        self.cbam1_f1 = CBAMBlock(channel=dim, reduction=16, kernel_size=kernel_size)
        self.cbam1_f2 = CBAMBlock(channel=dim, reduction=16, kernel_size=kernel_size)

        # --- Stage 2: stride-2 downsample to 2*dim, then same pattern ---
        self.downsample2_f1 = nn.Sequential(
            nn.Conv2d(dim, dim * 2, kernel_size=2, stride=2),
            act_layer(),
            nn.BatchNorm2d(dim * 2)
        )

        self.downsample2_f2 = nn.Sequential(
            nn.Conv2d(dim, dim * 2, kernel_size=2, stride=2),
            act_layer(),
            nn.BatchNorm2d(dim * 2)
        )

        self.stage2_f1 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim * 2, mlp_ratio=4.0),
            ) for i in range(depth[1])]
        )

        self.stage2_f2 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim * 2, mlp_ratio=4.0),
            ) for i in range(depth[1])]
        )

        self.norm2_f1 = nn.BatchNorm2d(dim * 2)
        self.norm2_f2 = nn.BatchNorm2d(dim * 2)
        self.adcam2 = ADCAM(dim=dim * 2, kernel_size=7, p_h=p_h[1], p_w=p_w[1], H=H // patch_size // 2,
                                              W=W // patch_size // 2)
        self.cbam2_f1 = CBAMBlock(channel=dim * 2, reduction=16, kernel_size=kernel_size)
        self.cbam2_f2 = CBAMBlock(channel=dim * 2, reduction=16, kernel_size=kernel_size)

        # --- Stage 3: downsample to 4*dim ---
        self.downsample3_f1 = nn.Sequential(
            nn.Conv2d(dim * 2, dim * 4, kernel_size=2, stride=2),
            act_layer(),
            nn.BatchNorm2d(dim * 4)
        )

        self.downsample3_f2 = nn.Sequential(
            nn.Conv2d(dim * 2, dim * 4, kernel_size=2, stride=2),
            act_layer(),
            nn.BatchNorm2d(dim * 4)
        )

        self.stage3_f1 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim * 4, mlp_ratio=4.0),
            ) for i in range(depth[2])]
        )

        self.stage3_f2 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim * 4, mlp_ratio=4.0),
            ) for i in range(depth[2])]
        )

        self.norm3_f1 = nn.BatchNorm2d(dim * 4)
        self.norm3_f2 = nn.BatchNorm2d(dim * 4)
        self.adcam3 = ADCAM(dim=dim * 4, kernel_size=7, p_h=p_h[2], p_w=p_w[2], H=H // patch_size // 4,
                                              W=W // patch_size // 4)
        self.cbam3_f1 = CBAMBlock(channel=dim * 4, reduction=16, kernel_size=kernel_size)
        self.cbam3_f2 = CBAMBlock(channel=dim * 4, reduction=16, kernel_size=kernel_size)

        # --- Stage 4: downsample to 8*dim ---
        self.downsample4_f1 = nn.Sequential(
            nn.Conv2d(dim * 4, dim * 8, kernel_size=2, stride=2),
            act_layer(),
            nn.BatchNorm2d(dim * 8)
        )

        self.downsample4_f2 = nn.Sequential(
            nn.Conv2d(dim * 4, dim * 8, kernel_size=2, stride=2),
            act_layer(),
            nn.BatchNorm2d(dim * 8)
        )

        self.stage4_f1 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim * 8, mlp_ratio=4.0),
            ) for i in range(depth[3])]
        )

        self.stage4_f2 = nn.Sequential(
            *[nn.Sequential(
                FFEM(dim * 8, mlp_ratio=4.0),
            ) for i in range(depth[3])]
        )

        self.norm4_f1 = nn.BatchNorm2d(dim * 8)
        self.norm4_f2 = nn.BatchNorm2d(dim * 8)

        self.adcam4 = ADCAM(dim=dim * 8, kernel_size=7, p_h=p_h[3], p_w=p_w[3], H=H // patch_size // 8,
                                              W=W // patch_size // 8)
        self.cbam4_f1 = CBAMBlock(channel=dim * 8, reduction=16, kernel_size=kernel_size)
        self.cbam4_f2 = CBAMBlock(channel=dim * 8, reduction=16, kernel_size=kernel_size)

        # --- Fusion + classifier head ---
        self.bf3m = BF3M(dim=dim * 8, reduction=16, kernel_size=kernel_size)

        self.pooling = nn.AdaptiveAvgPool2d((1, 1))
        self.flatten = nn.Flatten()
        self.head_drop = nn.Dropout(drop_rate)
        # Head width assumes BF3M emits 2 * (dim * 8) channels (concat of
        # the two streams) — TODO confirm against the BF3M implementation.
        self.head = nn.Linear(dim * 8 * 2, num_classes) if num_classes > 0 else nn.Identity()
        # Weight initialization.
        self._init_weights()

    def _init_weights(self):
        """Initialize conv/BN/LN/linear weights with standard schemes."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)
            elif isinstance(m, nn.Linear):
                nn.init.trunc_normal_(m.weight, std=.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)

    def forward_features(self, x1, x2):
        """Run both streams through all four stages; return final feature maps.

        Per stage: (downsample,) ADCAM cross-attention with a residual
        connection into BatchNorm, FFEM stack, then CBAM gating.
        """
        x1 = self.stem_f1(x1)
        x2 = self.stem_f2(x2)
        attn1, attn2 = self.adcam1(x1, x2)
        x1 = self.norm1_f1(x1 + attn1)
        x2 = self.norm1_f2(x2 + attn2)
        x1 = self.stage1_f1(x1)
        x2 = self.stage1_f2(x2)
        x1 = self.cbam1_f1(x1)
        x2 = self.cbam1_f2(x2)

        x1 = self.downsample2_f1(x1)
        x2 = self.downsample2_f2(x2)
        attn1, attn2 = self.adcam2(x1, x2)
        x1 = self.norm2_f1(x1 + attn1)
        x2 = self.norm2_f2(x2 + attn2)
        x1 = self.stage2_f1(x1)
        x2 = self.stage2_f2(x2)
        x1 = self.cbam2_f1(x1)
        x2 = self.cbam2_f2(x2)

        x1 = self.downsample3_f1(x1)
        x2 = self.downsample3_f2(x2)
        attn1, attn2 = self.adcam3(x1, x2)
        x1 = self.norm3_f1(x1 + attn1)
        x2 = self.norm3_f2(x2 + attn2)
        x1 = self.stage3_f1(x1)
        x2 = self.stage3_f2(x2)
        x1 = self.cbam3_f1(x1)
        x2 = self.cbam3_f2(x2)

        x1 = self.downsample4_f1(x1)
        x2 = self.downsample4_f2(x2)
        attn1, attn2 = self.adcam4(x1, x2)
        x1 = self.norm4_f1(x1 + attn1)
        x2 = self.norm4_f2(x2 + attn2)
        x1 = self.stage4_f1(x1)
        x2 = self.stage4_f2(x2)
        x1 = self.cbam4_f1(x1)
        x2 = self.cbam4_f2(x2)

        return x1, x2

    def forward_head(self, x1, x2, pre_logits: bool = False):
        """Fuse the two streams (BF3M), pool, and classify.

        When ``pre_logits`` is True the pooled/flattened features are
        returned instead of head logits.
        """
        x = self.bf3m(x1, x2)
        x = self.pooling(x)
        x = self.flatten(x)
        x = self.head_drop(x)
        return x if pre_logits else self.head(x)

    def forward(self, x1, x2):
        x1, x2 = self.forward_features(x1, x2)
        x = self.forward_head(x1, x2)
        return x

def count_params(model):
    """Return the total number of parameters of *model*, in millions."""
    total = 0
    for p in model.parameters():
        total += p.numel()
    return total / 1e6



if __name__ == '__main__':
    # Smoke test: run two 1-channel 256x256 views through a small BiFovNet.
    model = BiFovNet(dim=64, depth=[2, 2, 6, 2], kernel_size=7, patch_size=4, in_chans=1,
                   num_classes=16, H=256, W=256, p_h=[8, 4, 2, 1], p_w=[8, 4, 2, 1])
    # perf_counter is monotonic with higher resolution than time.time —
    # the right clock for measuring an interval.
    start_time = time.perf_counter()
    out = model(torch.rand(1, 1, 256, 256), torch.rand(1, 1, 256, 256))
    end_time = time.perf_counter()
    print(out.shape)
    print(out)
    print(end_time - start_time)  # forward-pass wall time in seconds
    print(count_params(model))    # parameter count in millions
