import numbers

import torch
from einops import rearrange
from timm.models.layers import trunc_normal_
from timm.models import Bottleneck
from torch import nn
import torch.nn.functional as F

from .fredepth_utils import load_checkpoint
from .resnet import MSPANet,mspanet50
from .VisionTransformer import VisionTransformer


class FreDepthEncoder(nn.Module):
    """Hybrid CNN + Transformer encoder producing a 4-level feature pyramid.

    The input image is first passed through an MSPANet-50 CNN backbone
    (``mspanet50``) to obtain four feature maps, which are then refined by a
    ``VisionTransformer`` encoder that also receives the raw image.

    Args:
        inv_depth (bool): Stored as an attribute; not used inside this module
            (presumably consumed by a downstream decoder — TODO confirm).
        **kwargs: Forwarded verbatim to ``VisionTransformer``.
    """

    def __init__(self,
                 inv_depth=False,
                 **kwargs):
        super(FreDepthEncoder, self).__init__()
        self.inv_depth = inv_depth
        self.wtresnet = mspanet50()
        self.transformerEncoder = VisionTransformer(**kwargs)

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.

        Raises:
            TypeError: If ``pretrained`` is neither a str nor None.
        """

        def _init_weights(m):
            # Truncated-normal init for Linear weights; unit scale / zero
            # bias for LayerNorm.
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                # Fixed: the original re-checked isinstance(m, nn.Linear)
                # here, which is always True inside this branch.
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

        if isinstance(pretrained, str):
            self.apply(_init_weights)
            # logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False)
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')

    def forward(self, imgs):
        """Encode a batch of images into a feature pyramid.

        Args:
            imgs (Tensor): Input batch; the smoke test below uses
                shape (B, 3, 480, 640) — exact shape contract depends on
                the backbone (TODO confirm).

        Returns:
            tuple: Four transformer-refined feature tensors (f0, f1, f2, f3).
        """
        # CNN backbone features at four scales.
        f0, f1, f2, f3 = self.wtresnet(imgs)
        # Transformer refinement conditioned on both the image and the
        # CNN features.
        f0, f1, f2, f3 = self.transformerEncoder(imgs, f0, f1, f2, f3)
        return (f0, f1, f2, f3)



if __name__ == '__main__':
    # Smoke test: push a random NYU-sized batch through the encoder and
    # print the shape of each returned pyramid level.
    dummy_batch = torch.randn(4, 3, 480, 640)
    encoder = FreDepthEncoder()
    pyramid = encoder(dummy_batch)
    for level in range(4):
        print(pyramid[level].shape)


#
# class BiasFree_LayerNorm(nn.Module):
#     def __init__(self, normalized_shape):
#         super(BiasFree_LayerNorm, self).__init__()
#         if isinstance(normalized_shape, numbers.Integral):
#             normalized_shape = (normalized_shape,)
#         normalized_shape = torch.Size(normalized_shape)
#
#         assert len(normalized_shape) == 1
#
#         self.weight = nn.Parameter(torch.ones(normalized_shape))
#         self.normalized_shape = normalized_shape
#
#     def forward(self, x):
#         sigma = x.var(-1, keepdim=True, unbiased=False)
#         return x / torch.sqrt(sigma + 1e-5) * self.weight
#
#
# class WithBias_LayerNorm(nn.Module):
#     def __init__(self, normalized_shape):
#         super(WithBias_LayerNorm, self).__init__()
#         if isinstance(normalized_shape, numbers.Integral):
#             normalized_shape = (normalized_shape,)
#         normalized_shape = torch.Size(normalized_shape)
#
#         assert len(normalized_shape) == 1
#
#         self.weight = nn.Parameter(torch.ones(normalized_shape))
#         self.bias = nn.Parameter(torch.zeros(normalized_shape))
#         self.normalized_shape = normalized_shape
#
#     def forward(self, x):
#         mu = x.mean(-1, keepdim=True)
#         sigma = x.var(-1, keepdim=True, unbiased=False)
#         return (x - mu) / torch.sqrt(sigma + 1e-5) * self.weight + self.bias
#
# class LayerNorm(nn.Module):
#     def __init__(self, dim, LayerNorm_type):
#         super(LayerNorm, self).__init__()
#         if LayerNorm_type == 'BiasFree':
#             self.body = BiasFree_LayerNorm(dim)
#         else:
#             self.body = WithBias_LayerNorm(dim)
#
#     def forward(self, x):
#         h, w = x.shape[-2:]
#         return to_4d(self.body(to_3d(x)), h, w)
#
# ## Multi-DConv Head Transposed Self-Attention (MDTA)
# class Attention(nn.Module):
#     def __init__(self, dim, num_heads, bias):
#         super(Attention, self).__init__()
#         self.num_heads = num_heads
#         self.temperature = nn.Parameter(torch.ones(num_heads, 1, 1))
#
#         self.qkv = nn.Conv2d(dim, dim * 3, kernel_size=1, bias=bias)
#         self.qkv_dwconv = nn.Conv2d(dim * 3, dim * 3, kernel_size=3, stride=1, padding=1, groups=dim * 3, bias=bias)
#         self.project_out = nn.Conv2d(dim, dim, kernel_size=1, bias=bias)
#
#     def forward(self, x):
#         b, c, h, w = x.shape
#
#         qkv = self.qkv_dwconv(self.qkv(x))
#         q, k, v = qkv.chunk(3, dim=1)
#
#         q = rearrange(q, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
#         k = rearrange(k, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
#         v = rearrange(v, 'b (head c) h w -> b head c (h w)', head=self.num_heads)
#
#         q = torch.nn.functional.normalize(q, dim=-1)
#         k = torch.nn.functional.normalize(k, dim=-1)
#
#         attn = (q @ k.transpose(-2, -1)) * self.temperature
#         attn = attn.softmax(dim=-1)
#
#         out = (attn @ v)
#
#         out = rearrange(out, 'b head c (h w) -> b (head c) h w', head=self.num_heads, h=h, w=w)
#
#         out = self.project_out(out)
#         return out
#
# ## Gated-Dconv Feed-Forward Network (GDFN)
# class FeedForward(nn.Module):
#     def __init__(self, dim, ffn_expansion_factor, bias):
#         super(FeedForward, self).__init__()
#
#         hidden_features = int(dim * ffn_expansion_factor)
#
#         self.project_in = nn.Conv2d(dim, hidden_features * 2, kernel_size=1, bias=bias)
#
#         self.dwconv = nn.Conv2d(hidden_features * 2, hidden_features * 2, kernel_size=3, stride=1, padding=1,
#                                 groups=hidden_features * 2, bias=bias)
#
#         self.project_out = nn.Conv2d(hidden_features, dim, kernel_size=1, bias=bias)
#
#     def forward(self, x):
#         x = self.project_in(x)
#         x1, x2 = self.dwconv(x).chunk(2, dim=1)
#         x = F.gelu(x1) * x2
#         x = self.project_out(x)
#         return x
#
# class TransformerBlock(nn.Module):
#     def __init__(self, dim, num_heads, ffn_expansion_factor, bias, LayerNorm_type):
#         super(TransformerBlock, self).__init__()
#
#         self.norm1 = LayerNorm(dim, LayerNorm_type)
#         self.attn = Attention(dim, num_heads, bias)
#         self.norm2 = LayerNorm(dim, LayerNorm_type)
#         self.ffn = FeedForward(dim, ffn_expansion_factor, bias)
#
#     def forward(self, x):
#         x = x + self.attn(self.norm1(x))
#         x = x + self.ffn(self.norm2(x))
#
#         return x
#
# ## Resizing modules
# class Downsample(nn.Module):
#     def __init__(self, in_channel, n_feat):
#         super(Downsample, self).__init__()
#
#         self.body = nn.Sequential(nn.Conv2d(in_channel, n_feat // 2, kernel_size=3, stride=1, padding=1, bias=False),
#                                   nn.PixelUnshuffle(2))
#
#     def forward(self, x):
#         return self.body(x)
#
#
# class VisionTransformer(nn.Module):
#     def __init__(self,
#                  input_channels=50,
#                  out_channels=3,
#                  dim=48,
#                  num_blocks=[3,4,6,3],
#                  num_refinement_block=2,
#                  heads=[1,2,4,8],
#                  ffn_expansion_factor=1,
#                  bias=False,
#                  LayerNorm_type='WithBias', # BiasFree
#                  ):
#         super(VisionTransformer, self).__init__()
#         self.patch_embed = OverlapPatchEmbed(input_channels,dim)
#
#         self.encoder_1 = nn.Sequential(
#             *[TransformerBlock(dim=dim,num_heads=heads[0],ffn_expansion_factor=ffn_expansion_factor,bias=bias,
#                                LayerNorm_type=LayerNorm_type) for i in range(num_blocks[0])])
#
#         # TODO: replace with wavelet downsampling
#         self.c_down1 = nn.Conv2d(64+1, 32, kernel_size=3, stride=1, padding=1, bias=False)
#         self.down1_2 = Downsample(dim, dim)  ## From Level 1 to Level 2
#         self.encoder_level2 = nn.Sequential(*[
#             TransformerBlock(dim=int(dim * 2 + 32), num_heads=heads[1], ffn_expansion_factor=ffn_expansion_factor,
#                              bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[1])])
#
#         self.c_down2 = nn.Conv2d(128 + 1, 64, kernel_size=3, stride=1, padding=1, bias=False)
#         self.down2_3 = Downsample(dim * 2 + 32, dim * 2)  ## From Level 2 to Level 3
#         self.encoder_level3 = nn.Sequential(*[
#             TransformerBlock(dim=int(dim * 4 + 64), num_heads=heads[2], ffn_expansion_factor=ffn_expansion_factor,
#                              bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[2])])
#
#         self.c_down3 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1, bias=False)
#         self.down3_4 = Downsample(int(dim * 4 + 64), dim * 4)  ## From Level 3 to Level 4
#         self.latent = nn.Sequential(*[
#             TransformerBlock(dim=int(dim * 8 + 128), num_heads=heads[3], ffn_expansion_factor=ffn_expansion_factor,
#                              bias=bias, LayerNorm_type=LayerNorm_type) for i in range(num_blocks[3])])
#
#     def forward(self, imgs,feats):
#         inputs = torch.cat([imgs, feats], dim=1)
#         H,W = imgs.shape[2:]
#
#         inputs_encoder_1 = self.patch_embed(inputs)
#         out_enc_level1 = self.encoder_level1(inputs_encoder_1)
#
#         f1 = self.c_down1(torch.cat([inputs_encoder_1,feats[1]],dim=1))
#         inp_enc_level2 = self.down1_2(out_enc_level1)
#         inp_enc_level2 = self.SpecFreq2(inp_enc_level2, spatial_size=(H // 2, W // 2))
#         out_enc_level2 = self.encoder_level2(torch.cat([inp_enc_level2, f1], 1))
#         if self.with_cbam:
#             out_enc_level2 = out_enc_level2 + self.cbam_2(out_enc_level2)
#
#         f2 = self.c_down2(torch.cat([ feats[2], feats[2]], 1))
#         inp_enc_level3 = self.down2_3(out_enc_level2)
#         inp_enc_level3 = self.SpecFreq3(inp_enc_level3, spatial_size=(H // 4, W // 4))
#         out_enc_level3 = self.encoder_level3(torch.cat([inp_enc_level3, f2], 1))
#         if self.with_cbam:
#             out_enc_level3 = out_enc_level3 + self.cbam_3(out_enc_level3)
#
#         f3 = self.c_down3(torch.cat([feats[3], feats[3]], 1))
#         inp_enc_level4 = self.down3_4(out_enc_level3)
#         latent = self.latent(torch.cat([inp_enc_level4, f3], 1))
#
#         inp_dec_level3 = self.up4_3(latent)
#         inp_dec_level3 = torch.cat([inp_dec_level3, out_enc_level3], 1)
#         inp_dec_level3 = self.reduce_chan_level3(inp_dec_level3)
#         out_dec_level3 = self.decoder_level3(inp_dec_level3)
#
#         inp_dec_level2 = self.up3_2(out_dec_level3)
#         inp_dec_level2 = torch.cat([inp_dec_level2, out_enc_level2], 1)
#         inp_dec_level2 = self.reduce_chan_level2(inp_dec_level2)
#         out_dec_level2 = self.decoder_level2(inp_dec_level2)
#
#         inp_dec_level1 = self.up2_1(out_dec_level2)
#         inp_dec_level1 = torch.cat([inp_dec_level1, out_enc_level1], 1)
#         out_dec_level1 = self.decoder_level1(inp_dec_level1)
#
#         out_dec_level1 = self.refinement(out_dec_level1)
#         out_dec_level1 = self.output(out_dec_level1)
#
#         return torch.sigmoid(out_dec_level1)
#


# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# backwarp_tenGrid = {}
# def warp(tenInput, tenFlow):
#     k = (str(tenFlow.device), str(tenFlow.size()))
#     if k not in backwarp_tenGrid:
#         tenHorizontal = torch.linspace(-1.0, 1.0, tenFlow.shape[3], device=device).view(
#             1, 1, 1, tenFlow.shape[3]).expand(tenFlow.shape[0], -1, tenFlow.shape[2], -1)
#         tenVertical = torch.linspace(-1.0, 1.0, tenFlow.shape[2], device=device).view(
#             1, 1, tenFlow.shape[2], 1).expand(tenFlow.shape[0], -1, -1, tenFlow.shape[3])
#         backwarp_tenGrid[k] = torch.cat(
#             [tenHorizontal, tenVertical], 1).to(device)
#
#     tenFlow = torch.cat([tenFlow[:, 0:1, :, :] / ((tenInput.shape[3] - 1.0) / 2.0),
#                          tenFlow[:, 1:2, :, :] / ((tenInput.shape[2] - 1.0) / 2.0)], 1)
#
#     g = (backwarp_tenGrid[k] + tenFlow).permute(0, 2, 3, 1)
#     return torch.nn.functional.grid_sample(input=tenInput, grid=g, mode='bilinear', padding_mode='border', align_corners=True)
#
#
# class ResNet_feature(nn.Module):
#
#     def __init__(self, block, num_block):
#         super().__init__()
#
#         self.in_channels = 64
#
#         self.conv1 = nn.Sequential(
#             nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
#             nn.BatchNorm2d(64),
#             nn.ReLU(inplace=True))
#         self.conv2_x = self._make_layer(block, 16, num_block[0], 2)
#         self.conv3_x = self._make_layer(block, 32, num_block[1], 2)
#         self.conv4_x = self._make_layer(block, 64, num_block[2], 2)
#         self.conv5_x = self._make_layer(block, 128, num_block[3], 2)
#
#     def _make_layer(self, block, out_channels, num_blocks, stride):
#         """make resnet layers(by layer i didnt mean this 'layer' was the
#         same as a neuron netowork layer, ex. conv layer), one layer may
#         contain more than one residual block
#
#         Args:
#             block: block type, basic block or bottle neck block
#             out_channels: output depth channel number of this layer
#             num_blocks: how many blocks per layer
#             stride: the stride of the first block of this layer
#
#         Return:
#             return a resnet layer
#         """
#
#         # we have num_block blocks per layer, the first block
#         # could be 1 or 2, other blocks would always be 1
#         strides = [stride] + [1] * (num_blocks - 1)
#         layers = []
#         for stride in strides:
#             layers.append(block(self.in_channels, out_channels, stride))
#             self.in_channels = out_channels * block.expansion
#
#         return nn.Sequential(*layers)
#
#     def forward(self, x, flow):
#         output = self.conv1(x)  # channel: 3 -> 64
#         output = self.conv2_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         f1 = warp(output, flow)
#         output = self.conv3_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         f2 = warp(output, flow)
#         output = self.conv4_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         f3 = warp(output, flow)
#         output = self.conv5_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         f4 = warp(output, flow)
#
#         return [f1, f2, f3, f4]
#
#     def visilize(self, x, flow):
#         output = self.conv1(x)  # channel: 3 -> 64
#         output = self.conv2_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         x1 = output
#         f1 = warp(output, flow)
#         output = self.conv3_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         x2 = output
#         f2 = warp(output, flow)
#         output = self.conv4_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         x3 = output
#         f3 = warp(output, flow)
#         output = self.conv5_x(output)
#         flow = F.interpolate(flow, scale_factor=0.5, mode="bilinear", align_corners=False,
#                              recompute_scale_factor=False) * 0.5
#         x4 = output
#         f4 = warp(output, flow)
#
#         return [f1, f2, f3, f4], [x1, x2, x3, x4]
#
#
# class BasicBlock(nn.Module):
#     """Basic Block for resnet 18 and resnet 34
#
#     """
#
#     # BasicBlock and BottleNeck block
#     # have different output size
#     # we use class attribute expansion
#     # to distinct
#     expansion = 1
#
#     def __init__(self, in_channels, out_channels, stride=1):
#         super().__init__()
#
#         # residual function
#         self.residual_function = nn.Sequential(
#             nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),
#             nn.BatchNorm2d(out_channels * BasicBlock.expansion)
#         )
#
#         # shortcut
#         self.shortcut = nn.Sequential()
#
#         # the shortcut output dimension is not the same with residual function
#         # use 1*1 convolution to match the dimension
#         if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
#             self.shortcut = nn.Sequential(
#                 nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
#                 nn.BatchNorm2d(out_channels * BasicBlock.expansion)
#             )
#
#     def forward(self, x):
#         return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
#
# class BottleNeck(nn.Module):
#     """Residual block for resnet over 50 layers
#
#     """
#     expansion = 1
#
#     def __init__(self, in_channels, out_channels, stride=1):
#         super().__init__()
#         self.residual_function = nn.Sequential(
#             nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(out_channels, out_channels, stride=stride, kernel_size=3, padding=1, bias=False),
#             nn.BatchNorm2d(out_channels),
#             nn.ReLU(inplace=True),
#             nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, bias=False),
#             nn.BatchNorm2d(out_channels * BottleNeck.expansion),
#         )
#
#         self.shortcut = nn.Sequential()
#
#         if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
#             self.shortcut = nn.Sequential(
#                 nn.Conv2d(in_channels, out_channels * BottleNeck.expansion, stride=stride, kernel_size=1, bias=False),
#                 nn.BatchNorm2d(out_channels * BottleNeck.expansion)
#             )
#
#     def forward(self, x):
#         return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
#
#
# def resnet18():
#     """ return a ResNet 18 object
#     """
#     return ResNet_feature(BasicBlock, [2, 2, 2, 2])
#
#
# def resnet34():
#     """ return a ResNet 34 object
#     """
#     return ResNet_feature(BasicBlock, [3, 4, 6, 3])
#
#
# def resnet50():
#     """ return a ResNet 50 object
#     """
#     return ResNet_feature(BottleNeck, [3, 4, 6, 3])
#
#
# def resnet101():
#     """ return a ResNet 101 object
#     """
#     return ResNet_feature(BottleNeck, [3, 4, 23, 3])
#
#
# def resnet152():
#     """ return a ResNet 152 object
#     """
#     return ResNet_feature(BottleNeck, [3, 8, 36, 3])
