import torch
import torch.nn as nn
from models.swin import SwinBlockSequence
from einops import rearrange


class SwinDecoder(nn.Module):
    """Swin Transformer decoder.

    A cascade of ``SwinBlockSequence`` stages; every stage except the last
    is followed by a ``PatchExpanding`` upsample that doubles the spatial
    resolution and halves the channel count. The input token sequence is
    interpreted as a grid of shape ``(input_h, input_w * task_num)``.
    """

    def __init__(self,
                 input_size=(20, 20),
                 task_num=2,
                 in_channels=768,
                 window_size=7,
                 mlp_ratio=4,
                 depths=(2, 2, 6, 2),
                 num_heads=(3, 6, 12, 24),
                 qkv_bias=True,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.1,
                 act_cfg=dict(type='GELU'),
                 norm_cfg=dict(type='LN'),
                 with_cp=False
                 ):
        super().__init__()

        self.input_h, self.input_w = input_size
        self.task_num = task_num

        # Stochastic-depth rates increase linearly across all blocks of all
        # stages, sliced per stage below.
        depth_total = sum(depths)
        drop_path_rates = [
            rate.item() for rate in torch.linspace(0, drop_path_rate, depth_total)
        ]

        self.stages = nn.ModuleList()
        channels = in_channels
        last_stage = len(depths) - 1
        for stage_idx, (stage_depth, heads) in enumerate(zip(depths, num_heads)):
            # All stages but the last upsample 2x spatially, halving channels.
            expander = None
            if stage_idx != last_stage:
                expander = PatchExpanding(in_channels=channels,
                                          out_channels=channels // 2)

            consumed = sum(depths[:stage_idx])
            self.stages.append(SwinBlockSequence(
                embed_dims=channels,
                num_heads=heads,
                feedforward_channels=int(mlp_ratio * channels),
                depth=stage_depth,
                window_size=window_size,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop_rate=drop_rate,
                attn_drop_rate=attn_drop_rate,
                drop_path_rate=drop_path_rates[consumed:consumed + stage_depth],
                downsample=expander,  # SwinBlockSequence applies it after the blocks
                act_cfg=act_cfg,
                norm_cfg=norm_cfg,
                with_cp=with_cp,
                init_cfg=None))
            if expander is not None:
                channels = expander.out_channels

    def forward(self, x):
        """Run all stages; return the final stage's pre-upsample output
        tokens together with their (H, W) grid shape."""
        # Tasks are laid out side by side along the width axis: (H, W*T).
        hw_shape = (self.input_h, self.input_w * self.task_num)
        for stage in self.stages:
            x, hw_shape, out, out_hw_shape = stage(x, hw_shape)
        return out, out_hw_shape


class PatchExpanding(nn.Module):
    """2x spatial upsampling for flattened Swin tokens.

    Channels are first linearly expanded C -> 2C, then each token's channels
    are redistributed into a 2x2 spatial neighbourhood, turning an (H, W)
    grid of C-channel tokens into a (2H, 2W) grid of C/2-channel tokens.
    ``out_channels`` is expected to equal ``in_channels // 2`` so the
    LayerNorm size matches the redistributed channel count.
    """

    def __init__(self, in_channels, out_channels, norm_layer=nn.LayerNorm):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.expand = nn.Linear(in_channels, 2 * in_channels, bias=False)
        self.norm = norm_layer(out_channels)

    def forward(self, x, hw_shape):
        """
        Args:
            x: (B, H*W, C) tokens, flattened row-major over the (H, W) grid.
            hw_shape: (H, W) grid shape of the incoming tokens.

        Returns:
            Tuple of (B, 4*H*W, C//2) tokens and the new grid shape (2H, 2W).
        """
        H, W = hw_shape
        x = self.expand(x)          # (B, H*W, 2*C_in)
        B, L, C = x.shape

        # Fix: the previous flat rearrange ('b thw (p c) -> b (thw p) c')
        # placed the 4 expanded sub-tokens consecutively along the sequence
        # axis, which does not match the row-major (2H, 2W) layout implied by
        # the returned hw_shape. Put each 2x2 sub-block at its true spatial
        # position, i.e. 'b h w (p1 p2 c) -> b (h p1) (w p2) c'.
        x = x.view(B, H, W, 2, 2, C // 4)
        x = x.permute(0, 1, 3, 2, 4, 5).reshape(B, H * 2 * W * 2, C // 4)
        x = self.norm(x)

        return x, (H * 2, W * 2)


if __name__ == '__main__':
    # Smoke test: default config expects 20x20 patches x 2 tasks
    # = 800 tokens of 768 channels.
    model = SwinDecoder()
    x = torch.rand(2, 800, 768)
    out, out_hw_shape = model(x)
    # Fix: the bare `print()` only emitted a blank line; report the result.
    print(f'output shape: {tuple(out.shape)}, hw_shape: {out_hw_shape}')