import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import config.risfnet_config as cfg
from .backbones.CSPDarknet53 import _BuildCSPDarknet53
from .backbones.VGG13 import _BuildVGG13
from .layers.global_context_block import ContextBlock2d

class Conv(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU with 'same'-style padding.

    Padding is ``kernel_size // 2``, so odd kernels preserve the spatial
    size (divided by ``stride``). The conv carries no bias because the
    batch norm supplies one.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1):
        super(Conv, self).__init__()

        padding = kernel_size // 2
        self.conv = nn.Sequential(
            nn.Conv2d(
                in_channels, out_channels, kernel_size, stride, padding, bias=False
            ),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(),
        )

    def forward(self, x):
        out = self.conv(x)
        return out


class SpatialPyramidPooling(nn.Module):
    """SPP head: a 1-3-1 conv bottleneck followed by parallel max-pools.

    The deepest feature map is reduced to half its channels, max-pooled at
    several receptive fields (stride 1, 'same' padding so spatial size is
    kept), and the pooled maps are concatenated with the un-pooled one —
    multiplying the channel count by ``len(pool_sizes) + 1``.
    """

    def __init__(self, feature_channels, pool_sizes=[5, 9, 13]):
        super(SpatialPyramidPooling, self).__init__()

        channels = feature_channels[-1]

        # head conv: 1x1 -> 3x3 -> 1x1 bottleneck ending at channels // 2
        self.head_conv = nn.Sequential(
            Conv(channels, channels // 2, 1),
            Conv(channels // 2, channels, 3),
            Conv(channels, channels // 2, 1),
        )

        self.maxpools = nn.ModuleList(
            nn.MaxPool2d(size, 1, size // 2) for size in pool_sizes
        )
        self.__initialize_weights()

    def forward(self, x):
        x = self.head_conv(x)
        pooled = [pool(x) for pool in self.maxpools]
        return torch.cat([x] + pooled, dim=1)

    def __initialize_weights(self):
        """Init convs with N(0, 0.01) and batch norms with weight 1 / bias 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


class Upsample(nn.Module):
    """1x1 channel projection followed by nearest-neighbor upsampling by ``scale``."""

    def __init__(self, in_channels, out_channels, scale=2):
        super(Upsample, self).__init__()

        self.upsample = nn.Sequential(
            Conv(in_channels, out_channels, 1),
            nn.Upsample(scale_factor=scale),
        )

    def forward(self, x):
        out = self.upsample(x)
        return out


class Downsample(nn.Module):
    """3x3 strided conv that divides the spatial resolution by ``scale``.

    BUGFIX/generalization: the original accepted a ``scale`` parameter but
    always used a hard-coded stride of 2, silently ignoring the argument.
    ``scale`` is now used as the conv stride; the default (``scale=2``)
    preserves the previous behavior for all existing callers.
    """

    def __init__(self, in_channels, out_channels, scale=2):
        super(Downsample, self).__init__()

        # stride = scale: 3x3 conv with 'same' padding, so spatial dims are
        # divided by `scale` while the channel count changes
        self.downsample = Conv(in_channels, out_channels, 3, scale)

    def forward(self, x):
        return self.downsample(x)

# Multi-layer perceptron over the flattened spatial dims; requires a fixed
# feature size (h, w) known at construction time.
class MLP(nn.Module):
    """Residual MLP applied to each channel's flattened (h*w) vector.

    Hidden sizes are ``int(ratio ** 2 * h * w)`` for each ratio in
    ``hidden_ratios``; the final linear maps back to ``h * w`` so the output
    shape equals the input shape. A ``BatchNorm1d`` over the channel dim and
    a ``ReLU`` follow every hidden linear, and the input is added back as a
    residual.

    BUGFIX: the original stored a ``lambda`` in ``self.reshape``, which made
    the module unpicklable (``pickle``/``torch.save`` of the whole module
    fails on lambdas). The target shape is now kept as plain attributes and
    ``view`` is called directly in ``forward`` — behavior is unchanged.
    """

    def __init__(self, in_channels, hidden_ratios, h, w):
        super(MLP, self).__init__()

        # kept for reshaping in forward (picklable, unlike a lambda)
        self.in_channels = in_channels
        self.h = h
        self.w = w

        flat_size = h * w

        layers = []
        prev_size = flat_size
        for hidden_ratio in hidden_ratios:
            # NOTE: the ratio is squared, so 1/2 -> hidden size of h*w/4
            hidden_size = int(hidden_ratio ** 2 * h * w)

            layers.append(nn.Linear(prev_size, hidden_size))
            layers.append(nn.BatchNorm1d(in_channels))
            layers.append(nn.ReLU())

            prev_size = hidden_size

        layers.append(nn.Linear(prev_size, flat_size))  # final output keeps the input shape

        self.mlp = nn.Sequential(*layers)

    def forward(self, x):  # (b, c, h, w)
        residual = x

        x = x.view(x.size(0), x.size(1), -1)  # (b, c, h*w)
        x = self.mlp(x)  # (b, c, h*w)
        x = x.view(-1, self.in_channels, self.h, self.w)  # (b, c, h, w)

        return x + residual

# Self-attention: each radar frame is refined by an MLP, then the frames
# are concatenated back together on the channel dimension.
class SelfAttention(nn.Module):
    """Fuse ``num_radars`` radar frames into a single feature list.

    For every frame and every feature scale (1/8, 1/16, 1/32 of the
    training image size), a residual MLP refines the map and a 1x1 conv
    shrinks its channels to ``channels // num_radars``; concatenating the
    same scale across frames then restores the original channel count.
    """

    def __init__(
        self,
        radar_channels=[256, 512, 512],
        num_radars=cfg.TRAIN["RADAR_FRAME"]
    ):
        super(SelfAttention, self).__init__()

        self.radar_channels = radar_channels
        self.num_radars = num_radars

        img_size = cfg.TRAIN["TRAIN_IMG_SIZE"]
        hidden_ratios = [1 / 2, 1 / 4, 1 / 2]

        self.mlp_layers = nn.ModuleList()
        for _ in range(num_radars):  # one branch of per-scale heads per frame
            branch = nn.ModuleList(
                nn.Sequential(
                    MLP(
                        channels,
                        hidden_ratios,
                        img_size // stride,
                        img_size // stride,
                    ),
                    Conv(channels, channels // self.num_radars, 1),
                )
                for channels, stride in zip(radar_channels, (8, 16, 32))
            )
            self.mlp_layers.append(branch)

    def forward(self, radars):  # tensor 2d list, [[256, 512, 512]*3]
        # refine each frame/scale pair: [[256/n, 512/n, 512/n]*3]
        refined = [
            [branch[j](feature) for j, feature in enumerate(frame)]
            for branch, frame in zip(self.mlp_layers, radars)
        ]

        # concatenate the same scale across all n frames:
        # n x (b, c/n, h, w) -> (b, c, h, w)
        radar_cats = [
            torch.concat([frame[i] for frame in refined], dim=1)
            for i in range(len(self.radar_channels))
        ]

        return radar_cats  # [(b, c, h, w)*3]

# Global attention: squeeze-and-excitation channel attention built on a
# global max-pool descriptor.
class GlobalAttention(nn.Module):
    """SE-style channel attention, one gate per feature scale.

    Each gate global-max-pools the map to (b, c, 1, 1), squeezes to c/16
    channels, expands back to c, and multiplies the input by the resulting
    per-channel weights.

    BUGFIX: the original stacked ``ReLU`` directly followed by ``Sigmoid``
    and placed the ``Sigmoid`` *before* the expanding 1x1 conv, so the
    final gate was unbounded and the ReLU was squashed away. The canonical
    SE ordering — pool -> reduce conv -> ReLU -> expand conv -> Sigmoid —
    is used here, giving a gate in (0, 1). Note this changes the layer
    indices inside each Sequential, so checkpoints trained with the old
    ordering will not load.
    """

    def __init__(self, feature_channels=[256, 512, 512]):
        super(GlobalAttention, self).__init__()

        self.layers = nn.ModuleList()
        for feature_channel in feature_channels:  # one gate per feature scale
            self.layers.append(
                nn.Sequential(
                    nn.AdaptiveMaxPool2d(1),  # (b, c, 1, 1)
                    nn.Conv2d(feature_channel, feature_channel // 16, 1),  # squeeze, (b, c/16, 1, 1)
                    nn.ReLU(),
                    nn.Conv2d(feature_channel // 16, feature_channel, 1),  # excite, (b, c, 1, 1)
                    nn.Sigmoid(),  # bound the gate to (0, 1)
                )
            )

    def forward(self, features):  # tensor list, [(b, c, h, w)*n]
        features = [  # (b, c, 1, 1) * (b, c, h, w)
            layer(feature) * feature
            for layer, feature in zip(self.layers, features)
        ]

        return features


class PANet(nn.Module):
    """Path Aggregation Network over three feature scales.

    A top-down (downstream) pass fuses coarse features into finer ones,
    then a bottom-up (upstream) pass flows back to the coarse scales. The
    first two inputs are halved by 1x1 transforms before fusion; every
    output carries half the channels of its corresponding input.
    """

    def __init__(self, feature_channels):
        super(PANet, self).__init__()

        c3, c4, c5 = feature_channels[0], feature_channels[1], feature_channels[2]

        def bottleneck(channels, depth):
            # alternating 1x1 (halve) / 3x3 (restore) convs, `depth` layers,
            # always ending on a 1x1 at channels // 2
            convs = []
            for i in range(depth):
                if i % 2 == 0:
                    convs.append(Conv(channels, channels // 2, 1))
                else:
                    convs.append(Conv(channels // 2, channels, 3))
            return nn.Sequential(*convs)

        self.feature_transform3 = Conv(c3, c3 // 2, 1)
        self.feature_transform4 = Conv(c4, c4 // 2, 1)

        self.resample5_4 = Upsample(c5 // 2, c4 // 2)
        self.resample4_3 = Upsample(c4 // 2, c3 // 2)
        self.resample3_4 = Downsample(c3 // 2, c4 // 2)
        self.resample4_5 = Downsample(c4 // 2, c5 // 2)

        self.downstream_conv5 = bottleneck(c5, 3)
        self.downstream_conv4 = bottleneck(c4, 5)
        self.downstream_conv3 = bottleneck(c3, 5)

        self.upstream_conv4 = bottleneck(c4, 5)
        self.upstream_conv5 = bottleneck(c5, 5)
        self.__initialize_weights()

    def forward(self, features):
        p3 = self.feature_transform3(features[0])
        p4 = self.feature_transform4(features[1])
        p5 = features[2]

        # top-down path (coarse -> fine)
        down5 = self.downstream_conv5(p5)
        down4 = self.downstream_conv4(
            torch.cat([p4, self.resample5_4(down5)], dim=1)
        )
        down3 = self.downstream_conv3(
            torch.cat([p3, self.resample4_3(down4)], dim=1)
        )

        # bottom-up path (fine -> coarse)
        up4 = self.upstream_conv4(
            torch.cat([self.resample3_4(down3), down4], dim=1)
        )
        up5 = self.upstream_conv5(
            torch.cat([self.resample4_5(up4), down5], dim=1)
        )

        return [down3, up4, up5]

    def __initialize_weights(self):
        """Init convs with N(0, 0.01) and batch norms with weight 1 / bias 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


class PredictNet(nn.Module):
    """Per-scale prediction heads: a 3x3 conv expand plus a 1x1 projection.

    The i-th head takes a map with ``feature_channels[i] // 2`` channels
    (as produced by PANet), expands it to ``feature_channels[i]``, and
    projects to ``target_channels``.
    """

    def __init__(self, feature_channels, target_channels):
        super(PredictNet, self).__init__()

        heads = []
        for channels in feature_channels:
            heads.append(
                nn.Sequential(
                    Conv(channels // 2, channels, 3),
                    nn.Conv2d(channels, target_channels, 1),
                )
            )
        self.predict_conv = nn.ModuleList(heads)
        self.__initialize_weights()

    def forward(self, features):
        predicts = []
        for head, feature in zip(self.predict_conv, features):
            predicts.append(head(feature))

        return predicts

    def __initialize_weights(self):
        """Init convs with N(0, 0.01) and batch norms with weight 1 / bias 0."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0, 0.01)
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()


class RISFNet(nn.Module):
    """Radar-image sensor fusion network.

    Image features come from a CSPDarknet53 backbone; radar features come
    from one shared VGG-13 run per radar frame. Temporal position encoding,
    radar self-attention, and a global (SE-style) attention shared between
    modalities fuse the two streams before PANet aggregation and the
    per-scale prediction heads.

    BUGFIX: ``position_encoding`` previously zipped each frame's *per-scale*
    feature list against the *per-frame* ``positions`` list, so every frame
    received the same scale-indexed weights and the temporal encoding never
    distinguished frames (it also silently truncated when the lengths
    differed). Each frame ``i`` is now scaled by ``positions[i]`` across all
    of its feature maps, as the per-frame sin(n+k)/n construction intends.
    """

    def __init__(self, weight_path=None, out_channels=255, resume=False, showatt=False, radar_frame=cfg.TRAIN["RADAR_FRAME"]):
        super(RISFNet, self).__init__()
        self.showatt = showatt

        # Temporal Position Encoding weights: sin((n + k) / n) for
        # k = -(n-1), ..., 0 — one scalar per radar frame
        self.positions = [math.sin((radar_frame+k)/radar_frame) for k in [i-radar_frame+1 for i in range(radar_frame)]]

        # CSPDarknet53 backbone for the image stream
        self.backbone, feature_channels = _BuildCSPDarknet53( # [256, 512, 512]
            weight_path=weight_path, resume=resume
        )

        # attention heatmap (only built when it will be used)
        if self.showatt:
            self.attention = ContextBlock2d(feature_channels[-1], feature_channels[-1])

        # VGG-13 backbone for RPDM, shared across the n radar frames
        self.radar_backbone, radar_channels = _BuildVGG13() # [256, 512, 512]

        # Self Attention over the radar frames (takes a 2d tensor list)
        self.self_atten = SelfAttention(radar_channels)

        # Global Attention, shared between the image and radar features
        assert feature_channels == radar_channels, "channels mismatch"
        self.global_atten = GlobalAttention(feature_channels)

        # Path Aggregation Net (the paper uses an FPN here)
        mix_channels = [chann*2 for chann in feature_channels] # [512, 1024, 1024]
        self.panet = PANet(mix_channels)

        # prediction heads
        self.predict_net = PredictNet(mix_channels, out_channels)

    def forward(self, x, y): # x: image (b, c, h, w); y: stacked radar frames (b, c*n, h, w)
        b, c, h, w = x.shape
        atten = None

        features = self.backbone(x) # tensor list, [256, 512, 512]
        if self.showatt:
            features[-1], atten = self.attention(features[-1])

        # split y into n frames of c channels each.
        # NOTE(review): view(b, c, -1, h, w).unbind(dim=2) assumes y's channel
        # layout is channel-major ([c0f0, c0f1, ...]); if frames are stored
        # contiguously ([f0c0, f0c1, ...]) this interleaves frames — confirm
        # against the data loader.
        radar_list = y.view(b, c, -1, h, w).unbind(dim=2) # tensor list, [(b, c, h, w)*n]
        radars = [ # tensor 2d list, [[256, 512, 512]*n]
            self.radar_backbone(radar)
            for radar in radar_list
        ]

        # weight each frame by its temporal position scalar
        radars = self.position_encoding(radars, self.positions) # tensor 2d list, [[256, 512, 512]*n]

        radar_features = self.self_atten(radars) # tensor list, [256, 512, 512]

        features = self.global_atten(features) # [256, 512, 512]
        radar_features = self.global_atten(radar_features) # [256, 512, 512]

        # the two streams are concatenated on the channel dimension
        mix_features = [] # [512, 1024, 1024]
        for fea, r_fea in zip(features, radar_features):
            mix_features.append(torch.concat((fea, r_fea), dim=1))

        mix_features = self.panet(mix_features) # [512, 1024, 1024]

        predicts = self.predict_net(mix_features)

        return predicts, atten

    # Temporal Position Encoding: multiply every feature map of frame i by
    # positions[i] (one scalar per frame, oldest first)
    def position_encoding(self, radars, positions):
        assert len(radars) == len(positions), "radar frame mismatch"
        return [
            [feature * position for feature in frame]
            for frame, position in zip(radars, positions)
        ]

if __name__ == "__main__":
    cuda = torch.cuda.is_available()
    device = torch.device("cuda:{}".format(0) if cuda else "cpu")
    model = RISFNet().to(device)
    x = torch.randn(1, 3, 160, 160).to(device)
    y = torch.randn(1, 9, 160, 160).to(device)
    torch.cuda.empty_cache()
    while 1:
        predicts = model(x, y)
        print(predicts[0].shape)
        print(predicts[1].shape)
        print(predicts[2].shape)
