import torch
from torch import nn
from einops import rearrange
from .layers import TCN_GCN_unit, MulScale_NTUDGraph
# from models.backbones.layers import TCN_GCN_unit, MulScale_NTUDGraph


class DataBN(nn.Module):
    """Batch-normalize raw skeleton input and fold persons into the batch axis.

    Maintains one BatchNorm1d statistic per (person, joint, channel) triple,
    applied along the temporal axis.
    """

    def __init__(self, in_channels, num_point, num_person):
        super().__init__()
        # One normalization channel per (person, joint, coordinate) slot.
        self.bn = nn.BatchNorm1d(num_person * in_channels * num_point)

    def forward(self, x):
        """Map (N, C, T, V, M) -> (N * M, C, T, V)."""
        N, C, T, V, M = x.size()
        # (N, C, T, V, M) -> (N, M, V, C, T) -> (N, M*V*C, T); the grouping
        # order (m, v, c) matches the BN channel layout built in __init__.
        flat = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
        flat = self.bn(flat)
        # Undo the flattening and merge the person axis into the batch axis:
        # (N, M*V*C, T) -> (N, M, V, C, T) -> (N, M, C, T, V) -> (N*M, C, T, V)
        out = flat.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous()
        return out.view(N * M, C, T, V)


class Input_Branch(nn.Module):
    """One input stream: data BN followed by three stacked TCN-GCN units.

    The first unit projects ``in_channels`` -> ``out_channels`` with a plain
    ('Basic', non-residual) block; the remaining two keep the channel count
    and use the configured ``block_name``.
    """

    def __init__(self, in_channels, out_channels, kernel_size, A,
                 block_name, num_person, num_point, **kwargs):
        super(Input_Branch, self).__init__()
        self.bn = DataBN(in_channels, num_point, num_person)

        units = [
            TCN_GCN_unit(in_channels, out_channels, A, kernel_size,
                         block_name='Basic', residual=False, **kwargs),
            TCN_GCN_unit(out_channels, out_channels, A, kernel_size,
                         block_name, **kwargs),
            TCN_GCN_unit(out_channels, out_channels, A, kernel_size,
                         block_name, **kwargs),
        ]
        self.layers = nn.ModuleList(units)

    def forward(self, x):
        """Map (N, C, T, V, M) input through BN and the three units."""
        out = self.bn(x)
        for unit in self.layers:
            out = unit(out)
        return out

class Graph_pooling(nn.Module):
    """Pool joint-level features [B, C, T, V] into part-level features [B, C, T, P].

    Two branches are fused by an element-wise add + ReLU:
      1. learned pooling: ``part_matrix`` scaled by a trainable edge-importance
         mask, followed by a 1x1 conv and BN;
      2. attention pooling: plain (unweighted) pooling re-weighted by a
         squeeze-and-excitation style per-(channel, part) softmax attention.
    """

    def __init__(self, in_dims, part_matrix):
        """
        Args:
            in_dims: number of feature channels C.
            part_matrix: [V, P] joint-to-part assignment matrix.
        """
        super(Graph_pooling, self).__init__()
        num_nodes, num_part = part_matrix.size()
        # Fix: register as a non-persistent buffer so the matrix follows
        # .to(device)/.cuda()/.half() with the module, without changing the
        # state_dict (old checkpoints still load). Previously it was a plain
        # attribute and only worked because forward() moved it every call.
        self.register_buffer('part_matrix', part_matrix, persistent=False)
        self.edge_importance = nn.Parameter(torch.ones_like(part_matrix))
        self.gcn = nn.Conv2d(in_dims, in_dims, 1, bias=False)
        inter_channel = in_dims // 4
        # Attention head: global pool -> bottleneck -> one logit per
        # (channel, part) pair.
        self.part_atten = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_dims, inter_channel, 1, bias=False),
            nn.BatchNorm2d(inter_channel),
            nn.ReLU(inplace=True),
            nn.Conv2d(inter_channel, in_dims * num_part, 1, bias=False),
        )
        self.softmax = nn.Softmax(dim=-1)
        self.bn = nn.BatchNorm2d(in_dims)
        self.bn_att = nn.BatchNorm2d(in_dims)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """x: [B, C, T, V]  ->  [B, C, T, P]."""
        # Kept as a safety net for callers that move inputs manually; the
        # buffer normally already lives on x's device.
        part_matrix = self.part_matrix.to(x.device)
        # Branch 1: importance-weighted pooling + 1x1 conv + BN.
        parm_parts_feats = torch.matmul(x, part_matrix * self.edge_importance)
        parm_parts_feats = self.gcn(parm_parts_feats)
        parm_parts_feats = self.bn(parm_parts_feats)

        # Branch 2: plain pooling feeds the attention head.
        parts_feats = torch.matmul(x, part_matrix)  # [B,C,T,p],Sum
        N, C, T, P = parts_feats.size()
        # Softmax over parts -> per-(channel, part) weights, broadcast over T.
        parts_imp = self.softmax(self.part_atten(parts_feats).view(N, C, P))
        parts_imp = parts_feats * parts_imp.unsqueeze(-2)
        parts_imp = self.bn_att(parts_imp)
        outs = self.relu(parm_parts_feats + parts_imp)
        return outs


class MSSTGCN_V1(nn.Module):
    """Multi-scale spatial-temporal GCN backbone.

    Consumes input of shape (N, I, C, T, V, M) where I must equal
    ``num_branches``: each slice x[:, i] is fed to its own Input_Branch,
    the branch outputs are concatenated along channels, then processed by
    three levels of TCN-GCN units with graph pooling (joints -> parts)
    between levels. Returns globally pooled features of shape (N, C', 1, 1)
    averaged over the M persons.

    NOTE(review): ``graph_args=dict()`` is a mutable default argument; it is
    only read here, but consider ``graph_args=None`` in a future change.
    """

    def __init__(self, in_channels=3,
                 base_channel=64,
                 kernel_size=9,
                 graph='ntu_rgb_d',
                 graph_args=dict(),
                 num_point=25,
                 num_person=1,
                 num_branches=2,
                 block_name='Bottleneck',
                 **kwargs):
        super(MSSTGCN_V1, self).__init__()

        # Only the NTU RGB+D multi-scale graph is supported.
        if graph == 'ntu_rgb_d':
            self.graph = MulScale_NTUDGraph(**graph_args)
        else:
            raise ValueError()

        # torch.tensor deep copy, does not share memory
        # Adjacency matrices for the three graph scales, stored as frozen
        # Parameters so they move/save with the module but are not trained.
        self.A_1 = nn.Parameter(torch.tensor(self.graph.A_1,dtype= torch.float), requires_grad=False)
        self.A_2 = nn.Parameter(torch.tensor(self.graph.A_2,dtype= torch.float), requires_grad=False)
        self.A_3 = nn.Parameter(torch.tensor(self.graph.A_3,dtype= torch.float), requires_grad=False)

        # Joint-to-part assignment masks for the two pooling stages. Plain
        # tensors (not buffers); Graph_pooling moves them to the input's
        # device inside forward().
        self.gpool_mask_1 = torch.tensor(self.graph.mask_norm_2,dtype= torch.float)
        self.gpool_mask_2 = torch.tensor(self.graph.mask_norm_3,dtype= torch.float)

        self.base_channel = base_channel
        # input branches
        # One independent branch per input modality/stream; outputs are
        # channel-concatenated in forward().
        self.input_branches = nn.ModuleList([
            Input_Branch(in_channels, base_channel, kernel_size, self.A_1,
                         block_name, num_person, num_point, **kwargs)
            for _ in range(num_branches)
        ])
        branch_channel = base_channel * num_branches

        # main stream
        # Level 1 operates on the full joint graph (A_1), halving T (stride=2).
        self.level_1 = nn.Sequential(
            TCN_GCN_unit(branch_channel, self.base_channel * 2, self.A_1, kernel_size,
                         block_name, stride=2, **kwargs),
            TCN_GCN_unit(self.base_channel * 2, self.base_channel * 2, self.A_1, kernel_size,
                         block_name, **kwargs),
            TCN_GCN_unit(self.base_channel * 2, self.base_channel * 2, self.A_1, kernel_size,
                         block_name, **kwargs)
        )
        self.gpool_1 = Graph_pooling(self.base_channel * 2, self.gpool_mask_1)
        # Level 2 operates on the part-level graph (A_2), halving T again.
        self.level_2 = nn.Sequential(
            TCN_GCN_unit(self.base_channel * 2, self.base_channel * 4, self.A_2, kernel_size,
                         block_name, stride=2, **kwargs),
            TCN_GCN_unit(self.base_channel * 4, self.base_channel * 4, self.A_2, kernel_size,
                         block_name, **kwargs)
        )
        self.gpool_2 = Graph_pooling(self.base_channel * 4, self.gpool_mask_2)

        # Level 3 operates on the coarsest graph (A_3).
        self.level_3 = nn.Sequential(
            TCN_GCN_unit(self.base_channel * 4, self.base_channel * 4, self.A_3, kernel_size,
                         block_name, **kwargs),
            # TCN_GCN_unit(self.base_channel * 4, self.base_channel * 4, self.A_3, kernel_size,
            #              block_name, **kwargs)
        )
        # output
        self.global_pooling = nn.AdaptiveAvgPool2d(1)
        # init parameters
        init_param(self.modules())

    def forward(self, x):
        # x: (N, I, C, T, V, M); I must equal len(self.input_branches).
        N, I, C, T, V, M = x.size()
        # input branches
        # Each branch folds persons into the batch axis, so every element of
        # x_cat has shape (N*M, base_channel, T, V).
        x_cat = []
        for i, branch in enumerate(self.input_branches):
            x_cat.append(branch(x[:, i, :, :, :, :]))
        x = torch.cat(x_cat, dim=1)


        x = self.level_1(x)
        x = self.gpool_1(x)
        x = self.level_2(x)
        x = self.gpool_2(x)
        x = self.level_3(x)
        # output
        x = self.global_pooling(x) # (N*M, C ,1,1)
        # Split persons back out of the batch axis and average over them.
        x = rearrange(x, '(b m) c t v -> b m c t v', m=M).mean(axis=1)  # N,C,1,1
        return x


def init_param(modules):
    """Initialize weights/biases of the given modules in place.

    Conv1d/Conv2d: Kaiming-normal weights (fan_out, ReLU), zero bias.
    BatchNorm1d/2d: weight 1, bias 0.
    Linear: normal weights (std=0.001), zero bias.
    Other module types are left untouched.
    """
    for module in modules:
        if isinstance(module, (nn.Conv1d, nn.Conv2d)):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)
        elif isinstance(module, (nn.BatchNorm1d, nn.BatchNorm2d)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
        elif isinstance(module, nn.Linear):
            nn.init.normal_(module.weight, std=0.001)
            if module.bias is not None:
                nn.init.constant_(module.bias, 0)




if __name__ == '__main__':
    # Smoke test: random two-stream input (B, I, C, T, V, M) with
    # I == num_branches (default 2).
    B, I, C, T, V, M = 2, 2, 3, 300, 25, 1
    x = torch.rand(size=[B, I, C, T, V, M])
    # Fix: the class defined in this file is MSSTGCN_V1; the previous call
    # to MSSTGCN() raised NameError.
    md = MSSTGCN_V1()
    y = md(x)
    print(y.shape)



