import torch
import torch.nn as nn
from einops import rearrange


class TCN_GCN_unit(nn.Module):
    """One spatial-graph-conv + temporal-conv stage of the network.

    Chains a spatial GCN block and a temporal TCN block, both chosen by
    ``block_name`` ('Bottleneck' or 'Basic').  The fixed adjacency ``A``
    (shape: graph classes x V x V) is modulated element-wise by a learnable
    edge-importance mask before each spatial convolution.

    Args:
        in_channels / out_channels: feature widths of the stage.
        A: adjacency tensor; A.size(0) is the number of graph classes.
        kernel_size: int or list of ints, forwarded to the temporal block.
        block_name: 'Bottleneck' or 'Basic'; anything else raises
            NotImplementedError.
        residual, stride: forwarded to the sub-blocks.
    """

    def __init__(self, in_channels, out_channels, A, kernel_size=9, block_name='Bottleneck',
                 residual=True, stride=1, **kwargs):
        super(TCN_GCN_unit, self).__init__()
        # FIX: register A as a (non-persistent) buffer instead of a plain
        # attribute so it moves with the module on .to(device)/.cuda()/dtype
        # casts.  The original `self.A = A` left A on its construction device,
        # making `self.A * self.edge` fail once the module is moved to GPU.
        # persistent=False keeps A out of the state_dict, so existing
        # checkpoints still load unchanged.
        self.register_buffer('A', A, persistent=False)
        graph_num = A.size(0)
        if block_name == 'Bottleneck':
            self.gcn = Spatial_Bottleneck_Block(in_channels, out_channels, graph_num, residual, **kwargs)
            self.tcn = Temporal_Bottleneck_Block(out_channels, out_channels, kernel_size, stride, residual, **kwargs)
        elif block_name == 'Basic':
            self.gcn = Spatial_Basic_Block(in_channels, out_channels, graph_num, residual, **kwargs)
            self.tcn = Temporal_Basic_Block(out_channels, out_channels, kernel_size, stride, residual, **kwargs)
        else:
            raise NotImplementedError

        # Learnable edge-importance mask, same shape as A, initialized to 1
        # (i.e. starts as the unmodified adjacency).
        self.edge = nn.Parameter(torch.ones_like(A))

    def forward(self, x):
        # Spatial aggregation with the masked adjacency, then temporal conv.
        x = self.gcn(x, self.A * self.edge)
        x = self.tcn(x)
        return x


class Temporal_Basic_Block(nn.Module):
    """Basic temporal block: (k x 1) grouped conv + BN, plus a shortcut.

    The shortcut is zero when ``residual`` is False, an identity when the
    shapes already match, and a strided 1x1 projection otherwise.  With an
    int ``kernel_size`` a single temporal conv is used; with a list, a
    multi-scale inception stack.
    """

    def __init__(self, in_chs, out_chs, kernel_size, stride=1, residual=False, **kwargs):
        super(Temporal_Basic_Block, self).__init__()

        # Shortcut branch.
        if not residual:
            self.residual = lambda x: 0
        elif in_chs == out_chs and stride == 1:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_chs, out_chs, 1, (stride, 1), bias=False),
                nn.BatchNorm2d(out_chs),
            )

        # Main branch: single-kernel temporal conv or multi-scale stack.
        if isinstance(kernel_size, int):
            pad = ((kernel_size - 1) // 2, 0)  # 'same' padding along time
            self.conv = nn.Sequential(
                nn.Conv2d(in_chs, out_chs, (kernel_size, 1), (stride, 1), pad,
                          groups=2, bias=False),
                nn.BatchNorm2d(out_chs)
            )
        elif isinstance(kernel_size, list):
            self.conv = MultiScale_TemporalConv(in_chs, out_chs, kernel_size, stride)
        else:
            raise ValueError('the kernel_size of tcn is not correct')

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        shortcut = self.residual(x)
        out = self.conv(x)
        return self.relu(out + shortcut)


class Temporal_Bottleneck_Block(nn.Module):
    """Temporal bottleneck: 1x1 reduce -> temporal conv -> 1x1 expand, plus shortcut.

    The internal width is ``out_chs // reduction``.  The shortcut is zero
    when ``residual`` is False, an identity when shapes match, and a strided
    1x1 projection otherwise.
    """

    def __init__(self, in_chs, out_chs, kernel_size, stride=1, residual=False, reduction=4, **kwargs):
        super(Temporal_Bottleneck_Block, self).__init__()
        mid_chs = out_chs // reduction

        # Shortcut branch.
        if not residual:
            self.residual = lambda x: 0
        elif in_chs == out_chs and stride == 1:
            self.residual = nn.Sequential()
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_chs, out_chs, 1, stride=(stride, 1), padding=0, bias=False),
                nn.BatchNorm2d(out_chs),
            )

        # 1x1 channel reduction into the bottleneck.
        self.conv_down = nn.Conv2d(in_chs, mid_chs, 1, bias=False)
        self.bn_down = nn.BatchNorm2d(mid_chs)

        # Temporal conv at bottleneck width: single kernel or multi-scale.
        if isinstance(kernel_size, int):
            pad = ((kernel_size - 1) // 2, 0)  # 'same' padding along time
            self.conv = nn.Sequential(
                nn.Conv2d(mid_chs, mid_chs, (kernel_size, 1), (stride, 1), pad,
                          groups=2, bias=False),
                nn.BatchNorm2d(mid_chs)
            )
        elif isinstance(kernel_size, list):
            self.conv = MultiScale_TemporalConv(mid_chs, mid_chs, kernel_size, stride)
        else:
            raise ValueError('the kernel_size of tcn is not correct')

        # 1x1 expansion back to the output width.
        self.conv_up = nn.Conv2d(mid_chs, out_chs, 1, bias=False)
        self.bn_up = nn.BatchNorm2d(out_chs)
        self.relu = nn.ReLU()

    def forward(self, x):
        shortcut = self.residual(x)
        out = self.relu(self.bn_down(self.conv_down(x)))
        out = self.relu(self.conv(out))
        out = self.bn_up(self.conv_up(out))
        return self.relu(out + shortcut)


class StaticGraphConv(nn.Module):
    """Graph convolution with a fixed multi-class adjacency.

    A single 1x1 conv produces ``s_kernel_size`` groups of feature maps —
    one per spatial class — which are aggregated over the graph with the
    matching adjacency slice ``A[k]`` via einsum, then batch-normalized.
    """

    def __init__(self, in_channels, out_channels, max_graph_distance):
        super(StaticGraphConv, self).__init__()

        # Number of spatial classes; each class k has its own weights and
        # its own adjacency slice A[k].
        # NOTE(review): the original comment ("distance = 0 for class 0,
        # distance = 1 for class 1, ...") suggests distances 0..max, which
        # would need max_graph_distance + 1 classes; the original value is
        # kept here — confirm against how A is constructed by the caller.
        self.s_kernel_size = max_graph_distance

        # One 1x1 conv computes all per-class projections at once.
        self.gcn = nn.Conv2d(in_channels, out_channels * self.s_kernel_size, 1)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x, A):
        """x: (N, C, T, V); A: (K, V, V) with K == self.s_kernel_size."""
        # Project to K * out_channels channels in one shot.
        x = self.gcn(x)
        # Split the class axis out: (n, k*c, t, v) -> (n, k, c, t, v).
        # FIX: the original unpacked x.size() into unused locals and used
        # einops.rearrange; a plain view is equivalent here (conv output is
        # contiguous) and drops the third-party call.
        n, kc, t, v = x.size()
        x = x.view(n, self.s_kernel_size, kc // self.s_kernel_size, t, v)
        # Spatial graph convolution: aggregate neighbours per class, then
        # sum over classes.
        x = torch.einsum('nkctv,kvw->nctw', (x, A)).contiguous()
        x = self.bn(x)
        return x


class Spatial_Basic_Block(nn.Module):
    """Basic spatial block: one static graph convolution plus a shortcut.

    The shortcut is zero when ``residual`` is False, an identity when the
    channel counts match, and a 1x1 projection otherwise.
    """

    def __init__(self, in_channels, out_channels, max_graph_distance, residual=False, **kwargs):
        super(Spatial_Basic_Block, self).__init__()

        # Shortcut branch.
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
            )

        self.conv = StaticGraphConv(in_channels, out_channels, max_graph_distance)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        shortcut = self.residual(x)
        out = self.conv(x, A)
        return self.relu(out + shortcut)


class Spatial_Bottleneck_Block(nn.Module):
    """Spatial bottleneck: 1x1 reduce -> graph conv -> 1x1 expand, plus shortcut.

    The internal width is ``out_channels // reduction``.  The shortcut is
    zero when ``residual`` is False, an identity when the channel counts
    match, and a 1x1 projection otherwise.
    """

    def __init__(self, in_channels, out_channels, max_graph_distance, residual=True,
                 reduction=4, **kwargs):
        super(Spatial_Bottleneck_Block, self).__init__()
        mid_channels = out_channels // reduction

        # Shortcut branch.
        if not residual:
            self.residual = lambda x: 0
        elif in_channels == out_channels:
            self.residual = lambda x: x
        else:
            self.residual = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, 1, bias=False),
                nn.BatchNorm2d(out_channels),
            )

        # 1x1 reduction into the bottleneck width.
        self.conv_down = nn.Sequential(
            nn.Conv2d(in_channels, mid_channels, 1, bias=False),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU()
        )

        # Graph convolution at bottleneck width.
        self.sconv = StaticGraphConv(mid_channels, mid_channels, max_graph_distance)

        # 1x1 expansion back to the output width.
        self.conv_up = nn.Sequential(
            nn.Conv2d(mid_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels)
        )

        self.relu = nn.ReLU(inplace=True)

    def forward(self, x, A):
        shortcut = self.residual(x)
        out = self.conv_down(x)
        out = self.relu(self.sconv(out, A))
        out = self.conv_up(out)
        return self.relu(out + shortcut)


class MultiScale_TemporalConv(nn.Module):
    """Inception-style multi-scale temporal conv with selective-kernel fusion.

    Runs one conv-bn-relu branch per kernel size in ``kernel_list``, stacks
    the branch outputs together with the raw input along a new 'scale' axis,
    and fuses the scales with an SK attention layer followed by batch norm.

    NOTE(review): the raw input x is concatenated alongside the branch
    outputs, which appears to require in_channels == out_channels and
    stride == 1 for the concat/view to line up — confirm against callers.
    """

    def __init__(self, in_channels, out_channels, kernel_list, stride):
        super(MultiScale_TemporalConv, self).__init__()
        self.kernels = kernel_list
        self.M = len(kernel_list)
        self.features = out_channels
        attn_dims = max(int(self.features // 2), 16)  # SK squeeze width, floor 16

        # One temporal conv branch per kernel size.
        self.inception = nn.ModuleList()
        for k in self.kernels:
            self.inception.append(TConvBnAct(in_channels, self.features, k,
                                             stride=stride, gr=2, dilation=1))

        # +1 scale for the identity (raw input) branch.
        self.scale = len(self.inception) + 1
        self.sk_layer = nn.Sequential(
            Sk_layer(self.features, attn_dims, self.scale),
            nn.BatchNorm2d(self.features)
        )

    def forward(self, x):
        batch = x.shape[0]
        # Identity branch first, then one output per conv branch.
        branches = [x] + [layer(x) for layer in self.inception]
        stacked = torch.cat(branches, dim=1)
        # (B, scale*C, T, V) -> (B, scale, C, T, V) for the SK fusion.
        stacked = stacked.view(batch, self.scale, self.features,
                               stacked.shape[2], stacked.shape[3])
        return self.sk_layer(stacked)


class TConvBnAct(nn.Module):
    """Temporal (k x 1) conv followed by batch norm and an optional activation.

    ``act_layer=None`` disables the activation (replaced by a no-op).
    """

    def __init__(self, in_chs, out_chs, kernel_size, act_layer=nn.ReLU, stride=1, gr=1, dilation=1):
        super(TConvBnAct, self).__init__()
        # 'same' padding along the time axis for the dilated kernel; no
        # padding along the joint axis.
        pad = (int(dilation * (kernel_size - 1) // 2), 0)
        self.conv = nn.Conv2d(in_chs, out_chs,
                              kernel_size=(kernel_size, 1),
                              stride=(stride, 1),
                              padding=pad,
                              dilation=dilation,
                              bias=False,
                              groups=gr)
        self.bn1 = nn.BatchNorm2d(out_chs)
        if act_layer is None:
            self.act1 = nn.Sequential()  # no-op activation
        else:
            self.act1 = act_layer(inplace=True)

    def forward(self, x):
        return self.act1(self.bn1(self.conv(x)))


class Sk_layer(nn.Module):
    """Selective-kernel attention fusion over stacked multi-scale features.

    Input is (B, scale, C, T, V).  The element-wise sum over scales is
    globally pooled, squeezed through fc1, expanded by fc2 to per-scale
    channel logits, and softmax-normalized across the scale axis; the
    resulting weights fuse the scales into a single (B, C, T, V) output.
    """

    def __init__(self, in_dims, mid_dims, scale):
        super(Sk_layer, self).__init__()
        self.gap = nn.AdaptiveAvgPool2d(1)
        # Squeeze: C -> mid_dims.
        self.fc1 = nn.Sequential(
            nn.Conv2d(in_dims, mid_dims, kernel_size=1, bias=False),
            nn.BatchNorm2d(mid_dims),
            nn.ReLU(inplace=True)
        )
        # Expand: mid_dims -> C * scale (one logit per scale per channel).
        self.fc2 = nn.Conv2d(mid_dims, in_dims * scale, 1, 1, bias=False)
        self.softmax = nn.Softmax(dim=2)  # normalize over the scale axis

    def forward(self, x):
        # x: [B, scale, C, T, V]
        batch, num_scale, channels, _, _ = x.size()
        pooled = self.gap(x.sum(dim=1))                 # [B, C, 1, 1]
        squeezed = self.fc1(pooled)                     # [B, mid, 1, 1]
        logits = self.fc2(squeezed).view(batch, channels, num_scale, 1, 1)
        weights = self.softmax(logits).transpose(1, 2)  # [B, scale, C, 1, 1]
        return (x * weights).sum(dim=1)                 # [B, C, T, V]
