import torch
from torch import nn as nn
from torch.nn import functional as F

from mynn.utils.registry import MODEL_REGISTRY

from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn.init as init
import functools

from mynn.ops.dcn.deform_conv import ModulatedDeformConvPack as DCN

# from dcn.modules.modulated_deform_conv import  ModulatedDeformConvPack as DCN
'''
Improvement: a dual network is introduced on top of the 2.27M-parameter model.
'''


# Self-calibrated convolution (used for the lightweight algorithm design)
class SCConv(nn.Module):
    """Self-calibrated convolution block.

    SCPA is modified from SCNet (Jiang-Jiang Liu et al. Improving Convolutional
    Networks with Self-Calibrated Convolutions. In CVPR, 2020).
    Github: https://github.com/MCG-NKU/SCNet

    The input is reduced into two half-width branches:
      * branch a: a plain 3x3 conv (k1);
      * branch b: self-calibration -- k2 runs on a 4x-downsampled copy, its
        upsampled output forms a sigmoid gate for k3's output, then k4 refines.
    The two branch outputs are concatenated and fused back to ``nf`` channels.

    Args:
        nf: number of input/output feature channels.
        groups: conv groups for k2/k3/k4.
        padding: padding for k2/k3/k4.
        reduction: channel reduction factor for the two branches.
        stride: stride for k1/k4.
        dilation: dilation for the 3x3 convs.
    """
    def __init__(self, nf, groups=1, padding=1, reduction=2, stride=1, dilation=1):
        super(SCConv, self).__init__()
        group_width = nf // reduction

        self.conv1_a = nn.Conv2d(nf, group_width, kernel_size=1, bias=False)
        self.conv1_b = nn.Conv2d(nf, group_width, kernel_size=1, bias=False)

        self.k1 = nn.Sequential(
            nn.Conv2d(group_width,
                      group_width,
                      kernel_size=3,
                      stride=stride,
                      padding=dilation,
                      dilation=dilation,
                      bias=False))

        # k2 operates on a 4x-downsampled copy; its output is upsampled back
        # and turned into a sigmoid attention map in forward().
        self.k2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=4, stride=4),
            nn.Conv2d(group_width,
                      group_width,
                      kernel_size=3,
                      stride=1,
                      padding=padding,
                      dilation=dilation,
                      groups=groups,
                      bias=False))
        self.k3 = nn.Conv2d(group_width,
                            group_width,
                            kernel_size=3,
                            stride=1,
                            padding=padding,
                            dilation=dilation,
                            groups=groups,
                            bias=False)
        self.k4 = nn.Conv2d(group_width,
                            group_width,
                            kernel_size=3,
                            stride=stride,
                            padding=padding,
                            dilation=dilation,
                            groups=groups,
                            bias=False)

        # Fuse the concatenation of the two branches back to nf channels.
        # Bugfix: the concatenation always has 2 * group_width channels;
        # the original used ``group_width * reduction``, which only matches
        # when reduction == 2 (identical for the default configuration).
        self.conv3 = nn.Conv2d(group_width * 2, nf, kernel_size=1, bias=False)

        self.lrelu = nn.LeakyReLU(negative_slope=0.2, inplace=True)

    def forward(self, x):
        out_a = self.conv1_a(x)
        out_b = self.conv1_b(x)
        out_a = self.lrelu(out_a)
        out_b = self.lrelu(out_b)

        out_a = self.k1(out_a)
        # Self-calibration: gate k3's output with a sigmoid map built from the
        # identity plus the upsampled low-resolution context of k2.
        out_bb = torch.sigmoid(torch.add(out_b, F.interpolate(self.k2(out_b), out_b.size()[2:])))
        out_bb = torch.mul(self.k3(out_b), out_bb)
        out_bb = self.k4(out_bb)

        out_a = self.lrelu(out_a)
        out_bb = self.lrelu(out_bb)

        out = self.conv3(torch.cat([out_a, out_bb], dim=1))

        return out


class DC_Align(nn.Module):
    '''Deformable-convolution alignment module.

    Predicts sampling offsets from the concatenation of a neighboring frame's
    features and the reference frame's features, then warps the neighbor onto
    the reference with a modulated deformable convolution.
    '''
    def __init__(self, nf=64, dc_kernel=3, padding=1, groups=8):
        super(DC_Align, self).__init__()
        # Offset head: concatenated (neighbor, reference) features -> nf.
        self.L1_offset_conv1 = nn.Conv2d(nf * 2, nf, 3, 1, 1, bias=True)
        # Self-calibrated refinement of the raw offset features.
        self.sc = SCConv(nf)
        self.L1_offset_conv2 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        # extra_offset_mask=True: the DCN pack consumes [features, offsets].
        self.L1_dcnpack = DCN(nf,
                              nf,
                              dc_kernel,
                              stride=1,
                              padding=padding,
                              dilation=1,
                              deformable_groups=groups,
                              extra_offset_mask=True)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def forward(self, nbr_fea_l, ref_fea_l):
        '''Align a neighboring frame's features to the reference frame.

        nbr_fea_l, ref_fea_l: [B, C, H, W] feature maps.
        '''
        offset = torch.cat([nbr_fea_l, ref_fea_l], dim=1)
        offset = self.lrelu(self.L1_offset_conv1(offset))
        offset = self.sc(offset)
        offset = self.lrelu(self.L1_offset_conv2(offset))
        return self.L1_dcnpack([nbr_fea_l, offset])


class dual_network(nn.Module):
    """Small downsampling network (dual branch).

    Maps a single-channel image down by a factor of 4 via two stride-2 3x3
    convolutions with a LeakyReLU in between, mirroring the main network's
    upscaling path.
    """
    def __init__(self):
        super(dual_network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=2, padding=1, bias=False)
        self.conv2 = nn.Conv2d(32, 1, kernel_size=3, stride=2, padding=1, bias=False)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        return self.conv2(self.lrelu(self.conv1(x)))


class make_dense(nn.Module):
    """One dense layer: conv + LeakyReLU, with the output concatenated onto
    the input along the channel dimension (DenseNet-style growth)."""
    def __init__(self, channels_in, channels_out, kernel_size=3):
        super(make_dense, self).__init__()
        self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
        # (kernel_size - 1) // 2 gives "same" padding for odd kernels.
        self.conv = nn.Conv2d(channels_in,
                              channels_out,
                              kernel_size=kernel_size,
                              padding=(kernel_size - 1) // 2,
                              bias=False)

    def forward(self, x):
        grown = self.leaky_relu(self.conv(x))
        return torch.cat((x, grown), 1)


class RDB(nn.Module):
    """Residual dense block with a learnable residual scale.

    The scale parameter starts at 0, so the block is an exact identity at
    initialization and gradually learns how much of the dense-branch output
    to mix in.
    """
    def __init__(self, nDenselayer, channels, growth):
        super(RDB, self).__init__()
        layers = []
        in_ch = channels
        for _ in range(nDenselayer):
            layers.append(make_dense(in_ch, growth))
            in_ch += growth
        self.dense_layers = nn.Sequential(*layers)
        # 1x1 conv squeezes the densely grown channels back to ``channels``.
        self.conv_1x1 = nn.Conv2d(in_ch, channels, kernel_size=1, padding=0, bias=False)
        self.weight = nn.Parameter(torch.Tensor([0]))

    def forward(self, x):
        out = self.conv_1x1(self.dense_layers(x))
        return out * self.weight + x


class Channel_attention(nn.Module):
    """Squeeze-and-excitation style channel attention.

    Globally average-pools the input to a per-channel descriptor, passes it
    through a 1x1 bottleneck (in_c -> out_c -> in_c), and rescales the input
    by the resulting sigmoid gate.
    """
    def __init__(self, in_c, out_c):
        super(Channel_attention, self).__init__()
        self.conv_c1 = nn.Conv2d(in_c, out_c, kernel_size=1, bias=False)
        self.conv_c2 = nn.Conv2d(out_c, in_c, kernel_size=1, bias=False)
        self.lrelu = nn.LeakyReLU(0.1, inplace=True)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = F.adaptive_avg_pool2d(x, (1, 1))
        gate = self.lrelu(self.conv_c1(gate))
        gate = self.conv_c2(gate)
        gate = self.sigmoid(gate)
        return torch.mul(gate, x)


def activation(act_type, inplace=True, neg_slope=0.05, n_prelu=1):
    """Build an activation layer by (case-insensitive) name.

    Supported: 'relu', 'lrelu' (LeakyReLU with ``neg_slope``) and 'prelu'
    (``n_prelu`` parameters initialized to ``neg_slope``).

    Raises:
        NotImplementedError: for any other name.
    """
    act_type = act_type.lower()
    if act_type == 'relu':
        return nn.ReLU(inplace)
    if act_type == 'lrelu':
        return nn.LeakyReLU(neg_slope, inplace)
    if act_type == 'prelu':
        return nn.PReLU(num_parameters=n_prelu, init=neg_slope)
    raise NotImplementedError('activation layer [{:s}] is not found'.format(act_type))


def conv_layer(in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
    """2D convolution with "same"-style padding derived from the kernel size
    and dilation (spatial size is preserved when stride == 1)."""
    padding = (kernel_size - 1) // 2 * dilation
    return nn.Conv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride,
                     padding=padding,
                     bias=bias,
                     dilation=dilation,
                     groups=groups)


class shuffle_RDB(nn.Module):
    """Residual block with channel shuffling and feature distillation.

    After an initial conv whose output is concatenated with the input, each
    step shuffles channels, splits them into a "distilled" part that is kept
    and a "remaining" part that is convolved, and concatenates again. A 1x1
    bottleneck fuses everything and a residual connection adds the input back.

    Args:
        concat_channels: channel count after each concatenation; the block
            input is expected to carry ``concat_channels // 2`` channels, and
            ``concat_channels`` must be divisible by 8 (shuffle group count).
        distillation_rate: fraction of channels kept (distilled) per split.
    """
    def __init__(self, concat_channels, distillation_rate=0.5):
        super(shuffle_RDB, self).__init__()
        self.distilled_channels = int(concat_channels * distillation_rate)
        self.remaining_channels = int(concat_channels - self.distilled_channels)
        in_channels = concat_channels // 2  # channels of the block input
        # Bugfix/generalization: the channel counts were hard-coded to 64/128
        # and silently ignored the constructor arguments; they are now derived,
        # which is identical for the default (128, 0.5) configuration.
        self.conv1 = nn.Conv2d(in_channels, concat_channels - in_channels, 3, 1, 1)
        self.conv2 = nn.Conv2d(self.remaining_channels, self.remaining_channels, 3, 1, 1)
        self.conv3 = nn.Conv2d(self.remaining_channels, self.remaining_channels, 3, 1, 1)
        self.conv4 = nn.Conv2d(self.remaining_channels, self.remaining_channels, 3, 1, 1)
        self.conv5 = nn.Conv2d(self.remaining_channels, self.remaining_channels, 3, 1, 1)
        self.bottleneck = nn.Conv2d(concat_channels, in_channels, 1, 1, 0)
        self.act = activation('lrelu', neg_slope=0.1)

    def forward(self, x):
        out = torch.cat((x, self.act(self.conv1(x))), 1)
        # Four identical shuffle -> split -> conv -> concat stages.
        for conv in (self.conv2, self.conv3, self.conv4, self.conv5):
            out = channel_shuffle(out, 8)
            distilled, remaining = torch.split(
                out, (self.distilled_channels, self.remaining_channels), dim=1)
            out = torch.cat((distilled, self.act(conv(remaining))), 1)
        return self.bottleneck(out) + x


# Channel shuffle
def channel_shuffle(x, groups):
    """Interleave channels across ``groups`` (ShuffleNet-style).

    Reshapes [B, g*c, H, W] into (g, c) channel groups, transposes to (c, g)
    and flattens back, so channels from different groups become adjacent.
    """
    b, ch, h, w = x.data.size()
    per_group = ch // groups

    grouped = x.view(b, groups, per_group, h, w)
    # contiguous() is required between transpose() and the final view().
    # See https://github.com/pytorch/pytorch/issues/764
    shuffled = grouped.transpose(1, 2).contiguous()
    return shuffled.view(b, -1, h, w)


class SRnet(nn.Module):
    """Reconstruction network.

    A cascade of five shuffle-RDB blocks whose outputs are densely
    aggregated, followed by channel attention, sub-pixel (PixelShuffle)
    upsampling, and a refinement tail producing a single-channel image.
    """
    def __init__(self, upscale_factor):
        super(SRnet, self).__init__()
        self.conv = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.RDB_1 = shuffle_RDB(128)
        self.RDB_2 = shuffle_RDB(128)
        self.RDB_3 = shuffle_RDB(128)
        self.RDB_4 = shuffle_RDB(128)
        self.RDB_5 = shuffle_RDB(128)

        self.conv_2 = nn.Conv2d(384, 64, 1, 1, bias=True)
        self.conv_3 = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
        # Expand channels for sub-pixel upsampling by ``upscale_factor``.
        self.conv_4 = nn.Conv2d(64, upscale_factor**2 * 64, 3, 1, 1, bias=True)
        self.shuffle_x4 = nn.PixelShuffle(upscale_factor=upscale_factor)
        self.reconstrcut = nn.Conv2d(64, 64, 3, 1, 1, bias=True)
        self.act = activation('lrelu', neg_slope=0.1)
        self.ca = Channel_attention(64, 64 // 4)

        # Refinement tail: 64 -> 32 -> 16 -> 1 channels.
        self.conv_final1 = nn.Conv2d(64, 32, 3, 1, 1, bias=True)
        self.conv_final2 = nn.Conv2d(32, 16, 3, 1, 1, bias=True)
        self.conv_final3 = nn.Conv2d(16, 1, 3, 1, 1, bias=True)

    def forward(self, x):
        inp = self.conv(x)
        b1 = self.RDB_1(inp)
        b2 = self.RDB_2(b1)
        b3 = self.RDB_3(b2)
        b4 = self.RDB_4(b3)
        b5 = self.RDB_5(b4)
        # Dense aggregation of all intermediate features plus the trunk input.
        fused = torch.cat((b1, b2, b3, b4, b5, inp), 1)
        out = self.act(self.conv_2(fused))      # 384 -> 64 channel reduction
        out = self.ca(out)                      # channel attention
        out = self.act(self.conv_3(out))
        out = self.shuffle_x4(self.conv_4(out)) # sub-pixel upsampling
        out = self.reconstrcut(out)
        out = self.conv_final1(out)
        out = self.conv_final2(out)
        return self.conv_final3(out)

@MODEL_REGISTRY.register()
class VRCNN_light(nn.Module):
    """Lightweight video super-resolution network.

    Extracts features from ``nframes`` low-resolution frames, aligns every
    frame to the reference (center) frame with a three-stage cascade of
    deformable-convolution alignment, fuses the aligned features, and
    reconstructs a residual on top of a bilinearly upsampled center frame.

    Args:
        upscale_factor: spatial upscaling factor.
        center: index of the reference frame (defaults to the middle frame).
        nf: feature width.
        nframes: number of input frames.
    """
    def __init__(self, upscale_factor=4, center=None, nf=64, nframes=3):
        super(VRCNN_light, self).__init__()
        self.upscale_factor = upscale_factor
        self.center = nframes // 2 if center is None else center
        self.conv1 = nn.Conv2d(1, nf, 3, 1, 1, bias=True)
        # functools.partial pins nf so make_layer can instantiate the block
        # without arguments.
        ResidualBlock_noBN_f = functools.partial(ResidualBlock_noBN, nf=nf)
        self.feature_extraction = make_layer(ResidualBlock_noBN_f, 3)

        self.dc_align_3 = DC_Align()
        self.fusion_1 = nn.Conv2d(nf * nframes, nf, 1, 1, bias=True)
        self.fusion_3 = nn.Conv2d(nf, nf, 3, 1, 1, bias=True)
        self.SRnet = SRnet(upscale_factor=upscale_factor)
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        self.ca = Channel_attention(64, 64 // 4)

    def _align_to_ref(self, fea, ref_fea):
        """Align every frame in ``fea`` [B, N, C, H, W] to ``ref_fea`` [B, C, H, W]."""
        aligned = [self.dc_align_3(fea[:, i, :, :, :].clone(), ref_fea)
                   for i in range(fea.size(1))]
        return torch.stack(aligned, dim=1)  # [B, N, C, H, W]

    def forward(self, x):
        """x: [B, N, 1, H, W] frame stack -> [B, 1, H*s, W*s] SR center frame."""
        B, N, C, H, W = x.size()  # N video frames
        x_center = x[:, self.center, :, :, :].contiguous()

        # Extract LR features for all frames in one batched pass.
        L1_fea = self.lrelu(self.conv1(x.view(-1, C, H, W)))
        L1_fea = self.feature_extraction(L1_fea)
        L1_fea = L1_fea.view(B, N, -1, H, W)

        # Three cascaded deformable-alignment rounds towards the center frame
        # (previously three copy-pasted loops).
        ref_fea_l = L1_fea[:, self.center, :, :, :].clone()
        aligned_fea = self._align_to_ref(L1_fea, ref_fea_l)
        aligned_fea = self._align_to_ref(aligned_fea, ref_fea_l)
        aligned_fea = self._align_to_ref(aligned_fea, ref_fea_l)

        # Fuse the aligned features across frames.
        output = self.fusion_1(aligned_fea.view(B, -1, H, W))
        output = self.fusion_3(output)
        output = self.ca(output)
        # Reconstruction / super-resolution.
        output = self.SRnet(output)

        # Bugfix: the bilinear skip connection hard-coded scale_factor=4; it
        # must match self.upscale_factor or the residual add shape-mismatches
        # for any other factor (SRnet upsamples by upscale_factor).
        base = F.interpolate(x_center, scale_factor=self.upscale_factor,
                             mode='bilinear', align_corners=False)
        output += base
        return output


# Parameter initialization helper
def init_params(self):
    """Kaiming-initialize all Conv2d/BatchNorm2d/Linear submodules of ``self``.

    Written as a free function taking ``self`` so it can be bound to (or
    called with) any nn.Module.
    """
    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            # Bugfix: use the in-place ``_`` init functions; the plain
            # kaiming_normal/constant/normal names are deprecated (and removed
            # in recent torch), matching initialize_weights() below.
            init.kaiming_normal_(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant_(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant_(m.weight, 1)
            init.constant_(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal_(m.weight, std=0.001)
            if m.bias is not None:
                init.constant_(m.bias, 0)


def initialize_weights(net_l, scale=1):
    """Kaiming-initialize one network or a list of networks.

    ``scale`` shrinks conv/linear weights right after init (e.g. 0.1 for the
    convs inside a residual block); biases are zeroed, BatchNorm weights set
    to 1.
    """
    if not isinstance(net_l, list):
        net_l = [net_l]
    for net in net_l:
        for m in net.modules():
            # Conv2d and Linear receive identical treatment.
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                init.kaiming_normal_(m.weight, a=0, mode='fan_in')
                m.weight.data *= scale  # damp residual-branch weights
                if m.bias is not None:
                    m.bias.data.zero_()
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias.data, 0.0)


def make_layer(block, n_layers):
    """Stack ``n_layers`` fresh instances produced by calling ``block()``
    into an nn.Sequential."""
    return nn.Sequential(*(block() for _ in range(n_layers)))


class ResidualBlock_noBN(nn.Module):
    '''Residual block without batch normalization:
        ---Conv-ReLU-Conv-+-
         |________________|
    Both convs are 3x3 with dilation 2 (padding 2), so spatial size is kept.
    '''
    def __init__(self, nf=64):
        super(ResidualBlock_noBN, self).__init__()
        self.conv1 = nn.Conv2d(nf, nf, 3, 1, 2, bias=True, dilation=2)
        self.conv2 = nn.Conv2d(nf, nf, 3, 1, 2, bias=True, dilation=2)

        # Scale initial weights down (x0.1) for stable residual learning.
        initialize_weights([self.conv1, self.conv2], 0.1)

    def forward(self, x):
        residual = self.conv2(F.relu(self.conv1(x), inplace=True))
        return x + residual
