# -*- coding: utf-8 -*-
# @Time    : 2023/5/16 20:58
# @Author  : Pan
# @Software: PyCharm
# @Project : VisualFramework
# @FileName: Decoder

import paddle
import numpy as np
from paddle import nn
from paddle.nn.functional import pixel_shuffle


class DoubleConvBlock(nn.Layer):
    """Two-stage convolution: a spatial conv followed by a 1x1 channel mix.

    Each stage is paired with SyncBatchNorm; no activation is applied, so the
    block is a purely linear feature transform between norms.
    """

    def __init__(self, in_c, out_c, kernel_size, stride=1, padding=0):
        super(DoubleConvBlock, self).__init__()
        # Stage 1: spatial convolution, channel count unchanged.
        self.conv1 = nn.Sequential(
            nn.Conv2D(in_c, in_c, kernel_size, stride, padding=padding),
            nn.SyncBatchNorm(in_c),
        )
        # Stage 2: 1x1 projection from in_c to out_c channels.
        self.conv2 = nn.Sequential(
            nn.Conv2D(in_c, out_c, 1),
            nn.SyncBatchNorm(out_c),
        )

    def forward(self, x):
        out = self.conv1(x)
        out = self.conv2(out)
        return out


class DoubleLinear(nn.Layer):
    """Fuse the deepest and shallowest feature maps via 1x1 convs + pixel shuffle.

    The deep map is expanded and upsampled 8x, concatenated with the shallow
    map, reduced, and upsampled a further 4x.
    """

    def __init__(self, parameters):
        super(DoubleLinear, self).__init__()
        embed_dim = parameters.get("embed_dim", 768)
        patch_dim = parameters.get("patch_dim", 96)
        # Expand so the 8x pixel shuffle (channels / 64) leaves patch_dim channels.
        self.deep_conv = nn.Conv2D(embed_dim, patch_dim * 64, kernel_size=1)
        # After concat with the shallow map the input has patch_dim * 2 channels.
        self.shallow_conv = nn.Conv2D(patch_dim * 2, patch_dim // 2, kernel_size=1)
        self.pixel_shuffle1 = nn.PixelShuffle(8)  # [B, C, P]
        self.pixel_shuffle2 = nn.PixelShuffle(4)  # [B, C, P]

    def forward(self, x):
        # x is a list of pyramid features: x[-1] deepest, x[0] shallowest.
        deep, shallow = x[-1], x[0]
        upsampled = self.pixel_shuffle1(self.deep_conv(deep))
        fused = paddle.concat([upsampled, shallow], axis=1)
        return self.pixel_shuffle2(self.shallow_conv(fused))


class TargetDoubleLinear(nn.Layer):
    """Label-conditioned two-branch decoder.

    A per-pixel class-label map is embedded and downsampled to two resolutions;
    those embeddings are added to the deep and shallow backbone features, which
    are then upsampled (pixel shuffle) and fused into a 3-channel output.
    """

    def __init__(self, parameters):
        super(TargetDoubleLinear, self).__init__()
        embed_dim = parameters["embed_dim"] if "embed_dim" in parameters.keys() else 768
        patch_dim = parameters["patch_dim"] if "patch_dim" in parameters.keys() else 128
        num_embed = parameters["num_embed"] if "num_embed" in parameters.keys() else 256
        label_embed = parameters["label_embed"] if "label_embed" in parameters.keys() else 16
        # NOTE(review): the original also read "decoder_stride" here but never
        # used it; that dead lookup has been removed.

        # Deep branch: expand to patch_dim * 32, 8x pixel shuffle
        # (channels / 64 -> patch_dim // 2), then refine to patch_dim.
        self.deep_conv = nn.Sequential(
            DoubleConvBlock(embed_dim, patch_dim * 32, kernel_size=1),
            nn.SyncBatchNorm(patch_dim * 32),
            nn.PixelShuffle(8),
            DoubleConvBlock(patch_dim // 2, patch_dim, 1),
            nn.SyncBatchNorm(patch_dim)
        )

        # Fusion branch: concat(deep, shallow) -> 4x pixel shuffle
        # (channels / 16 -> patch_dim // 8) -> 3 output channels.
        self.shallow_conv = nn.Sequential(
            DoubleConvBlock(patch_dim * 2, patch_dim * 2, kernel_size=1),
            nn.SyncBatchNorm(patch_dim * 2),
            nn.PixelShuffle(4),
            DoubleConvBlock(patch_dim // 8, 3, 1)
        )

        # Label encoding: [H, W] int labels -> [H, W, label_embed] vectors.
        self.label_embed = nn.Embedding(num_embed, label_embed)

        # Label downsampling, stage 1: 4x spatial reduction, e.g.
        # [224, 224, label_embed] -> [56, 56, patch_dim] (assumed sizes — TODO confirm).
        self.embed_conv1 = nn.Sequential(
            nn.Conv2D(label_embed, patch_dim, kernel_size=4, stride=4),
            nn.SyncBatchNorm(patch_dim)
        )

        # Label downsampling, stage 2: three strided 2x reductions,
        # e.g. [56, 56, patch_dim] -> [7, 7, embed_dim] (assumed sizes — TODO confirm).
        self.embed_conv2 = nn.Sequential(
            DoubleConvBlock(patch_dim, patch_dim * 2, kernel_size=2, stride=2, padding=0),
            nn.SyncBatchNorm(patch_dim * 2),
            DoubleConvBlock(patch_dim * 2, patch_dim * 4, kernel_size=2, stride=2, padding=0),
            nn.SyncBatchNorm(patch_dim * 4),
            DoubleConvBlock(patch_dim * 4, embed_dim, kernel_size=2, stride=2, padding=0),
            nn.SyncBatchNorm(embed_dim)
        )

    def forward(self, x, c):
        # x: pyramid features (x[-1] deepest, x[0] shallowest); c: label map.
        x, y = x[-1], x[0]
        # Embed the labels, move channels first (NHWC -> NCHW), downsample.
        embed1 = self.embed_conv1(self.label_embed(c).transpose([0, 3, 1, 2]))
        embed2 = self.embed_conv2(embed1)
        # Condition each feature level with the matching-resolution embedding.
        x = self.deep_conv(x + embed2)
        out = self.shallow_conv(paddle.concat([x, y + embed1], axis=1))
        return out


class TargetDoubleLinear2(nn.Layer):
    """Label-conditioned decoder that predicts an image and a noise map.

    The deepest backbone feature, conditioned on an embedded label map, is
    split into an image branch and a noise branch; both are upsampled 32x via
    pixel shuffle and returned as ``[image, noise]``.
    """

    def __init__(self, parameters):
        super(TargetDoubleLinear2, self).__init__()
        embed_dim = parameters["embed_dim"] if "embed_dim" in parameters.keys() else 768
        num_embed = parameters["num_embed"] if "num_embed" in parameters.keys() else 256
        label_embed = parameters["label_embed"] if "label_embed" in parameters.keys() else 16
        # NOTE(review): the original also read "patch_dim" here but never used
        # it; that dead lookup has been removed.

        # Design notes (translated from the original):
        # Assuming the backbone learns the image content, how can the noise be
        # separated out?
        #   option 1: encode the image, then recover the noise via concat
        #             (requires a new image encoder inside the decoder)
        #   option 2: split the channels — some learn noise, some learn features
        #   option 3: via image convolution

        # Image branch: project deep features to 3 * 32 * 32 channels so a
        # 32x pixel shuffle yields a 3-channel image.
        self.image_deep_conv = nn.Sequential(
            nn.Conv2D(embed_dim, 3 * 32 * 32, kernel_size=1),
            nn.SyncBatchNorm(3 * 32 * 32)
        )

        self.deimage = nn.Conv2D(3 * 32 * 32, 3 * 32 * 32, kernel_size=1)

        # Noise branch: same projection shape as the image branch.
        self.noise_deep_conv = nn.Sequential(
            nn.Conv2D(embed_dim, 3 * 32 * 32, kernel_size=1),
            nn.SyncBatchNorm(3 * 32 * 32)
        )

        # Refine noise from the concatenated [image, noise] features.
        self.denoise = DoubleConvBlock(6 * 32 * 32, 3 * 32 * 32, kernel_size=1)

        # Label encoding: [H, W] int labels -> [H, W, label_embed] vectors.
        self.label_embed = nn.Embedding(num_embed, label_embed)

        # Downsample the embedded labels 32x to the deep-feature resolution,
        # e.g. [224, 224, label_embed] -> [7, 7, embed_dim] (assumed — TODO confirm).
        self.embed_target_conv = nn.Sequential(
            nn.Conv2D(label_embed, embed_dim, kernel_size=32, stride=32),
            nn.SyncBatchNorm(embed_dim)
        )

    def forward(self, x, c):
        x = x[-1]  # deepest backbone feature
        # Embed labels, move channels first (NHWC -> NCHW), downsample 32x.
        t = self.embed_target_conv(self.label_embed(c).transpose([0, 3, 1, 2]))
        img = self.image_deep_conv(x + t)
        noise = self.noise_deep_conv(x + t)
        noise = self.denoise(paddle.concat([img, noise], axis=1))
        return [pixel_shuffle(self.deimage(img), 32), pixel_shuffle(noise, 32)]


class TribleLinear(nn.Layer):
    """Fuse deep and shallow features through three 1x1 convs and pixel shuffles.

    Like ``DoubleLinear`` but the shallow branch gets its own 1x1 conv before
    the fusion conv.
    """

    def __init__(self, parameters):
        super(TribleLinear, self).__init__()
        embed_dim = parameters["embed_dim"] if "embed_dim" in parameters.keys() else 768
        patch_dim = parameters["patch_dim"] if "patch_dim" in parameters.keys() else 96
        self.deep_conv = nn.Conv2D(embed_dim, patch_dim * 64, kernel_size=1)
        self.shallow_conv = nn.Conv2D(patch_dim, patch_dim, kernel_size=1)
        # Fusion conv expects the concatenated deep + shallow map (2 * patch_dim).
        self.mix_conv = nn.Conv2D(patch_dim * 2, patch_dim // 2, kernel_size=1)
        self.pixel_shuffle1 = nn.PixelShuffle(8)  # [B, C, P]
        self.pixel_shuffle2 = nn.PixelShuffle(4)  # [B, C, P]

    def forward(self, x):
        # x: pyramid features (x[-1] deepest, x[0] shallowest).
        x, y = x[-1], x[0]
        x = self.pixel_shuffle1(self.deep_conv(x))
        y = self.shallow_conv(y)
        # BUG FIX: the original concatenated [x] alone, silently discarding the
        # shallow branch y and feeding mix_conv only patch_dim channels where
        # it declares patch_dim * 2 inputs. Concatenate both branches.
        out = self.pixel_shuffle2(self.mix_conv(paddle.concat([x, y], axis=1)))
        return out


class MutilDecoder(nn.Layer):
    """FPN-style decoder: fuse four pyramid levels top-down via pixel shuffle."""

    def __init__(self, parameters):
        super(MutilDecoder, self).__init__()
        embed_dim = parameters.get("embed_dim", [96, 192, 384, 768])
        decode_stride = parameters.get("decode_stride", 4)
        channel = parameters.get("channel", 3)
        dim1, dim2, dim3, dim4 = embed_dim
        # Each conv expands channels so the following pixel shuffle doubles the
        # spatial size (4x at the final level) while matching the next level.
        self.conv_c1 = nn.Conv2D(2 * dim1, decode_stride * decode_stride * channel, kernel_size=1)
        self.conv_c2 = nn.Conv2D(2 * dim2, dim1 * 4, kernel_size=1)
        self.conv_c3 = nn.Conv2D(2 * dim3, dim2 * 4, kernel_size=1)
        self.conv_c4 = nn.Conv2D(dim4, dim3 * 4, kernel_size=1)
        self.pixel_shuffle_c1 = nn.PixelShuffle(decode_stride)
        self.pixel_shuffle_c2 = nn.PixelShuffle(2)
        self.pixel_shuffle_c3 = nn.PixelShuffle(2)
        self.pixel_shuffle_c4 = nn.PixelShuffle(2)

    def forward(self, x):
        c1, c2, c3, c4 = x
        # Top-down pass: upsample each level 2x and concatenate it with the
        # next shallower level before that level's 1x1 fusion conv.
        up = self.pixel_shuffle_c4(self.conv_c4(c4))
        up = self.pixel_shuffle_c3(self.conv_c3(paddle.concat([c3, up], axis=1)))
        up = self.pixel_shuffle_c2(self.conv_c2(paddle.concat([c2, up], axis=1)))
        return self.pixel_shuffle_c1(self.conv_c1(paddle.concat([c1, up], axis=1)))


class TargetDecoder(nn.Layer):
    """Label-conditioned FPN-style decoder over four pyramid levels.

    A per-pixel label map is embedded and progressively downsampled to each
    pyramid resolution; the embeddings condition the matching feature level
    during the top-down fusion.
    """

    def __init__(self, parameters):
        super(TargetDecoder, self).__init__()
        embed_dim = parameters.get("embed_dim", [96, 192, 384, 768])
        decode_stride = parameters.get("decode_stride", 4)
        num_embed = parameters.get("num_embed", 256)
        label_embed = parameters.get("label_embed", 16)
        channel = parameters.get("channel", 3)
        dim1, dim2, dim3, dim4 = embed_dim

        # Label encoding: [H, W] int labels -> [H, W, label_embed] vectors.
        self.label_embed = nn.Embedding(num_embed, label_embed)

        # Label pyramid: one strided conv per level, matching the channel
        # count each fusion conv expects at that level.
        self.embed_conv1 = nn.Sequential(
            nn.Conv2D(label_embed, 2 * dim1, kernel_size=4, stride=4),
            nn.SyncBatchNorm(2 * dim1),
        )
        self.embed_conv2 = nn.Sequential(
            nn.Conv2D(2 * dim1, 2 * dim2, kernel_size=2, stride=2),
            nn.SyncBatchNorm(2 * dim2),
        )
        self.embed_conv3 = nn.Sequential(
            nn.Conv2D(2 * dim2, 2 * dim3, kernel_size=2, stride=2),
            nn.SyncBatchNorm(2 * dim3),
        )
        self.embed_conv4 = nn.Sequential(
            nn.Conv2D(2 * dim3, dim4, kernel_size=2, stride=2),
            nn.SyncBatchNorm(dim4),
        )

        # Fusion convs + pixel shuffles, mirroring MutilDecoder.
        self.conv_c1 = nn.Conv2D(2 * dim1, decode_stride * decode_stride * channel, kernel_size=1)
        self.conv_c2 = nn.Conv2D(2 * dim2, dim1 * 4, kernel_size=1)
        self.conv_c3 = nn.Conv2D(2 * dim3, dim2 * 4, kernel_size=1)
        self.conv_c4 = nn.Conv2D(dim4, dim3 * 4, kernel_size=1)
        self.pixel_shuffle_c1 = nn.PixelShuffle(decode_stride)
        self.pixel_shuffle_c2 = nn.PixelShuffle(2)
        self.pixel_shuffle_c3 = nn.PixelShuffle(2)
        self.pixel_shuffle_c4 = nn.PixelShuffle(2)

    def forward(self, x, c):
        c1, c2, c3, c4 = x

        # Build the label-embedding pyramid (channels first after transpose).
        label1 = self.embed_conv1(self.label_embed(c).transpose([0, 3, 1, 2]))
        label2 = self.embed_conv2(label1)
        label3 = self.embed_conv3(label2)
        label4 = self.embed_conv4(label3)

        # Top-down fusion, each level conditioned by its label embedding.
        up = self.pixel_shuffle_c4(self.conv_c4(c4 + label4))
        up = self.pixel_shuffle_c3(self.conv_c3(label3 + paddle.concat([c3, up], axis=1)))
        up = self.pixel_shuffle_c2(self.conv_c2(label2 + paddle.concat([c2, up], axis=1)))
        return self.pixel_shuffle_c1(self.conv_c1(label1 + paddle.concat([c1, up], axis=1)))


class MutilHeadDecoder(nn.Layer):
    """Run several decoder heads on the same inputs and collect their outputs.

    ``parameters`` is a list of config dicts; each dict's "type" key names a
    decoder class defined in this module and the dict itself is passed to that
    class's constructor.
    """

    def __init__(self, parameters):
        super(MutilHeadDecoder, self).__init__()
        # FIX: store the heads in nn.LayerList instead of a plain Python list.
        # A plain list does not register the sub-layers, so their parameters
        # were invisible to .parameters(), state_dict(), and device placement.
        # NOTE(review): eval() on config["type"] executes arbitrary code if the
        # config is untrusted; consider an explicit name -> class registry.
        self.decoders = nn.LayerList([eval(config["type"])(config) for config in parameters])

    def forward(self, x, t, **kwargs):
        outs = []
        for decoder in self.decoders:
            outs.append(decoder(x, t, **kwargs))
        return outs
