import torch 
import torch.nn as nn
from torch.nn.modules.activation import Tanh
from miscc.pt_config import cfg

def conv3x3(in_channels, out_channels, stride = 1):
    """3x3 convolution with padding 1 and no bias.

    Preserves the spatial resolution (for stride 1); mainly used to
    change the channel count.
    """
    return nn.Conv2d(
        in_channels,
        out_channels,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )

class upSampleBlock(torch.nn.Module):
    """Upsampling block.

    Doubles the spatial size (nearest-neighbour upsampling) and can
    change the channel count via a 3x3 conv, followed by BatchNorm
    and ReLU.
    """
    def __init__(self, in_channels, out_channels):
        super(upSampleBlock, self).__init__()
        layers = [
            nn.Upsample(scale_factor=2, mode='nearest'),
            conv3x3(in_channels, out_channels),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(True),
        ]
        self.block = nn.Sequential(*layers)

    def forward(self, x):
        return self.block(x)

class ResBlock(nn.Module):
    """Residual block that preserves both channel count and spatial size.

    conv-BN-ReLU-conv-BN, then an in-place skip-connection add followed
    by a final ReLU.
    """
    def __init__(self, channel_num):
        super(ResBlock, self).__init__()
        self.block = nn.Sequential(
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
            nn.ReLU(True),
            conv3x3(channel_num, channel_num),
            nn.BatchNorm2d(channel_num),
        )
        self.ReLu = nn.ReLU(True)

    def forward(self, x):
        identity = x
        out = self.block(x)
        out += identity  # residual connection (in-place add)
        return self.ReLu(out)

class CA_NET(nn.Module):
    """
    将384维的textEmbedding转化为128维的向量，便于提取特征后面计算。
    """
    def __init__(self):
        super(CA_NET, self).__init__()
        self.t_dim = cfg.TEXT.DIMENSION
        self.c_dim = cfg.GAN.CONDITION_DIM
        self.fc = nn.Linear(self.t_dim, self.c_dim * 2, bias=True)
        self.relu = nn.ReLU()

    def encode(self, text_embedding):
        x = self.relu(self.fc(text_embedding))
        mu = x[:, :self.c_dim]
        logvar = x[:, self.c_dim:]
        return mu, logvar

    def reparametrize(self, mu, logvar):

        std = logvar.mul(0.5).exp_()
        if cfg.CUDA:
            eps = torch.cuda.FloatTensor(std.size()).normal_()
        else:
            eps = torch.FloatTensor(std.size()).normal_()
        """
        std = logvar.mul(0.5).exp_()
        std.cuda()
        eps = torch.FloatTensor(std.size()).normal_()
        """
        eps.requires_grad_(False)

        # if cfg.CUDA:
        #     eps.cuda()

        # 更改了
        return eps.mul(std).add_(mu)

    def forward(self, text_embedding):
        mu, logvar = self.encode(text_embedding)
        c_code = self.reparametrize(mu, logvar)
        return c_code, mu, logvar

class D_GET_LOGITS(nn.Module):
    """Discriminator head.

    Turns a (ndf*8)-channel 4x4 feature map into per-sample real/fake
    probabilities, optionally conditioned on a nef-dimensional text code
    that is tiled over the spatial grid.
    """
    def __init__(self, ndf, nef, bcondition=True):
        super(D_GET_LOGITS, self).__init__()
        self.df_dim = ndf
        self.ef_dim = nef
        self.bcondition = bcondition
        if bcondition:
            # Fuse image features with the tiled condition code first.
            layers = [
                conv3x3(ndf * 8 + nef, ndf * 8),
                nn.BatchNorm2d(ndf * 8),
                nn.LeakyReLU(0.2, inplace=True),
                nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
                nn.Sigmoid(),
            ]
        else:
            layers = [
                nn.Conv2d(ndf * 8, 1, kernel_size=4, stride=4),
                nn.Sigmoid(),
            ]
        self.outlogits = nn.Sequential(*layers)

    def forward(self, h_code, c_code=None):
        if self.bcondition and c_code is not None:
            # Tile the condition code to 4x4 and concatenate on channels:
            # state size is (ndf*8 + nef) x 4 x 4.
            tiled = c_code.view(-1, self.ef_dim, 1, 1).repeat(1, 1, 4, 4)
            h_c_code = torch.cat((h_code, tiled), 1)
        else:
            h_c_code = h_code
        return self.outlogits(h_c_code).view(-1)

class Stage1_G(nn.Module):
    """Stage-I generator: text embedding + noise -> low-res RGB image.

    The conditioning code and noise are projected to a (g_dim, 4, 4)
    feature map, upsampled four times (to 64x64 spatially, per the
    original comments), and mapped to 3 channels with tanh.
    """
    def __init__(self):
        super(Stage1_G, self).__init__()
        self.ca_dim = cfg.GAN.CONDITION_DIM
        self.g_dim = cfg.GAN.GF_DIM * 8
        self.z_dim = cfg.Z_DIM

        # Width of the concatenated (noise + conditioning code) vector.
        in_dim = self.ca_dim + self.z_dim

        self.ca_net = CA_NET()

        # Project the input vector to g_dim * 4 * 4 units; forward()
        # reshapes them into a g_dim-channel 4x4 feature map.
        self.fullConnect = nn.Sequential(
            nn.Linear(in_dim, self.g_dim * 4 * 4, bias=False),
            nn.BatchNorm1d(self.g_dim * 4 * 4),
            nn.ReLU(True),
        )

        g = self.g_dim
        self.upSampleBlock1 = upSampleBlock(g, g // 2)
        self.upSampleBlock2 = upSampleBlock(g // 2, g // 4)
        self.upSampleBlock3 = upSampleBlock(g // 4, g // 8)
        self.upSampleBlock4 = upSampleBlock(g // 8, g // 16)

        # Chained upsampling: 4x4 -> 64x64, channels g -> g // 16.
        self.upSampleBlocksUnited = nn.Sequential(
            self.upSampleBlock1,
            self.upSampleBlock2,
            self.upSampleBlock3,
            self.upSampleBlock4,
        )

        # Map the g // 16 feature channels to a 3-channel (RGB) image.
        self.getImage = nn.Sequential(
            conv3x3(g // 16, 3),
            nn.Tanh(),
        )

    def forward(self, text_embedding, noise):
        """Return (None, fake_image, mu, logvar).

        The leading None keeps the return signature aligned with
        Stage2_G, which returns the Stage-I image in that slot.
        """
        c_code, mu, logvar = self.ca_net(text_embedding)
        flat = self.fullConnect(torch.cat((noise, c_code), 1))
        feat = flat.view(-1, self.g_dim, 4, 4)
        feat = self.upSampleBlocksUnited(feat)
        fake_image = self.getImage(feat)
        return None, fake_image, mu, logvar

class Stage1_D(nn.Module):
    """Stage-I discriminator.

    Encodes the input RGB image by repeated stride-2 convolutions into a
    (d_dim * 8)-channel 4x4 feature map; the actual real/fake decision is
    made by the conditional head (D_GET_LOGITS), which also consumes the
    conditioning code derived from the text embedding.
    """
    def __init__(self):
        super(Stage1_D, self).__init__()
        self.d_dim = cfg.GAN.DF_DIM
        self.ca_dim = cfg.GAN.CONDITION_DIM

        d = self.d_dim
        # Four stride-2 convs downsample the image to a (d*8) x 4 x 4 map
        # (the first stage has no BatchNorm, per common GAN practice).
        self.encode_image = nn.Sequential(
            nn.Conv2d(3, d, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(d, d * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(d * 2),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(d * 2, d * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(d * 4),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(d * 4, d * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(d * 8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        # Conditional head; Stage-I uses no unconditional head.
        self.get_cond_logits = D_GET_LOGITS(self.d_dim, self.ca_dim)
        self.get_uncond_logits = None

    def forward(self, image):
        """Return the encoded image features; logits are computed by
        the separately exposed head modules."""
        return self.encode_image(image)

class Stage2_G(nn.Module):
    """Stage-II generator: refines the Stage-I image into a larger one.

    The Stage-I output is downsampled, fused with the tiled conditioning
    code, passed through residual blocks, and upsampled 16x in each
    spatial dimension before being mapped back to RGB.
    """
    def __init__(self, Stage1_G):
        super(Stage2_G, self).__init__()
        self.ca_dim = cfg.GAN.CONDITION_DIM
        self.g_dim = cfg.GAN.GF_DIM
        self.z_dim = cfg.Z_DIM
        # NOTE: the parameter shadows the Stage1_G class name; it holds
        # the pretrained Stage-I generator, frozen during Stage-II training.
        self.Stage1_G = Stage1_G
        for p in self.Stage1_G.parameters():
            p.requires_grad = False

        g = self.g_dim
        self.ca_net = CA_NET()
        # Downsample the Stage-I image to a (g*4)-channel 16x16 map.
        self.encoder = nn.Sequential(
            conv3x3(3, g),
            nn.ReLU(True),
            nn.Conv2d(g, g * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(g * 2),
            nn.ReLU(True),
            nn.Conv2d(g * 2, g * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(g * 4),
            nn.ReLU(True),
            )
        # Fuse image features with the tiled text conditioning code.
        self.hr_joint = nn.Sequential(
            conv3x3(self.ca_dim + g * 4, g * 4),
            nn.BatchNorm2d(g * 4),
            nn.ReLU(True),
            )
        self.residual = self.makeResLayer(ResBlock, g * 4)
        self.upSampleBlock1 = upSampleBlock(g * 4, g * 2)
        self.upSampleBlock2 = upSampleBlock(g * 2, g)
        self.upSampleBlock3 = upSampleBlock(g, g // 2)
        self.upSampleBlock4 = upSampleBlock(g // 2, g // 4)

        # Four doublings: 16x16 -> 256x256 spatially.
        self.upSampleBlocksUnited = nn.Sequential(
            self.upSampleBlock1,
            self.upSampleBlock2,
            self.upSampleBlock3,
            self.upSampleBlock4,
        )

        # Map the g // 4 feature channels to a 3-channel (RGB) image.
        self.getImage = nn.Sequential(
            conv3x3(g // 4, 3),
            nn.Tanh(),
        )

    def makeResLayer(self, block, channel_num):
        """Stack cfg.GAN.R_NUM residual blocks of the given type."""
        return nn.Sequential(
            *[block(channel_num) for _ in range(cfg.GAN.R_NUM)]
        )

    def forward(self, text_embedding, noise):
        """Return (stage1_image, refined_image, mu, logvar)."""
        _, stage1_image, _, _ = self.Stage1_G(text_embedding, noise)
        # Detach so gradients never flow into the frozen Stage-I net.
        stage1_image = stage1_image.detach()
        encoded_img = self.encoder(stage1_image)

        c_code, mu, logvar = self.ca_net(text_embedding)
        cond = c_code.view(-1, self.ca_dim, 1, 1).repeat(1, 1, 16, 16)
        feat = self.hr_joint(torch.cat([encoded_img, cond], 1))
        feat = self.residual(feat)
        feat = self.upSampleBlocksUnited(feat)

        fake_img = self.getImage(feat)
        return stage1_image, fake_img, mu, logvar


class Stage2_D(nn.Module):
    """Stage-II discriminator.

    Encodes the (larger) input image down to a (d_dim * 8)-channel 4x4
    feature map; real/fake decisions are made by the exposed conditional
    and unconditional D_GET_LOGITS heads.
    """
    def __init__(self):
        super(Stage2_D, self).__init__()
        self.d_dim = cfg.GAN.DF_DIM
        self.ca_dim = cfg.GAN.CONDITION_DIM
        self.define_module()

    def define_module(self):
        """Build the image encoder and both logit heads."""
        ndf, nef = self.d_dim, self.ca_dim

        # First stage: stride-2 conv + LeakyReLU, no BatchNorm.
        layers = [
            nn.Conv2d(3, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        # Five more stride-2 downsampling stages, doubling channels
        # each time (ndf -> ndf * 32).
        cin = ndf
        for cout in (ndf * 2, ndf * 4, ndf * 8, ndf * 16, ndf * 32):
            layers += [
                nn.Conv2d(cin, cout, 4, 2, 1, bias=False),
                nn.BatchNorm2d(cout),
                nn.LeakyReLU(0.2, inplace=True),
            ]
            cin = cout
        # Two 3x3 stages squeeze the channels back down to ndf * 8,
        # matching what D_GET_LOGITS expects.
        for cout in (ndf * 16, ndf * 8):
            layers += [
                conv3x3(cin, cout),
                nn.BatchNorm2d(cout),
                nn.LeakyReLU(0.2, inplace=True),
            ]
            cin = cout
        self.encode_img = nn.Sequential(*layers)

        self.get_cond_logits = D_GET_LOGITS(ndf, nef, bcondition=True)
        self.get_uncond_logits = D_GET_LOGITS(ndf, nef, bcondition=False)

    def forward(self, image):
        """Return the encoded image features for the logit heads."""
        return self.encode_img(image)