import torch
import torch.nn as nn
import torchvision
from torchvision import models, transforms
from torchinfo import summary
import torch.nn.functional as F

class AE_FC(nn.Module):
    """Fully-connected autoencoder for 3-channel square images.

    The encoder flattens the image and compresses it through
    progressively narrower linear layers down to a `hidden_size`
    bottleneck; the decoder mirrors the stack and ends in Sigmoid so
    reconstructed pixel values lie in [0, 1].
    """

    def __init__(self, img_size, hidden_size):
        super(AE_FC, self).__init__()
        self.img_size = img_size
        self.model_code = "AE_FC_32"

        widths = [2048, 512, 128]
        n_pixels = 3 * img_size * img_size

        self.flatten = nn.Flatten()

        # Encoder: Linear+ReLU pairs stepping n_pixels -> ... -> hidden_size.
        enc_layers = []
        for w_in, w_out in zip([n_pixels] + widths, widths + [hidden_size]):
            enc_layers.append(nn.Linear(w_in, w_out))
            enc_layers.append(nn.ReLU())
        self.Encoder = nn.Sequential(*enc_layers)

        # Decoder: mirrored widths; the last activation is Sigmoid, not ReLU.
        dec_dims = [hidden_size] + widths[::-1] + [n_pixels]
        dec_layers = []
        for w_in, w_out in zip(dec_dims[:-1], dec_dims[1:]):
            dec_layers.append(nn.Linear(w_in, w_out))
            dec_layers.append(nn.ReLU())
        dec_layers[-1] = nn.Sigmoid()  # map outputs into [0, 1]
        self.Decoder = nn.Sequential(*dec_layers)

    def forward(self, x):
        """Return (bottleneck code, reconstruction shaped like the input)."""
        flat = self.flatten(x)
        code = self.Encoder(flat)
        recon = self.Decoder(code)
        recon = recon.view(flat.size(0), 3, self.img_size, self.img_size)
        return code, recon


class AE_CNN(nn.Module):
    """Convolutional autoencoder for [batch, 3, 32, 32] images.

    Four stride-2 convolutions halve the spatial size at each step down
    to [batch, 96, 2, 2]; four transposed convolutions mirror the path
    back to [batch, 3, 32, 32], ending in Sigmoid so pixel values lie
    in [0, 1].
    """

    def __init__(self):
        super(AE_CNN, self).__init__()
        self.model_code = "AE_CNN_32"

        channels = [3, 12, 24, 48, 96]

        # Encoder: each Conv2d(kernel=4, stride=2, pad=1) halves H and W.
        enc = []
        for c_in, c_out in zip(channels[:-1], channels[1:]):
            enc.append(nn.Conv2d(c_in, c_out, 4, stride=2, padding=1))
            enc.append(nn.ReLU())
        self.encoder = nn.Sequential(*enc)

        # Decoder: mirrored transposed convolutions; final Sigmoid, not ReLU.
        dec = []
        for c_in, c_out in zip(channels[:0:-1], channels[-2::-1]):
            dec.append(nn.ConvTranspose2d(c_in, c_out, 4, stride=2, padding=1))
            dec.append(nn.ReLU())
        dec[-1] = nn.Sigmoid()
        self.decoder = nn.Sequential(*dec)

    def forward(self, x):
        """Return (latent feature map, reconstruction)."""
        latent = self.encoder(x)
        recon = self.decoder(latent)
        return latent, recon


# VAE model
class VAE_FC(nn.Module):
    """Fully-connected variational autoencoder for 3-channel square images.

    The encoder flattens the image and maps it through shrinking linear
    layers; two parallel heads emit the posterior mean and log-variance.
    The reparameterization trick samples a latent z, and the decoder
    mirrors the encoder, ending in Sigmoid so reconstructions lie in
    [0, 1].
    """

    def __init__(self, img_size, z_dim):
        """img_size: side length of the square input image.
        z_dim: dimensionality of the latent variable z."""
        super(VAE_FC, self).__init__()
        self.img_size = img_size
        self.model_code = "VAE_FC_32"
        params = [2048, 512, 128]
        input_size = 3 * img_size * img_size

        self.flatten = nn.Flatten()

        # Shared encoder trunk (already ends in ReLU).
        self.fc1 = nn.Sequential(
            nn.Linear(input_size, params[0]),
            nn.ReLU(),
            nn.Linear(params[0], params[1]),
            nn.ReLU(),
            nn.Linear(params[1], params[2]),
            nn.ReLU(),
        )
        self.fc2 = nn.Linear(params[2], z_dim)  # posterior mean head
        self.fc3 = nn.Linear(params[2], z_dim)  # posterior log-variance head
        self.fc4 = nn.Linear(z_dim, params[2])  # latent -> decoder trunk
        self.fc5 = nn.Sequential(
            nn.Linear(params[2], params[1]),
            nn.ReLU(),
            nn.Linear(params[1], params[0]),
            nn.ReLU(),
            nn.Linear(params[0], input_size),
        )

    def encode(self, x):
        """Map a flattened batch to (mu, log_var)."""
        # fc1 already ends in nn.ReLU; the original wrapped it in another
        # F.relu, a no-op (ReLU is idempotent) removed here.
        h = self.fc1(x)
        return self.fc2(h), self.fc3(h)

    def reparameterize(self, mu, log_var):
        """Sample z = mu + eps * std with eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(log_var / 2)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Decode latent z; Sigmoid keeps outputs in [0, 1]."""
        h = F.relu(self.fc4(z))
        return torch.sigmoid(self.fc5(h))

    def forward(self, x):
        """Return ((mu, log_var), reconstruction shaped like the input)."""
        x = self.flatten(x)
        mu, log_var = self.encode(x)          # learn posterior parameters
        z = self.reparameterize(mu, log_var)  # sample latent variable z
        x_reconst = self.decode(z)            # reconstruct from z
        x_reconst = x_reconst.view(x.size(0), 3, self.img_size, self.img_size)
        return (mu, log_var), x_reconst

# NOTE(review): earlier commented-out draft of VAE_CNN kept for reference;
# the active implementation is below. Consider deleting once no longer needed.
# class VAE_CNN(nn.Module):
#     def __init__(self, img_size=32, z_dim=384):
#         super(VAE_CNN, self).__init__()
#         self.img_size = img_size
#         self.model_code = "VAE_CNN_32"
#         c_dim = 96 * (img_size // 16)**2
#
#         self.flatten = nn.Flatten()
#         self.fc1 = nn.Sequential(
#             nn.Conv2d(3, 12, 4, stride=2, padding=1),  # [batch, 12, 16, 16]
#             nn.ReLU(),
#             nn.Conv2d(12, 24, 4, stride=2, padding=1),  # [batch, 24, 8, 8]
#             nn.ReLU(),
#             nn.Conv2d(24, 48, 4, stride=2, padding=1),  # [batch, 48, 4, 4]
#             nn.ReLU(),
#             nn.Conv2d(48, 96, 4, stride=2, padding=1),  # [batch, 96, 2, 2]
#             nn.ReLU(),
#         )
#         self.fc2 = nn.Linear(c_dim, z_dim)
#         self.fc3 = nn.Linear(c_dim, z_dim)
#         self.fc4 = nn.Linear(z_dim, c_dim)
#         self.fc5 = nn.Sequential(
#             nn.ConvTranspose2d(96, 48, 4, stride=2, padding=1),  # [batch, 48, 4, 4]
#             nn.ReLU(),
#             nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1),  # [batch, 24, 8, 8]
#             nn.ReLU(),
#             nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1),  # [batch, 12, 16, 16]
#             nn.ReLU(),
#             nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1),  # [batch, 3, 32, 32]
#         )
#
#     # 编码，学习高斯分布均值与方差
#     def encode(self, x):
#         h = F.relu(self.fc1(x))
#         h = self.flatten(h)
#         return self.fc2(h), self.fc3(h)
#
#     # 将高斯分布均值与方差参数重表示，生成隐变量z  若x~N(mu, var*var)分布,则(x-mu)/var=z~N(0, 1)分布
#     def reparameterize(self, mu, log_var):
#         std = torch.exp(log_var / 2)
#         eps = torch.randn_like(std)
#         return mu + eps * std
#
#     # 解码隐变量z
#     def decode(self, z):
#         h = F.relu(self.fc4(z))
#         h = h.view(h.size(0), 96, 2, 2)
#         return torch.sigmoid(self.fc5(h))
#
#     # 计算重构值和隐变量z的分布参数
#     def forward(self, x):
#         mu, log_var = self.encode(x)  # 从原始样本x中学习隐变量z的分布，即学习服从高斯分布均值与方差
#         z = self.reparameterize(mu, log_var)  # 将高斯分布均值与方差参数重表示，生成隐变量z
#         x_reconst = self.decode(z)  # 解码隐变量z，生成重构x’
#         return (mu, log_var), x_reconst   # 返回重构值和隐变量的分布参数

class VAE_CNN(nn.Module):
    """Convolutional VAE for [batch, 3, img_size, img_size] images.

    A three-layer stride-2 encoder trunk downsamples by 8x; two parallel
    stride-2 heads then emit per-location mu and log_var feature maps
    (16x downsampled overall). The decoder mirrors the path with four
    transposed convolutions and a final Sigmoid so reconstructed pixels
    lie in [0, 1].
    """

    def __init__(self, img_size=32):
        super(VAE_CNN, self).__init__()
        self.img_size = img_size
        self.model_code = "VAE_CNN_32"
        # Side length of the latent feature map: img_size / 2**4.
        self.z_size = img_size // 16

        params = [32, 64, 80, 128]
        # Encoder trunk: each Conv2d(kernel=4, stride=2, pad=1) halves H and W.
        # (Shape comments below assume img_size == 32.)
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, params[0], 4, stride=2, padding=1),          # [batch, 32, 16, 16]
            nn.ReLU(),
            nn.Conv2d(params[0], params[1], 4, stride=2, padding=1),  # [batch, 64, 8, 8]
            nn.ReLU(),
            nn.Conv2d(params[1], params[2], 4, stride=2, padding=1),  # [batch, 80, 4, 4]
            nn.ReLU(),
        )
        # Parallel posterior heads, each producing [batch, 128, 2, 2].
        self.conv2 = nn.Conv2d(params[2], params[3], 4, stride=2, padding=1)  # mu
        self.conv3 = nn.Conv2d(params[2], params[3], 4, stride=2, padding=1)  # log_var
        # Decoder: four stride-2 transposed convolutions back to full size.
        self.conv5 = nn.Sequential(
            nn.ConvTranspose2d(params[3], params[2], 4, stride=2, padding=1),  # [batch, 80, 4, 4]
            nn.ReLU(),
            nn.ConvTranspose2d(params[2], params[1], 4, stride=2, padding=1),  # [batch, 64, 8, 8]
            nn.ReLU(),
            nn.ConvTranspose2d(params[1], params[0], 4, stride=2, padding=1),  # [batch, 32, 16, 16]
            nn.ReLU(),
            nn.ConvTranspose2d(params[0], 3, 4, stride=2, padding=1),          # [batch, 3, 32, 32]
        )

    def encode(self, x):
        """Map an image batch to (mu, log_var) feature maps."""
        # conv1 already ends in nn.ReLU; the original wrapped it in another
        # F.relu, a no-op (ReLU is idempotent) removed here.
        h = self.conv1(x)
        return self.conv2(h), self.conv3(h)

    def reparameterize(self, mu, log_var):
        """Sample z = mu + eps * std with eps ~ N(0, I) (reparameterization trick)."""
        std = torch.exp(log_var / 2)
        eps = torch.randn_like(std)
        return mu + eps * std

    def decode(self, z):
        """Decode latent z; Sigmoid keeps pixel values in [0, 1]."""
        return torch.sigmoid(self.conv5(z))

    def forward(self, x):
        """Return ((mu, log_var), reconstruction)."""
        mu, log_var = self.encode(x)          # learn posterior parameters
        z = self.reparameterize(mu, log_var)  # sample latent variable z
        x_reconst = self.decode(z)            # reconstruct from z
        return (mu, log_var), x_reconst
