import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import numpy as np

class Generator(nn.Module):
    """Conditional GAN generator: maps (noise, class label) to an image.

    The label is embedded into a z_dim-sized vector, concatenated with the
    noise vector, projected to a 256-channel feature map, then upsampled x16
    through four conv blocks. The output is tanh-activated, so pixel values
    lie in [-1, 1].

    Args:
        z_dim: dimensionality of the input noise vector.
        label_dim: number of classes (size of the embedding table).
        img_shape: (channels, height, width); height must be divisible by 16
            because there are four x2 upsampling stages.
        dropout_prob: dropout probability inside each conv block (0 disables).
    """

    def __init__(self, z_dim, label_dim, img_shape, dropout_prob=0):
        super(Generator, self).__init__()
        # Embed labels directly into the noise dimension so both halves of
        # the concatenated generator input have the same size.
        self.label_embedding = nn.Embedding(label_dim, z_dim)
        self.init_size = img_shape[1] // 16  # spatial size before the four x2 upsamples
        self.l1 = nn.Sequential(nn.Linear(z_dim + z_dim, 256 * self.init_size ** 2))

        # Four upsampling conv blocks: 256 -> 256 -> 128 -> 64 -> 64 channels.
        self.conv1 = self._conv_block(256, 256, 2, dropout_prob)
        self.conv2 = self._conv_block(256, 128, 2, dropout_prob)
        self.conv3 = self._conv_block(128, 64, 2, dropout_prob)
        self.conv4 = self._conv_block(64, 64, 2, dropout_prob)
        self.final_conv = nn.Conv2d(64, img_shape[0], 3, stride=1, padding=1)
        self.tanh = nn.Tanh()  # map output to [-1, 1]
        self._initialize_weights()

    def _conv_block(self, in_channels, out_channels, scale_factor, dropout_prob):
        """Build one upsampling block: BN -> bilinear upsample -> conv -> BN -> LeakyReLU -> dropout."""
        layers = [
            nn.BatchNorm2d(in_channels),
            nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=False),
            nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1),
            # BUG FIX: the original passed 0.8 positionally, which set eps=0.8
            # (BatchNorm2d's second positional arg is eps, default 1e-5) — a
            # huge numerical fudge factor. The intended parameter is momentum.
            nn.BatchNorm2d(out_channels, momentum=0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Dropout(dropout_prob),  # no-op when dropout_prob == 0
        ]
        return nn.Sequential(*layers)

    def _initialize_weights(self):
        """Kaiming-uniform init for conv/linear weights; N(1, 0.02) for BN scales."""
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='leaky_relu')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.normal_(m.weight, 1.0, 0.02)
                init.constant_(m.bias, 0)

    def forward(self, noise, labels):
        """Generate images from noise and class labels.

        Args:
            noise: (batch, z_dim) float tensor.
            labels: (batch,) long tensor of class indices in [0, label_dim).

        Returns:
            (batch, img_shape[0], H, W) tensor with values in [-1, 1].
        """
        gen_input = torch.cat((self.label_embedding(labels), noise), -1)
        out = self.l1(gen_input)
        out = out.view(out.size(0), 256, self.init_size, self.init_size)
        out = self.conv1(out)
        out = self.conv2(out)
        out = self.conv3(out)
        out = self.conv4(out)  # fourth block (original comment mislabeled it "third")
        img = self.final_conv(out)
        img = self.tanh(img)
        return img
# class Generator(nn.Module):
#     def __init__(self, z_dim, label_dim, img_shape, dropout_prob=0.1):
#         super(Generator, self).__init__()
#         self.label_embedding = nn.Embedding(label_dim, label_dim)
#         self.init_size = img_shape[1] // 4  # Initial size before upsampling
#         self.l1 = nn.Sequential(nn.Linear(z_dim + label_dim, 128 * self.init_size ** 2))
#         self.conv1 = self._conv_block(128, 128, 2, dropout_prob)
#         self.conv2 = self._conv_block(128, 64, 2, dropout_prob)
#         self.final_conv = nn.Conv2d(64, img_shape[0], 3, stride=1, padding=1)
#         self.tanh = nn.Tanh()
#         self._initialize_weights()

#     def _conv_block(self, in_channels, out_channels, scale_factor, dropout_prob):
#         layers = [
#             nn.BatchNorm2d(in_channels),
#             nn.Upsample(scale_factor=scale_factor),
#             nn.Conv2d(in_channels, out_channels, 3, stride=1, padding=1),
#             nn.BatchNorm2d(out_channels, 0.8),
#             nn.LeakyReLU(0.2, inplace=True),
#         ]
#         return nn.Sequential(*layers)

#     def _initialize_weights(self):
#         for m in self.modules():
#             if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
#                 init.kaiming_uniform_(m.weight, mode='fan_in', nonlinearity='leaky_relu')
#                 if m.bias is not None:
#                     init.constant_(m.bias, 0)
#             elif isinstance(m, nn.BatchNorm2d):
#                 init.normal_(m.weight, 1.0, 0.02)
#                 init.constant_(m.bias, 0)

#     def forward(self, noise, labels):
#         gen_input = torch.cat((self.label_embedding(labels), noise), -1)
#         out = self.l1(gen_input)
#         out = out.view(out.size(0), 128, self.init_size, self.init_size)
#         out = self.conv1(out)
#         out = self.conv2(out)
#         img = self.final_conv(out)
#         img = self.tanh(img)
#         return img

# class Discriminator(nn.Module):
#     def __init__(self, label_dim, img_dim, dropout_prob=0.5):
#         super(Discriminator, self).__init__()
#         img_channels = img_dim[0]  
#         self.img_size = img_dim[1]  
#         self.label_embedding = nn.Embedding(label_dim, img_channels*img_dim[1]*img_dim[2])  # 生成与图像通道数相匹配的标签嵌入
        
#         self.conv0 = nn.Conv2d(img_channels * 2, 64, kernel_size=4, stride=2, padding=1)  # 64x128x128
#         self.bn0 = nn.BatchNorm2d(64, 0.8)
#         self.conv1 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1)  # 128x64x64
#         self.bn1 = nn.BatchNorm2d(128, 0.8)
#         self.conv2 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1)  # 256x32x32
#         self.bn2 = nn.BatchNorm2d(256, 0.8)
#         self.conv3 = nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1)  # 512x16x16
#         self.bn3 = nn.BatchNorm2d(512, 0.8)
#         self.conv4 = nn.Conv2d(512, 512, kernel_size=4, stride=2, padding=1)  # 512x8x8
#         self.fc1= nn.Linear(512 * 8 * 8, 512)  # 1x5x5 -> 1024
#         self.fc2 = nn.Linear(512, 256)  # 1024 -> 512
#         self.fc3 = nn.Linear(256, 1)  # 512 -> 1

                
#     def forward(self, img, labels):
#         c = self.label_embedding(labels)  # Get label embeddings
#         c = c.view(labels.size(0),img.size(1), self.img_size, self.img_size)  # Reshape embeddings to match image channels
#         c = c.expand(-1, img.size(1), -1, -1)  # Expand to match image channels
#         x = torch.cat((img, c), 1)  # Concatenate image and label embeddings
#         x = F.leaky_relu(self.bn0(self.conv0(x)), 0.2)
#         x = F.leaky_relu(self.bn1(self.conv1(x)), 0.2)
#         x = F.leaky_relu(self.bn2(self.conv2(x)), 0.2)
#         x = F.leaky_relu(self.bn3(self.conv3(x)), 0.2)
#         x = self.conv4(x)
#         x = x.view(x.size(0), -1)  # Flatten the tensor for the fully connected layers
#         x = F.leaky_relu(self.fc1(x), 0.2)
#         x = F.leaky_relu(self.fc2(x), 0.2)
#         validity = self.fc3(x)  # Final output
#         return validity
      
class Discriminator(nn.Module):
    """Conditional GAN discriminator/critic over flattened images.

    Scores an (image, label) pair with a small MLP. The final Sigmoid is
    deliberately omitted, so the output is an unbounded logit — suitable for
    BCEWithLogitsLoss or a WGAN-style critic.
    """

    def __init__(self, label_dim, img_dim, dropout_prob=0.5):
        super(Discriminator, self).__init__()
        # One embedding row per class, each of width label_dim.
        self.label_embedding = nn.Embedding(label_dim, label_dim)
        flat_pixels = int(np.prod(img_dim))

        # NOTE(review): dropout_prob is accepted but never used — confirm
        # whether Dropout layers were meant to sit between the linears.
        in_features = flat_pixels + label_dim
        layers = []
        for width in (128, 64):
            layers.append(nn.Linear(in_features, width))
            layers.append(nn.LeakyReLU(0.2, inplace=True))
            in_features = width
        layers.append(nn.Linear(in_features, 1))  # raw logit; no Sigmoid on purpose
        self.model = nn.Sequential(*layers)

    def forward(self, img, labels):
        """Score a batch of (image, label) pairs; returns a (batch, 1) logit tensor."""
        embedded = self.label_embedding(labels)
        features = torch.cat([img.view(img.size(0), -1), embedded], 1)
        # Instance noise: a small Gaussian perturbation of the inputs helps
        # stabilize GAN training (applied unconditionally, as in the original).
        features = features + torch.randn_like(features) * 0.05
        return self.model(features)