from MyDataset import MyDataset
import torch
import torchvision.transforms as transforms
from torch.utils.tensorboard import SummaryWriter
import torch.nn as nn
from torchvision.utils import save_image
from torch.utils.tensorboard import SummaryWriter
import os

# Use the GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# TensorBoard log directory for this run.
writer = SummaryWriter("100-faces-logs")

# ToTensor scales pixels to [0, 1]; Normalize((0.5,), (0.5,)) then maps them
# to [-1, 1], matching the generator's Tanh output range (undone by to_img).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,))
])

batch_size = 128
NOISE_DIM = 50  # dimensionality of the generator's input noise vector
epochs = 50

# NOTE(review): assumes each dataset item is a 3x96x96 face image listed in
# faces.txt — confirm against the MyDataset implementation.
dataset = MyDataset(txt_path='./data/faces.txt', transform=transform)
dataLoader = torch.utils.data.DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)


# Discriminator: maps a 3x96x96 image to a single real/fake probability.
class Discriminator(nn.Module):
    """DCGAN discriminator.

    Spatial sizes along the stack: 96 -> 32 -> 16 -> 8 -> 4 -> 1.
    The final Sigmoid squashes the score into (0, 1) for BCELoss.
    """

    def __init__(self, d):
        super(Discriminator, self).__init__()
        # 3x96x96 -> d x 32x32
        layers = [
            nn.Conv2d(3, d, kernel_size=5, stride=3, padding=1),
            nn.LeakyReLU(0.2),
        ]
        # Three stride-2 stages, doubling channels: 32 -> 16 -> 8 -> 4.
        channels = d
        for _ in range(3):
            layers += [
                nn.Conv2d(channels, channels * 2, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm2d(channels * 2),
                nn.LeakyReLU(0.2),
            ]
            channels *= 2
        # 4x4 feature map collapsed to a 1x1 score.
        layers += [
            nn.Conv2d(channels, 1, kernel_size=4, stride=1, padding=0),
            nn.Sigmoid(),
        ]
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        score = self.conv(x)
        return score.view(-1, 1)


# Generator: turns a low-dimensional noise vector into an image.
# Transposed-conv output size: H_out = (H_in - 1) * stride - 2 * padding + kernel_size
class Generator(nn.Module):
    """DCGAN generator.

    Spatial sizes along the stack: 1 -> 4 -> 8 -> 16 -> 32 -> 96.
    Tanh puts the output in [-1, 1], matching the normalized training data.
    """

    def __init__(self, NOISE_DIM, d):
        super(Generator, self).__init__()
        # NOISE_DIM x 1x1 -> (d*8) x 4x4
        blocks = [
            nn.ConvTranspose2d(NOISE_DIM, d * 8, kernel_size=4, stride=1, padding=0),
            nn.BatchNorm2d(d * 8),
            nn.ReLU(),
        ]
        # Three stride-2 upsampling stages, halving channels: 4 -> 8 -> 16 -> 32.
        channels = d * 8
        for _ in range(3):
            blocks += [
                nn.ConvTranspose2d(channels, channels // 2, kernel_size=4, stride=2, padding=1),
                nn.BatchNorm2d(channels // 2),
                nn.ReLU(),
            ]
            channels //= 2
        # 32 -> 96 with 3 output channels: (32-1)*3 - 2*1 + 5 = 96.
        blocks += [
            nn.ConvTranspose2d(channels, 3, kernel_size=5, stride=3, padding=1),
            nn.Tanh(),
        ]
        self.conv = nn.Sequential(*blocks)

    def forward(self, x):
        return self.conv(x)


# Binary cross-entropy over the discriminator's sigmoid scores.
criterion = nn.BCELoss()
D = Discriminator(64).to(device)
G = Generator(NOISE_DIM, 64).to(device)

# Create the output directory for generated samples.  makedirs with
# exist_ok=True replaces the original exists()+mkdir pair, which was a
# check-then-act race and would fail if the path appeared in between.
os.makedirs('./img3', exist_ok=True)


def to_img(x):
    """Undo Normalize((0.5,), (0.5,)): map [-1, 1] tensors back to [0, 1]
    images shaped (-1, 3, 96, 96) for saving/logging."""
    images = (x + 1) / 2              # [-1, 1] -> [0, 1]
    images = images.clamp(min=0, max=1)  # guard against numerical spill
    return images.view(-1, 3, 96, 96)


def discriminatorLoss(logits_real, logits_fake):
    """Discriminator objective: push real scores toward 1 and fake scores
    toward 0, summing the two BCE terms."""
    n = logits_real.shape[0]
    real_targets = torch.ones(n, 1).to(device)
    fake_targets = torch.zeros(n, 1).to(device)
    return criterion(logits_real, real_targets) + criterion(logits_fake, fake_targets)


def generatorLoss(logits_fake):
    """Generator objective: fake images should be scored as real (target 1)."""
    n = logits_fake.shape[0]
    targets = torch.ones(n, 1).to(device)
    return criterion(logits_fake, targets)


def get_optimizer(net):
    """Build an Adam optimizer (lr=3e-4) over *net*'s parameters."""
    return torch.optim.Adam(net.parameters(), lr=3e-4)


def train():
    """Adversarial training loop: alternate one discriminator step and one
    generator step per batch, log losses to TensorBoard, and save a grid of
    generated images at the end of every epoch."""
    # Create each optimizer ONCE.  The original code called
    # get_optimizer(...) on every step, constructing a brand-new Adam each
    # time: its momentum/variance state was discarded every iteration, and
    # zero_grad()/step() even ran on two *different* optimizer objects.
    d_optimizer = get_optimizer(D)
    g_optimizer = get_optimizer(G)

    for epoch in range(epochs):
        for i, (real_img, _) in enumerate(dataLoader):
            num_img = real_img.shape[0]
            # Move the batch to the model's device (was missing: the
            # original crashed whenever device was CUDA).
            real_img = real_img.to(device)

            # ============= update the discriminator =============
            logits_real = D(real_img)
            real_scores = logits_real

            # Generate fake images from random noise and score them.
            noise = torch.randn(num_img, NOISE_DIM, 1, 1, device=device)
            fake_img = G(noise)
            # detach: the D step must not backprop through G.
            logits_fake = D(fake_img.detach())
            fake_scores = logits_fake
            d_loss = discriminatorLoss(logits_real, logits_fake)

            d_optimizer.zero_grad()
            d_loss.backward()
            d_optimizer.step()

            # ============= update the generator =============
            noise = torch.randn(num_img, NOISE_DIM, 1, 1, device=device)
            fake_img = G(noise)
            logits_fake = D(fake_img)
            g_loss = generatorLoss(logits_fake)

            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()

            step = i + 1 + epoch * len(dataLoader)
            writer.add_scalar("d_loss", d_loss, step)
            writer.add_scalar("g_loss", g_loss, step)
            if i % 100 == 0:
                print('Epoch[{}/{}], d_loss:{:.3f}, g_loss:{:.3f}, logits_real:{:.3f}, logits_fake:{:.3f}'
                      .format(epoch, epochs, d_loss, g_loss, real_scores.data.mean(), fake_scores.data.mean()))
            # save_image delegates to make_grid (8 images per row by default).
            # The last batch is usually smaller than batch_size, so the saved
            # grid holds len(dataset) % batch_size images.
            if i == len(dataLoader) - 1:
                fake_images = to_img(fake_img.data)
                save_image(fake_images, './img3/fake_images-{}.png'.format(epoch))
                writer.add_images("fake_image", fake_images, epoch)

    writer.close()


# Entry point: start training only when run as a script, not on import.
if __name__ == '__main__':
    train()
