# coding=utf-8
import torch.autograd
import torch.nn as nn
from torch.autograd import Variable
import torch.utils
import torch.utils.data
from torchvision import transforms
from torchvision import datasets
from torchvision.utils import save_image
import os

# Output directory for generated images and the training-loss log.
File_Path=os.getcwd()+"/gan_result"
if not os.path.exists(File_Path):
    os.makedirs(File_Path)
    print("目录新建成功:"+File_Path)  # announces that the directory was created
# Loss log, appended to every 100 batches by the training loop.
# NOTE(review): this handle is never explicitly flushed or closed below —
# output only hits disk reliably at interpreter exit; confirm intended.
output_loss_file=open(File_Path+"/output_loss.txt","w+")

def to_img(x):
    """De-normalize a batch of flat Tanh-range images for saving.

    Takes a tensor of values in [-1, 1] (the generator's Tanh output),
    rescales to [0, 1], clamps, and reshapes to (N, 1, 28, 28).
    """
    rescaled = (x + 1) * 0.5          # [-1, 1] -> [0, 1]
    rescaled = rescaled.clamp(0, 1)   # guard against numeric overshoot
    return rescaled.view(-1, 1, 28, 28)


# Hyperparameters.
batch_size=128     # images per mini-batch
num_epoch=20       # full passes over the MNIST training set
z_dimension=100    # size of the generator's latent noise vector

# BUG FIX: the original normalized with MNIST statistics (0.1307, 0.3081),
# which maps real pixels to roughly [-0.42, 2.82]. The generator ends in
# Tanh (outputs in [-1, 1]) and to_img() de-normalizes with 0.5*(x+1), so
# real and fake images lived in different ranges and the discriminator
# could separate them by range alone. mean=0.5, std=0.5 maps real pixels
# to [-1, 1], matching the Tanh output and to_img().
img_transform=transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,),(0.5,))
])

# MNIST training split; downloaded to ../data/ on first run.
mnist=datasets.MNIST(
    root='../data/',train=True,transform=img_transform,download=True
)

# Shuffled mini-batch loader over the training set.
dataloader=torch.utils.data.DataLoader(
    dataset=mnist,batch_size=batch_size,shuffle=True
)

class discriminator(nn.Module):
    """Binary real/fake classifier over flattened 28x28 MNIST images.

    Maps a (N, 784) batch through three fully connected layers to one
    probability per image, shape (N, 1), via a sigmoid head.
    """

    def __init__(self):
        super(discriminator, self).__init__()
        # 784 -> 256 -> 256 -> 1 with LeakyReLU(0.2) hidden activations.
        layers = [
            nn.Linear(784, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 256),
            nn.LeakyReLU(0.2),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        ]
        self.dis = nn.Sequential(*layers)

    def forward(self, x):
        """Return the probability that each row of x is a real image."""
        return self.dis(x)
    

class generator(nn.Module):
    """Generator: latent noise (N, 100) -> flattened image (N, 784).

    Tanh keeps outputs in [-1, 1], which is the range to_img() expects
    when de-normalizing for saving.
    """

    def __init__(self):
        super(generator, self).__init__()
        # 100 -> 256 -> 256 -> 784 with ReLU hidden activations.
        self.gen = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(True),
            nn.Linear(256, 256),
            nn.ReLU(True),
            nn.Linear(256, 784),
            nn.Tanh()
        )

    def forward(self, x):
        """Map latent vectors of shape (N, 100) to images of shape (N, 784).

        The original ended with x.squeeze(-1), which is dead code here:
        squeeze(-1) only removes a trailing dimension of size 1, and the
        last dimension is always 784. Removed; output is unchanged.
        """
        return self.gen(x)
    
# Instantiate the two networks (CPU).
D=discriminator()
G=generator()

# Binary cross-entropy on the discriminator's sigmoid outputs; separate
# Adam optimizers so each network only ever updates its own parameters.
criterion=nn.BCELoss()
d_optimizer=torch.optim.Adam(D.parameters(),lr=0.0003)
g_optimizer=torch.optim.Adam(G.parameters(),lr=0.0003)

for epoch in range(num_epoch):
    for i,(img,_) in enumerate(dataloader):
        num_img=img.size(0)
        # Flatten (N, 1, 28, 28) -> (N, 784) for the fully connected nets.
        img=img.view(num_img,-1)
        real_img=Variable(img)
        real_label=Variable(torch.ones(num_img))   # target 1 = real
        fake_label=Variable(torch.zeros(num_img))  # target 0 = fake

        # ---- Discriminator step: push D(real) -> 1 and D(fake) -> 0 ----
        real_out=D(real_img).squeeze(-1)           # (N, 1) -> (N,) to match labels
        d_loss_real=criterion(real_out,real_label)
        real_scores=real_out

        z=Variable(torch.randn(num_img,z_dimension))
        fake_img=G(z)
        # BUG FIX: detach the generated batch before feeding it to D, so
        # d_loss.backward() does not propagate gradients through G. The
        # original backpropagated into G here for nothing — d_optimizer
        # never updates G, and the grads were wiped by zero_grad() below.
        fake_out=D(fake_img.detach()).squeeze(-1)
        d_loss_fake=criterion(fake_out,fake_label)
        fake_scores=fake_out

        d_loss=d_loss_real+d_loss_fake
        d_optimizer.zero_grad()
        d_loss.backward()
        d_optimizer.step()

        # ---- Generator step: push D(G(z)) -> 1 (fool the discriminator) ----
        z=Variable(torch.randn(num_img,z_dimension))
        fake_img=G(z)
        output=D(fake_img).squeeze(-1)
        g_loss=criterion(output,real_label)        # label fakes as real for G's loss

        g_optimizer.zero_grad()
        g_loss.backward()
        g_optimizer.step()

        if (i + 1) % 100 == 0:
            # Log losses plus the mean D score on the real and fake batches.
            prt_out = f'Epoch[{epoch}/{num_epoch}],d_loss:{d_loss.item():.6f},g_loss:{g_loss.item():.6f},D_real: {real_scores.data.mean():.6f},D_fake: {fake_scores.data.mean():.6f} \n'  
            output_loss_file.write(prt_out)
            output_loss_file.flush()  # BUG FIX: make the log readable while training runs
            print(prt_out)

    # BUG FIX: these image dumps were inside the batch loop, rewriting the
    # same PNG files hundreds of times per epoch. Saving once per epoch
    # (using the last batch, as the overwrites effectively did) produces
    # identical final files with a fraction of the disk I/O.
    if epoch == 0:
        real_images = to_img(real_img.cpu().data)
        save_image(real_images, File_Path + '/real_images.png')

    fake_images = to_img(fake_img.cpu().data)
    save_image(fake_images, File_Path + '/fake_images-{}.png'.format(epoch + 1))

output_loss_file.close()  # BUG FIX: the log file was never closed
torch.save(G.state_dict(),'./generator.pth')
torch.save(D.state_dict(),'./discriminator.pth')