###损失函数
import paddle
import warnings
import paddle.optimizer as optim
from model import Discriminator,Generator
from dataset import DataGenerater

import os
import cv2
import numpy as np
import parameter
warnings.filterwarnings('ignore')


# Image tiling helper
def combine(imgs):
    """Tile a list of equally sized images into a square grid.

    The grid side is ``w = floor(sqrt(len(imgs)))``; any images beyond the
    first ``w*w`` are ignored. Deriving the grid from ``len(imgs)`` (instead
    of ``parameter.batch_size``) fixes an IndexError when the caller supplies
    fewer images than the configured batch size, and gives identical output
    when the counts match.

    Args:
        imgs: non-empty list of numpy arrays, all with the same shape
            (H, W) or (H, W, C).

    Returns:
        A single numpy array of shape (w*H, w*W[, C]).

    Raises:
        ValueError: if ``imgs`` is empty.
    """
    w = int(len(imgs) ** 0.5)  # side length of the square grid
    if w == 0:
        raise ValueError("combine() needs at least one image")
    # Each column stacks w consecutive images vertically (axis 0);
    # the columns are then joined horizontally (axis 1).
    columns = [np.concatenate(imgs[c * w:(c + 1) * w], axis=0) for c in range(w)]
    return np.concatenate(columns, axis=1)

# Loss function: binary cross-entropy over the discriminator's sigmoid output.
loss = paddle.nn.BCELoss()

# Hyperparameters, all sourced from the shared `parameter` module.
#img_dim = 64
lr = parameter.lr                              # learning rate shared by both optimizers
epoch = parameter.epoch                        # number of training epochs
batch_size = parameter.batch_size
G_DIMENSION = parameter.G_DIMENSION            # latent-noise length (channel dim of paddle.randn inputs below)
beta1=parameter.beta1                          # Adam first-moment decay
beta2=parameter.beta2                          # Adam second-moment decay
output_path = parameter.output_path            # directory for sample image grids
checkpoint_path = parameter.checkpoint_path    # directory for generator checkpoints

#device = paddle.set_device('gpu')
#paddle.disable_static(device)

# Two-sided label smoothing: real targets drawn from (0.8, 1.0], fake targets
# from [0, 0.2). NOTE: these tensors are recomputed per batch inside the
# training loop; the module-level copies mainly fix the initial shapes.
real_label = np.ones((batch_size,1,1,1))- np.random.random_sample((batch_size,1,1,1))*0.2
fake_label = np.random.random_sample((batch_size,1,1,1))*0.2
real_label = paddle.to_tensor(real_label,dtype="float32")
fake_label = paddle.to_tensor(fake_label,dtype="float32")


# Networks (defined in model.py) — presumably a DCGAN-style pair; confirm in model.py.
netD = Discriminator()
netG = Generator()
# One Adam optimizer per network, sharing the same lr / beta settings.
optimizerD = optim.Adam(parameters=netD.parameters(), learning_rate=lr, beta1=beta1, beta2=beta2)
optimizerG = optim.Adam(parameters=netG.parameters(), learning_rate=lr, beta1=beta1, beta2=beta2)
# Dataset and batched loader (DataGenerater comes from dataset.py).
train_dataset = DataGenerater()
train_loader = paddle.io.DataLoader(train_dataset,batch_size=batch_size)

### Training loop
losses = [[], []]  # losses[0]: per-batch discriminator loss, losses[1]: generator loss
for pass_id in range(epoch):
    # Snapshot the generator at the start of every epoch.
    paddle.save(netG.state_dict(), "work/generator.params")
    for batch_id, data in enumerate(train_loader()):
        # ---- Train the discriminator ----
        batch_size = data.shape[0]  # last batch may be smaller than the configured size
        # Fresh smoothed labels per batch: real in (0.8, 1.0], fake in [0, 0.2).
        real_label = np.ones((batch_size,1,1,1))- np.random.random_sample((batch_size,1,1,1))*0.2
        fake_label = np.random.random_sample((batch_size,1,1,1))*0.2
        real_label = paddle.to_tensor(real_label,dtype="float32")
        fake_label = paddle.to_tensor(fake_label,dtype="float32")

        # Step D on the real batch.
        optimizerD.clear_grad()
        output = netD(data)
        errD_real = loss(output,real_label)
        errD_real.backward()
        optimizerD.step()
        optimizerD.clear_grad()

        # Step D on a fake batch; detach() stops gradients flowing into G here.
        noise = paddle.randn([batch_size,G_DIMENSION,1,1],'float32')
        fake = netG(noise)
        output = netD(fake.detach())
        errD_fake = loss(output,fake_label)
        errD_fake.backward()
        optimizerD.step()
        optimizerD.clear_grad()

        errD = errD_real + errD_fake
        # NOTE(review): .numpy()[0] assumes the loss tensor is a 1-element
        # array — confirm against the installed Paddle version.
        losses[0].append(errD.numpy()[0])

        # ---- Train the generator: make D label fresh fakes as real ----
        optimizerG.clear_grad()
        noise = paddle.randn([batch_size,G_DIMENSION,1,1],'float32')
        fake = netG(noise)
        output = netD(fake)
        errG = loss(output,real_label)
        errG.backward()
        optimizerG.step()
        optimizerG.clear_grad()
        losses[1].append(errG.numpy()[0])

        if batch_id % 100 == 0:
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            # Sample an n x n grid of generated images.
            # BUG FIX: combine() tiles a square grid of n*n images, so draw
            # n*n samples — the original drew only n, which indexed past the
            # end of `imgs` inside combine().
            n = int(parameter.batch_size ** (1/2))
            noise = paddle.randn([n*n,G_DIMENSION,1,1],'float32')
            generated_image = netG(noise).numpy()
            try:
                imgs = []
                for i in range(n*n):
                    # Full transpose (C,H,W)->(W,H,C), clip to [0,1], scale to 8-bit.
                    image = generated_image[i].transpose()
                    image = np.where(image > 0, image, 0)
                    image = np.where(image < 1, image, 1)
                    image = image*255.
                    imgs.append(image)

                img = combine(imgs)
                print(img.shape)
                cv2.imwrite('{}/{:04d}_{:04d}.png'.format(output_path, pass_id, batch_id),img)
                msg = 'Epoch ID={0} Batch ID={1} D-Loss={2} G-Loss={3}'.format(pass_id, batch_id, errD.numpy()[0], errG.numpy()[0])
                print(msg)
            except IOError as err:
                # BUG FIX: print the caught exception instance, not the IOError class.
                print(err)

        # Periodic generator checkpoint.
        if batch_id % 500 == 0:
            if not os.path.exists(checkpoint_path):
                os.makedirs(checkpoint_path)
            paddle.save(netG.state_dict(), "{}/generator_{}_{}.params".format(checkpoint_path,pass_id,batch_id))

# Coerce the recorded per-batch losses (numpy scalars) into plain Python
# floats so they can be serialized to JSON.
losses = [[float(v) for v in series] for series in losses]

# Final snapshot of the trained generator.
paddle.save(netG.state_dict(), "work/generator.params")

import json
with open("losses.json","w+") as f:
    json.dump(losses, f)

