import os

import torch
import torchvision
import numpy as np
import torch.nn as nn
import matplotlib.pyplot as plt
from PIL import Image
from torch.utils.data import DataLoader
from torchvision.utils import save_image, make_grid

from utils.models import *
from utils.dataset import *
from utils.train_parameters import *
from utils.transform import *
from utils.util import *


os.chdir(os.path.dirname(__file__))  # set work path equal to file path

# Output directories for generated sample grids and model checkpoints.
# exist_ok=True makes creation idempotent (replaces the exists-check dance).
os.makedirs('./results', exist_ok=True)
os.makedirs('./models', exist_ok=True)

PATH = {
    'data': './data/',
    'pic_saved': './results/',
    'model_saved': './models/',
}
# Number of entries in the data directory (NOTE(review): not referenced
# anywhere else in this script — possibly dead, kept for compatibility).
DATALENGTH = len(os.listdir(PATH['data']))
BATCHSIZE = 16


# All networks operate on 3-channel 224x224 images.
input_shape = (3, 224, 224)

# CycleGAN networks: one generator per translation direction, one
# discriminator per domain. The 9 is presumably the residual-block count
# of GeneratorResNet — confirm against utils.models.
G_A2B = GeneratorResNet(input_shape, 9)  # translates domain A -> domain B
G_B2A = GeneratorResNet(input_shape, 9)  # translates domain B -> domain A
D_A = Discriminator(input_shape)         # scores images as real/fake in domain A
D_B = Discriminator(input_shape)         # scores images as real/fake in domain B

# Initialize training parameters (optimizers, loss functions, epoch counts, ...)
# from the four networks' parameter iterators.
parameters = parameters_constructor(
    G_A2B.parameters(), G_B2A.parameters(),
    D_A.parameters(), D_B.parameters(),
    lr = 0.0003
)

# Register learning-rate schedulers and image replay buffers.
# Each optimizer gets a LambdaLR schedule; the utils LambdaLR helper
# presumably implements CycleGAN's linear decay starting at decay_epoch
# (starting epoch 0 here) — confirm against utils.util.
parameters.set(**{
    'lr_scheduler_G': torch.optim.lr_scheduler.LambdaLR(
        parameters('optimizer')['g'], lr_lambda=LambdaLR(parameters('train_epochs'), 0, parameters('decay_epoch')).step
    ),
    'lr_scheduler_D_A': torch.optim.lr_scheduler.LambdaLR(
        parameters('optimizer')['d_a'], lr_lambda=LambdaLR(parameters('train_epochs'), 0, parameters('decay_epoch')).step
    ),
    'lr_scheduler_D_B': torch.optim.lr_scheduler.LambdaLR(
        parameters('optimizer')['d_b'], lr_lambda=LambdaLR(parameters('train_epochs'), 0, parameters('decay_epoch')).step
    ),
    # Buffers of previously generated fakes fed to the discriminators
    # (standard CycleGAN trick to stabilize D training).
    'fake_A_buffer': ReplayBuffer(),
    'fake_B_buffer': ReplayBuffer(),
})

# If a GPU is available, move all networks and loss functions to CUDA.
if torch.cuda.is_available():
    G_A2B = G_A2B.cuda()
    G_B2A = G_B2A.cuda()
    D_A = D_A.cuda()
    D_B = D_B.cuda()    
    parameters('loss_fn')['criterion_GAN'].cuda()
    parameters('loss_fn')['criterion_cycle'].cuda()
    parameters('loss_fn')['criterion_identity'].cuda()

# Build the training data loader. ImageDataset presumably yields dicts
# with 'a' and 'b' image batches (see usage below) — confirm against
# utils.dataset. unaligned=True draws unpaired A/B samples.
dataloader = DataLoader(
    ImageDataset(PATH['data'], TRANSFORM, unaligned=True),
    batch_size = BATCHSIZE, 
    shuffle = True,
)

def sample_images(batches_ones):
    """Save a qualitative sample grid to results/<batches_ones>.png.

    Pulls one batch from the global dataloader, runs both generators in
    eval mode, and stacks real-A / fake-B / real-B / fake-A grids
    vertically into a single image.

    Args:
        batches_ones: identifier (batch counter) used as the file name.
    """
    imgs = next(iter(dataloader))
    G_A2B.eval()
    G_B2A.eval()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    real_a = imgs['a'].to(device)
    real_b = imgs['b'].to(device)
    # Inference only: no_grad avoids building autograd graphs for the
    # generator passes (the deprecated Variable wrapper is gone too).
    with torch.no_grad():
        fake_b = G_A2B(real_a)
        fake_a = G_B2A(real_b)

    real_a = make_grid(real_a, nrow=5, normalize=True)
    real_b = make_grid(real_b, nrow=5, normalize=True)
    fake_a = make_grid(fake_a, nrow=5, normalize=True)
    fake_b = make_grid(fake_b, nrow=5, normalize=True)

    # Concatenate the four grids along the height dimension.
    image_grid = torch.cat((real_a, fake_b, real_b, fake_a), 1)
    save_image(image_grid, 'results/%s.png'%(batches_ones), normalize=False)

def run(): # train
    """Run the full CycleGAN training loop, then save model weights.

    For each batch: update both generators jointly (identity + adversarial
    + cycle-consistency losses), then each discriminator against real
    samples and replay-buffered fakes. Saves a sample grid every 20
    batches and steps the LR schedulers once per epoch.
    """
    prev_time = time.time()
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    for epoch in range(0, parameters('train_epochs')):
        for i, batch in enumerate(dataloader):
            print('现在是第%d轮'%(i))
            real_a = batch['a'].to(device)
            real_b = batch['b'].to(device)
            # Discriminator targets: 1 for real patches...
            valid = torch.ones((real_a.size(0), *D_A.output_shape), device=device)
            # ...and 0 for fakes. BUGFIX: this was torch.ones, which
            # trained the discriminators to label fakes as real,
            # breaking the adversarial objective.
            fake = torch.zeros((real_a.size(0), *D_A.output_shape), device=device)

            G_A2B.train()
            G_B2A.train()

            # Identity loss: each generator should leave images of its
            # target domain unchanged.
            loss_id_a = parameters('loss_fn')['criterion_identity'](G_B2A(real_a), real_a)
            loss_id_b = parameters('loss_fn')['criterion_identity'](G_A2B(real_b), real_b)
            loss_identity = (loss_id_a + loss_id_b) / 2

            # Adversarial loss: fakes should fool the discriminators.
            fake_b = G_A2B(real_a)
            loss_GAN_A2B = parameters('loss_fn')['criterion_GAN'](D_B(fake_b), valid)
            fake_a = G_B2A(real_b)
            loss_GAN_B2A = parameters('loss_fn')['criterion_GAN'](D_A(fake_a), valid)
            loss_GAN = (loss_GAN_A2B + loss_GAN_B2A) / 2

            # Cycle-consistency loss: A -> B -> A should reconstruct A.
            recov_A = G_B2A(fake_b)
            loss_cycle_a = parameters('loss_fn')['criterion_cycle'](recov_A, real_a)
            recov_B = G_A2B(fake_a)
            loss_cycle_b = parameters('loss_fn')['criterion_cycle'](recov_B, real_b)
            loss_cycle = (loss_cycle_a + loss_cycle_b) / 2

            # Joint generator update.
            loss_G = loss_GAN + parameters('lambda_cyc') * loss_cycle + parameters('lambda_id') * loss_identity
            parameters('optimizer')['g'].zero_grad()
            loss_G.backward()
            parameters('optimizer')['g'].step()

            # Discriminator A update: real vs. replay-buffered fakes.
            # detach() stops discriminator gradients flowing into G_B2A.
            loss_real = parameters('loss_fn')['criterion_GAN'](D_A(real_a), valid)
            fake_a_ = parameters('fake_A_buffer').push_and_pop(fake_a)
            loss_fake = parameters('loss_fn')['criterion_GAN'](D_A(fake_a_.detach()), fake)
            loss_D_A = (loss_real + loss_fake) / 2
            parameters('optimizer')['d_a'].zero_grad()
            loss_D_A.backward()
            parameters('optimizer')['d_a'].step()

            # Discriminator B update, symmetric to D_A.
            loss_real = parameters('loss_fn')['criterion_GAN'](D_B(real_b), valid)
            fake_b_ = parameters('fake_B_buffer').push_and_pop(fake_b)
            loss_fake = parameters('loss_fn')['criterion_GAN'](D_B(fake_b_.detach()), fake)
            loss_D_B = (loss_real + loss_fake) / 2
            parameters('optimizer')['d_b'].zero_grad()
            loss_D_B.backward()
            parameters('optimizer')['d_b'].step()

            loss_D = (loss_D_A + loss_D_B) / 2

            # Progress / ETA bookkeeping.
            batches_done = epoch * len(dataloader) + i
            batches_left = parameters('train_epochs') * len(dataloader) - batches_done
            time_left = datetime.timedelta(seconds=batches_left * (time.time() - prev_time))
            prev_time = time.time()

            sys.stdout.write(
                "\r[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f, adv: %f, cycle: %f, identity: %f] ETA: %s"
                % (
                    epoch,
                    parameters('train_epochs'),
                    i,
                    len(dataloader),
                    loss_D.item(),
                    loss_G.item(),
                    loss_GAN.item(),
                    loss_cycle.item(),
                    loss_identity.item(),
                    time_left,
                )
            )

            # Save one test sample grid every 20 batches.
            if batches_done % 20 == 0:
                sample_images(batches_done)

        # Per-epoch LR decay.
        parameters('lr_scheduler_G').step()
        parameters('lr_scheduler_D_A').step()
        parameters('lr_scheduler_D_B').step()


    # Save model weights. BUGFIX: three of these saved the bound
    # state_dict *method* (missing call parentheses), pickling a method
    # object instead of the weights and making the checkpoints unloadable.
    torch.save(G_A2B.state_dict(), "models/G_A2B.pth")
    torch.save(G_B2A.state_dict(), "models/G_B2A.pth")
    torch.save(D_A.state_dict(), "models/D_A.pth")
    torch.save(D_B.state_dict(), "models/D_B.pth")
    print('save my model finished!')

# Script entry point: train the CycleGAN and save checkpoints.
if __name__ == '__main__':
    run()