import glob
import itertools
import os
import random

import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image
from torch.utils import data
class ResidualBlock(torch.nn.Module):
    """Residual block: two reflection-padded 3x3 convs with instance norm,
    added back onto the input through an identity shortcut."""

    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()
        # Channel count is unchanged end to end so the skip addition is valid.
        layers = []
        layers.append(torch.nn.ReflectionPad2d(1))
        layers.append(torch.nn.Conv2d(in_features, in_features, 3))
        layers.append(torch.nn.InstanceNorm2d(in_features))
        layers.append(torch.nn.ReLU(inplace=True))
        layers.append(torch.nn.ReflectionPad2d(1))
        layers.append(torch.nn.Conv2d(in_features, in_features, 3))
        layers.append(torch.nn.InstanceNorm2d(in_features))
        # Attribute name `conv_block` is kept: checkpoints loaded elsewhere
        # reference these state_dict keys.
        self.conv_block = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Return x + F(x); output shape equals input shape."""
        return self.conv_block(x) + x
class Generator(torch.nn.Module):
    """ResNet-style CycleGAN generator.

    Architecture: 7x7 input conv (c7s1-64), two stride-2 downsampling convs
    (64->128->256), `n_res_blocks` residual blocks, two stride-2 transposed
    convs back up (256->128->64), and a 7x7 output conv with tanh so results
    lie in (-1, 1).
    """

    def __init__(self, in_nc, out_nc, n_res_blocks=9):
        super(Generator, self).__init__()
        self.out_nc = out_nc
        self.in_nc = in_nc
        self.n_res_blocks = n_res_blocks

        # Initial 7x7 convolution block.
        layers = [
            torch.nn.ReflectionPad2d(3),
            torch.nn.Conv2d(in_nc, 64, 7),
            torch.nn.InstanceNorm2d(64),
            torch.nn.ReLU(inplace=True),
        ]

        # Two downsampling stages, doubling channels and halving H/W each time.
        channels = 64
        for _ in range(2):
            layers += [
                torch.nn.Conv2d(channels, channels * 2, 3, stride=2, padding=1),
                torch.nn.InstanceNorm2d(channels * 2),
                torch.nn.ReLU(inplace=True),
            ]
            channels *= 2

        # Residual bottleneck at the lowest resolution.
        layers += [ResidualBlock(channels) for _ in range(n_res_blocks)]

        # Two upsampling stages, halving channels and doubling H/W each time.
        layers += [
            torch.nn.ConvTranspose2d(channels, channels // 2, 3, stride=2, padding=1, output_padding=1),
            torch.nn.InstanceNorm2d(channels // 2),
            torch.nn.ReLU(inplace=True),
            torch.nn.ConvTranspose2d(channels // 2, channels // 4, 3, stride=2, padding=1, output_padding=1),
            torch.nn.InstanceNorm2d(channels // 4),
            torch.nn.ReLU(inplace=True),
        ]

        # Output projection back to out_nc channels, squashed into (-1, 1).
        layers += [
            torch.nn.ReflectionPad2d(3),
            torch.nn.Conv2d(64, out_nc, 7),
            torch.nn.Tanh(),
        ]

        # Attribute names `main` and `model` are kept: `model` holds the
        # state_dict keys that saved checkpoints reference.
        self.main = layers
        self.model = torch.nn.Sequential(*layers)

    def forward(self, x):
        """Map an (N, in_nc, H, W) batch to an (N, out_nc, H, W) batch."""
        return self.model(x)
class Discriminator(torch.nn.Module):
    """PatchGAN discriminator.

    A stack of 4x4 convs produces a 1-channel patch score map; `forward`
    averages that map spatially so each input image yields one scalar score.
    """

    def __init__(self, in_nc):
        super(Discriminator, self).__init__()
        self.in_nc = in_nc
        # First block deliberately has no normalization.
        blocks = [
            torch.nn.Conv2d(in_nc, 64, 4, stride=2, padding=1),
            torch.nn.LeakyReLU(0.2, inplace=True),
        ]
        # Stride-2 downsampling blocks with instance norm: 64->128->256.
        for cin, cout in ((64, 128), (128, 256)):
            blocks += [
                torch.nn.Conv2d(cin, cout, 4, stride=2, padding=1),
                torch.nn.InstanceNorm2d(cout),
                torch.nn.LeakyReLU(0.2, inplace=True),
            ]
        blocks += [
            # Stride-1 block widening to 512 channels.
            torch.nn.Conv2d(256, 512, 4, padding=1),
            torch.nn.InstanceNorm2d(512),
            torch.nn.LeakyReLU(0.2, inplace=True),
            # Final 1-channel patch score map.
            torch.nn.Conv2d(512, 1, 4, padding=1),
        ]
        # Attribute name `main` is kept for state_dict compatibility.
        self.main = torch.nn.Sequential(*blocks)

    def forward(self, x):
        """Return an (N, 1) tensor of per-image realness scores."""
        patch_scores = self.main(x)
        # Collapse the spatial patch map to one value per image.
        pooled = F.avg_pool2d(patch_scores, patch_scores.size()[2:])
        return pooled.view(patch_scores.size()[0], -1)
def tensor2image(tensor):
    """Convert the first image of a (-1, 1)-normalized batch tensor into a
    uint8 CHW numpy array in [0, 255]; grayscale is replicated to 3 channels."""
    arr = (tensor[0].cpu().float().numpy() + 1.0) * 127.5
    if arr.shape[0] == 1:
        # Single-channel input: repeat it so the result is always 3-channel.
        arr = np.tile(arr, (3, 1, 1))
    return arr.astype(np.uint8)
class ImageDataset(torch.utils.data.Dataset):
    """Unpaired two-domain image dataset for CycleGAN training.

    Loads image paths from ``<root>/<mode>A`` and ``<root>/<mode>B``.  With
    ``unaligned=True`` the B sample is drawn at random so A/B pairs are not
    implicitly aligned by index.
    """

    def __init__(self, root, transforms_=None, unaligned=False, mode='train'):
        # transforms_ is applied to every loaded PIL image in __getitem__.
        self.transform = transforms_
        self.unaligned = unaligned

        # BUG FIX: the original ignored `mode` and always read trainA/trainB,
        # so mode='test' silently loaded training data.  Honour it here; the
        # default mode='train' yields the same paths as before.
        self.files_A = sorted(glob.glob(os.path.join(root, '%sA' % mode) + '/*.*'))
        self.files_B = sorted(glob.glob(os.path.join(root, '%sB' % mode) + '/*.*'))

    def __getitem__(self, index):
        """Return a dict {'A': image, 'B': image} for the given index.

        Indices wrap modulo each domain's size, so the two domains may
        contain different numbers of images.
        """
        item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))

        if self.unaligned:
            # Break index alignment by sampling B uniformly at random.
            item_B = self.transform(Image.open(self.files_B[random.randint(0, len(self.files_B) - 1)]))
        else:
            item_B = self.transform(Image.open(self.files_B[index % len(self.files_B)]))

        return {'A': item_A, 'B': item_B}

    def __len__(self):
        # Epoch length is governed by the larger domain; the smaller wraps.
        return max(len(self.files_A), len(self.files_B))

    def pp(self):
        # Debug helper: dump the discovered domain-A file list.
        print(self.files_A)
class ReplayBuffer():
    """History pool of previously generated images.

    CycleGAN trains the discriminators on a mixture of freshly generated
    fakes and fakes drawn from this pool, which stabilizes training.
    """

    def __init__(self, max_size=50):
        # BUG FIX: was `assert`, which is silently stripped under `python -O`;
        # raise explicitly so the validation always runs.
        if max_size <= 0:
            raise ValueError('Empty buffer or trying to create a black hole. Be careful.')
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        """Insert a batch of images and return a same-sized batch.

        Per image: while the pool is not full, store the image and also
        return it.  Once full, with probability 0.5 return (and replace) a
        random pooled image; otherwise return the new image unchanged.
        """
        to_return = []
        for element in data.data:
            element = torch.unsqueeze(element, 0)
            if len(self.data) < self.max_size:
                # Pool not full yet: keep a copy and pass the image through.
                self.data.append(element)
                to_return.append(element)
            else:
                if random.uniform(0, 1) > 0.5:
                    # Swap: emit an old image, keep the new one for reuse.
                    i = random.randint(0, self.max_size - 1)
                    to_return.append(self.data[i].clone())
                    self.data[i] = element
                else:
                    # Pass the new image through untouched.
                    to_return.append(element)
        # BUG FIX: torch.autograd.Variable is deprecated and a no-op on
        # modern PyTorch (it also relied on a later mid-script import);
        # return the concatenated tensor directly.
        return torch.cat(to_return)
# ---------------------------------------------------------------------------
# Training setup (script section): buffers, networks, losses, optimizers,
# data pipeline.  Runs at import time.
# ---------------------------------------------------------------------------
# Replay pools of previously generated fakes for the two discriminators.
fake_A_buffer = ReplayBuffer()
fake_B_buffer = ReplayBuffer()
image_size=256
dev=torch.device('cuda:0')  # NOTE(review): assumes a CUDA GPU is present — confirm
# Two generators (A->B and B->A) and one discriminator per domain.
netG_A2B = Generator(3,3).to(dev)
netG_B2A = Generator(3,3).to(dev)
netD_A = Discriminator(3).to(dev)
netD_B = Discriminator(3).to(dev)
# Resume all four networks from previously saved checkpoints.
netG_A2B.load_state_dict(torch.load('./data/CycleGAN-netG_A2B.pth'))    # load model parameters
netG_B2A.load_state_dict(torch.load('./data/CycleGAN-netG_B2A.pth'))
netD_A.load_state_dict(torch.load('./data/CycleGAN-netD_A.pth'))
netD_B.load_state_dict(torch.load('./data/CycleGAN-netD_B.pth'))


# LSGAN-style adversarial loss (MSE); cycle and identity losses are L1.
criterion_GAN = torch.nn.MSELoss().to(dev)
criterion_cycle = torch.nn.L1Loss().to(dev)
criterion_identity = torch.nn.L1Loss().to(dev)
from torch.optim import lr_scheduler
# One optimizer over BOTH generators' parameters; one per discriminator.
optimizer_G = torch.optim.Adam(itertools.chain(netG_A2B.parameters(), netG_B2A.parameters()),
                                lr=0.0003, betas=(0.9, 0.999))
optimizer_D_A = torch.optim.Adam(netD_A.parameters(), lr=0.0003, betas=(0.9, 0.999))
optimizer_D_B = torch.optim.Adam(netD_B.parameters(), lr=0.0003, betas=(0.9, 0.999))
# Exponential LR decay; each scheduler is stepped once per epoch below.
scheduler1 = lr_scheduler.ExponentialLR(optimizer_G, gamma=0.9)
scheduler2 = lr_scheduler.ExponentialLR(optimizer_D_A, gamma=0.9)
scheduler3 = lr_scheduler.ExponentialLR(optimizer_D_B, gamma=0.9)
# Augmentation pipeline: resize, random flip, random crop, normalize to (-1, 1).
ts= transforms.Compose([ transforms.Resize(image_size, Image.BICUBIC),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.RandomCrop(image_size),
                transforms.ToTensor(),
                transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ])
batch_size=4
dataloader = data.DataLoader(ImageDataset('./data/vango/vangogh2photo/', transforms_=ts, unaligned=True),
                        batch_size=batch_size,shuffle=True)
# Real in-domain images.
import random
from torch.autograd import Variable

# Pre-allocated device buffers that each batch of real A / real B images is copied into.
input_A = torch.Tensor(batch_size, 3, image_size, image_size).to(dev)
input_B = torch.Tensor(batch_size, 3, image_size, image_size).to(dev)
# Labels for the LSGAN loss: 1.0 = real, 0.0 = fake.
target_real = Variable(torch.Tensor(batch_size).fill_(1.0), requires_grad=False).to(dev)
target_fake = Variable(torch.Tensor(batch_size).fill_(0.0), requires_grad=False).to(dev)
# Main training loop: alternate a generator update with one update per
# discriminator for every batch.
for epoch in range(0, 10):
    for i, batch in enumerate(dataloader):
        # Fetch one batch of real images from domain A and domain B.
        real_A = Variable(input_A.copy_(batch['A'])).to(dev)
        real_B = Variable(input_B.copy_(batch['B'])).to(dev)

        ###### Generators A2B and B2A ######
        optimizer_G.zero_grad()

        # Loss part 1: identity loss.
        # Feeding a real B image to G_A2B should reproduce it (and likewise
        # for G_B2A with a real A image); weighted by 5.0.
        same_B = netG_A2B(real_B)
        loss_identity_B = criterion_identity(same_B, real_B)*5.0
        same_A = netG_B2A(real_A)
        loss_identity_A = criterion_identity(same_A, real_A)*5.0

        # Loss part 2: GAN (adversarial) loss — fakes should score as real.
        fake_B = netG_A2B(real_A)  # real_A -> fake_B
        pred_fake = netD_B(fake_B).squeeze(-1)
        loss_GAN_A2B = criterion_GAN(pred_fake, target_real)

        fake_A = netG_B2A(real_B)  # real_B -> fake_A
        pred_fake = netD_A(fake_A).squeeze(-1)
        loss_GAN_B2A = criterion_GAN(pred_fake, target_real)

        # Loss part 3: cycle-consistency loss (A->B->A and B->A->B should
        # recover the originals); weighted by 10.0.
        recovered_A = netG_B2A(fake_B)
        loss_cycle_ABA = criterion_cycle(recovered_A, real_A)*10.0

        recovered_B = netG_A2B(fake_A)
        loss_cycle_BAB = criterion_cycle(recovered_B, real_B)*10.0

        # Total generator loss.
        loss_G = loss_identity_A + loss_identity_B + loss_GAN_A2B + loss_GAN_B2A + loss_cycle_ABA + loss_cycle_BAB
        loss_G.backward()
        
        # Update both generators' parameters.
        optimizer_G.step()
        ###################################
        
        
        # The discriminator losses match the original GAN formulation, except
        # the metric is MSE (LSGAN) instead of cross-entropy.
        ###### Discriminator A ######
        optimizer_D_A.zero_grad()

        # Real loss: real A images should score as real.
        pred_real = netD_A(real_A).squeeze(-1)
        loss_D_real = criterion_GAN(pred_real, target_real)

        # Fake loss: fakes are drawn from the replay buffer (mix of fresh and
        # historical fakes) and detached so only D_A receives gradients.
        fake_A = fake_A_buffer.push_and_pop(fake_A)
        pred_fake = netD_A(fake_A.detach()).squeeze(-1)
        loss_D_fake = criterion_GAN(pred_fake, target_fake)

        # Total loss: average of real and fake terms.
        loss_D_A = (loss_D_real + loss_D_fake)*0.5
        loss_D_A.backward()

        optimizer_D_A.step()
        ###################################

        
        ###### Discriminator B ######
        optimizer_D_B.zero_grad()

        # Real loss: real B images should score as real.
        pred_real = netD_B(real_B).squeeze(-1)
        loss_D_real = criterion_GAN(pred_real, target_real)
        
        # Fake loss: buffered fakes, detached from the generator graph.
        fake_B = fake_B_buffer.push_and_pop(fake_B)
        pred_fake = netD_B(fake_B.detach()).squeeze(-1)
        loss_D_fake = criterion_GAN(pred_fake, target_fake)

        # Total loss: average of real and fake terms.
        loss_D_B = (loss_D_real + loss_D_fake)*0.5
        loss_D_B.backward()

        optimizer_D_B.step()
        ###################################
        # Periodic progress report within the epoch.
        if i%30==0:
            print('epoch:',epoch,',i:',i,'loss_G', loss_G.item(),'loss_G_identity', (loss_identity_A + loss_identity_B).item(), 'loss_G_GAN', (loss_GAN_A2B + loss_GAN_B2A).item(),
                    'loss_G_cycle', (loss_cycle_ABA + loss_cycle_BAB).item(), 'loss_D',(loss_D_A + loss_D_B).item())

        # Progress report (http://localhost:8097)
    # End-of-epoch summary (values from the last batch of the epoch).
    print('epoch:',epoch,'loss_G', loss_G.item(), 'loss_G_identity', (loss_identity_A + loss_identity_B).item(), 'loss_G_GAN', (loss_GAN_A2B + loss_GAN_B2A).item(),
                    'loss_G_cycle', (loss_cycle_ABA + loss_cycle_BAB).item(), 'loss_D',(loss_D_A + loss_D_B).item())

    # Decay all three learning rates once per epoch.
    scheduler1.step()
    scheduler2.step()
    scheduler3.step()
#     # Save models checkpoints
# torch.save(netG_A2B.state_dict(), './data/CycleGAN-netG_A2B.pth')
# torch.save(netG_B2A.state_dict(), './data/CycleGAN-netG_B2A.pth')
# torch.save(netD_A.state_dict(), './data/CycleGAN-netD_A.pth')
# torch.save(netD_B.state_dict(), './data/CycleGAN-netD_B.pth')
    # Checkpoint all four networks every 5 epochs (skipping epoch 0).
    if epoch!=0 and epoch%5==0:
        torch.save(netG_A2B.state_dict(), './data/1CycleGAN-netG_A2B.pth')
        torch.save(netG_B2A.state_dict(), './data/1CycleGAN-netG_B2A.pth')
        torch.save(netD_A.state_dict(), './data/1CycleGAN-netD_A.pth')
        torch.save(netD_B.state_dict(), './data/1CycleGAN-netD_B.pth')

    # Save models checkpoints

