#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author: 邵奈一
@Email: shaonaiyi@163.com
@Date: 2024/11/21
@微信：shaonaiyi888
@微信公众号: 邵奈一 
"""
from random import randint
import numpy as np
import torch

torch.set_default_tensor_type(torch.FloatTensor)
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import os
import matplotlib.pyplot as plt
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision.utils import save_image
import shutil
import cv2
import random
from PIL import Image
import itertools


def to_img(x, channels=3, size=256):
    """Map a generator output from Tanh range [-1, 1] back to image range [0, 1].

    The generators end in Tanh, so pixels arrive in [-1, 1]; rescale to
    [0, 1], clamp numerical overshoot, and reshape into a batch of square
    images suitable for torchvision's ``save_image``.

    Args:
        x: tensor whose element count is divisible by channels * size * size.
        channels: number of image channels (default 3 = RGB, as trained here).
        size: spatial height/width of the square image (default 256).

    Returns:
        Tensor of shape (-1, channels, size, size) with values in [0, 1].
    """
    out = 0.5 * (x + 1)        # [-1, 1] -> [0, 1]
    out = out.clamp(0, 1)      # guard against numeric overshoot
    out = out.view(-1, channels, size, size)
    return out


# Data loading configuration
data_path = os.path.abspath('data6')  # root folder holding trainA/ and trainB/
image_size = 256  # final crop size fed to the networks
batch_size = 1

# Augmentation pipeline: upscale by ~12%, random-crop back to image_size,
# random horizontal flip, then normalize RGB channels to [-1, 1]
# (matching the generators' Tanh output range).
# NOTE(review): passing Image.BICUBIC positionally to Resize is deprecated in
# newer torchvision (use transforms.InterpolationMode.BICUBIC) — confirm the
# installed torchvision version before changing.
transform = transforms.Compose([transforms.Resize(int(image_size * 1.12),
                                                  Image.BICUBIC),
                                transforms.RandomCrop(image_size),
                                transforms.RandomHorizontalFlip(),
                                transforms.ToTensor(),
                                transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])


def _get_train_data(batch_size=1):
    """Sample an unpaired mini-batch of transformed images from each domain.

    Reads ``batch_size`` images from ``data_path/trainA`` and ``batch_size``
    images from ``data_path/trainB``, applies the module-level ``transform``
    pipeline, and stacks them into batch tensors.

    Args:
        batch_size: number of images to draw from each domain.

    Returns:
        Tuple ``(batch_a, batch_b)`` of tensors, each shaped
        (batch_size, 3, image_size, image_size).
    """
    train_a_filepath = os.path.join(data_path, 'trainA')
    train_b_filepath = os.path.join(data_path, 'trainB')

    train_a_list = os.listdir(train_a_filepath)
    train_b_list = os.listdir(train_b_filepath)

    train_a_result = []
    train_b_result = []

    # Sample indices independently for each domain: CycleGAN data is
    # unpaired, and indexing trainB with indices drawn from trainA's range
    # (as the original did) raises IndexError whenever trainB holds fewer
    # files than trainA.
    a_indices = random.sample(range(len(train_a_list)), batch_size)
    b_indices = random.sample(range(len(train_b_list)), batch_size)

    for i, j in zip(a_indices, b_indices):
        a_img = Image.open(os.path.join(train_a_filepath,
                                        train_a_list[i])).convert('RGB')
        train_a_result.append(torch.unsqueeze(transform(a_img), 0))

        b_img = Image.open(os.path.join(train_b_filepath,
                                        train_b_list[j])).convert('RGB')
        train_b_result.append(torch.unsqueeze(transform(b_img), 0))

    return torch.cat(train_a_result, dim=0), torch.cat(train_b_result, dim=0)


"""
残差网络block

ReflectionPad2d: 镜像填充，例如 0 1 各侧填充 1 个数（共 4 个数），则 1 0 1 0
InstanceNorm2d: 对单个样本的每一层特征图抽出来一层层求均值、方差然后归一化

"""


class ResidualBlock(nn.Module):
    """Residual block: two reflection-padded 3x3 convolutions with a skip.

    ReflectionPad2d mirrors border pixels rather than zero-padding, and
    InstanceNorm2d normalizes each feature map of each sample independently.
    The block's input is added back onto the conv stack's output.
    """

    def __init__(self, in_features):
        super(ResidualBlock, self).__init__()
        layers = [
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            nn.InstanceNorm2d(in_features),
            nn.ReLU(inplace=True),
            nn.ReflectionPad2d(1),
            nn.Conv2d(in_features, in_features, 3),
            nn.InstanceNorm2d(in_features),
        ]
        self.block_layer = nn.Sequential(*layers)

    def forward(self, x):
        residual = self.block_layer(x)
        return x + residual


# 生成器
class Generator(nn.Module):
    """ResNet-style CycleGAN generator.

    Layout: 7x7 reflection-padded stem (3 -> 64 channels), two stride-2
    downsampling convs (64 -> 128 -> 256), nine residual blocks at the
    bottleneck, two transposed-conv upsampling stages (256 -> 128 -> 64),
    and a 7x7 reflection-padded head back to 3 channels with Tanh output
    in [-1, 1].
    """

    def __init__(self):
        super(Generator, self).__init__()

        layers = [nn.ReflectionPad2d(3),
                  nn.Conv2d(3, 64, 7),
                  nn.InstanceNorm2d(64),
                  nn.ReLU(inplace=True)]

        # Two downsampling stages, doubling the channel count each time.
        channels = 64
        for _ in range(2):
            layers += [nn.Conv2d(channels, channels * 2,
                                 3, stride=2, padding=1),
                       nn.InstanceNorm2d(channels * 2),
                       nn.ReLU(inplace=True)]
            channels *= 2

        # Nine residual blocks at the bottleneck resolution.
        layers += [ResidualBlock(channels) for _ in range(9)]

        # Two upsampling stages, halving the channel count each time.
        for _ in range(2):
            layers += [nn.ConvTranspose2d(channels, channels // 2,
                                          3, stride=2, padding=1,
                                          output_padding=1),
                       nn.InstanceNorm2d(channels // 2),
                       nn.ReLU(inplace=True)]
            channels //= 2

        layers += [nn.ReflectionPad2d(3),
                   nn.Conv2d(64, 3, 7),
                   nn.Tanh()]

        self.gen = nn.Sequential(*layers)

    def forward(self, x):
        return self.gen(x)


"""
判别器 
1、这里判别器的最后一层是FCN全卷积网络

avg_pool2d：以均值方式池化，以下述代码为例：
input  = torch.randn(10, 3, 128, 128)
m      = Discriminator()
output = m(input)

此时在 avg_pool2d 前 x.size() 为 torch.Size([10, 1, 14, 14])
对 10个[14, 14]的tensor求均值并返回
tensor([[0.1162],
        [0.1298],
        [0.1266],
        [0.1229],
        [0.1085],
        [0.1121],
        [0.1064],
        [0.1044],
        [0.1077],
        [0.1139]], grad_fn=<ViewBackward>)

"""


class Discriminator(nn.Module):
    """Convolutional discriminator ending in a fully-convolutional head.

    The final conv emits a spatial map of realness scores; ``forward``
    average-pools that map over its full spatial extent, returning one
    scalar per sample (shape (N, 1)).
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        self.dis = nn.Sequential(
            nn.Conv2d(3, 64, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv2d(64, 128, 4, 2, 1, bias=False),
            nn.InstanceNorm2d(128),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv2d(128, 256, 4, 2, 1, bias=False),
            nn.InstanceNorm2d(256),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv2d(256, 512, 4, padding=1),
            nn.InstanceNorm2d(512),
            nn.LeakyReLU(0.2, inplace=True),

            nn.Conv2d(512, 1, 4, padding=1))

    def forward(self, x):
        score_map = self.dis(x)
        pooled = F.avg_pool2d(score_map, score_map.size()[2:])
        return pooled.view(score_map.size()[0], -1)


"""
训练神经网络
存在生成网络 G_A2B 从A类图片生成B类图片
存在生成网络 G_B2A 从B类图片生成A类图片 
存在判别器   D_A   对A类图片真伪进行判断
存在判别器   D_B   对B类图片真伪进行判断 

存在损失函数：criterion_GAN MSELoss：均方损失函数，即 两个向量各分量差的平方
存在损失函数：criterion_cycle L1Loss：平均绝对误差，即 两个向量各分量差的绝对值求和再除分量数
存在损失函数: criterion_identity L1Loss:

第一步：训练生成器
1、利用 G_A2B 将 real_B 生成 same_B,获取 real_B 和 same_B 之间的损失
2、利用 G_B2A 将 real_A 生成 same_A,获取 real_A 和 same_A 之间的损失
3、利用 G_A2B 将 real_A 生成 fake_B, 通过 netD_B 判断 fake_B，获取其与判真之间的损失
4、利用 G_B2A 将 real_B 生成 fake_A，通过 netD_A 判断 fake_A，获取其与判真之间的损失
5、利用 G_B2A 将 fake_B 生成 recovered_A，获取其与 real_A 之间的损失
6、利用 G_A2B 将 fake_A 生成 recovered_B, 获取其与 real_B 之间的损失

此时对所有损失求和后训练生成器，此时：

对于 G_A2B：
    1. 如果输入是 B类图片，它生成的图片same_B总体像素层面上接近B
    2. 如果输入是 A类图片，它生成的图片fake_B会具有B类图片的卷积特征
    3. 对于生成的图片fake_B, 它经过 G_B2A 生成的recovered_A 会总体像素层面上接近A

    此时我们有了一张 A 经过 G_A2B 和 G_B2A 成为一张 新的A，这张新A像素总体上是A（是一批马）
    但细节纹理上具有 B 的特征（斑马纹理）

同理有 G_B2A

第二步：训练判别器
1、对判别器 netD_A 可以对 real_A 判真 
2、对判别器 netD_A 可以对 fake_A 判伪
3、对判别器 netD_B 可以对 real_B 判真 
4、对判别器 netD_B 可以对 fake_B 判伪
"""


class ReplayBuffer():
    """History buffer of previously generated images.

    While the buffer holds fewer than ``max_size`` images, each incoming
    image is stored and passed straight through.  Once full, each incoming
    image has a 50% chance of being swapped with a randomly chosen stored
    image (the old one is returned) and a 50% chance of passing through
    unstored.
    """

    def __init__(self, max_size=50):
        self.max_size = max_size
        self.data = []

    def push_and_pop(self, data):
        returned = []
        for element in data.data:
            element = torch.unsqueeze(element, 0)
            if len(self.data) < self.max_size:
                # Still filling: store a reference and pass the image through.
                self.data.append(element)
                returned.append(element)
            elif random.uniform(0, 1) > 0.5:
                # Swap: emit an old image, keep the new one in its slot.
                i = random.randint(0, self.max_size - 1)
                returned.append(self.data[i].clone())
                self.data[i] = element
            else:
                # Pass through without storing.
                returned.append(element)
        return Variable(torch.cat(returned))


# Replay buffers of previously generated fakes; sampling from history
# stabilizes discriminator training.
fake_A_buffer = ReplayBuffer()
fake_B_buffer = ReplayBuffer()

# Two generators (A->B, B->A) and one discriminator per domain.
netG_A2B = Generator()
netG_B2A = Generator()
netD_A = Discriminator()
netD_B = Discriminator()

criterion_GAN = torch.nn.MSELoss()       # adversarial (least-squares) loss
criterion_cycle = torch.nn.L1Loss()      # cycle-consistency loss
criterion_identity = torch.nn.L1Loss()   # identity-mapping loss

d_learning_rate = 3e-4  # 3e-4
g_learning_rate = 3e-4
optim_betas = (0.5, 0.999)

# FIX: the generator optimizer now uses g_learning_rate (the original passed
# d_learning_rate, silently ignoring g_learning_rate — same value today, but
# a latent copy-paste bug), and optim_betas is actually passed to all three
# optimizers instead of being a dead variable.
g_optimizer = optim.Adam(itertools.chain(netG_A2B.parameters(),
                                         netG_B2A.parameters()),
                         lr=g_learning_rate, betas=optim_betas)
da_optimizer = optim.Adam(netD_A.parameters(), lr=d_learning_rate,
                          betas=optim_betas)
db_optimizer = optim.Adam(netD_B.parameters(), lr=d_learning_rate,
                          betas=optim_betas)

num_epochs = 1000

# 检查train_b是否有灰度图
"""
print(np.arange(196608).reshape(256,256,3).shape)
train_b_filepath = data_path + "\\trainB\\"
train_b_list = os.listdir(train_b_filepath)
for i in range(len(train_b_list)):
        b_filename = train_b_list[i]
        b_img = Image.open(train_b_filepath + b_filename)
        if np.array(b_img).shape != np.arange(196608).reshape(256,256,3).shape:
            print(b_filename)
            os.remove(train_b_filepath + b_filename)
"""

# Main training loop — see the block comment above for the loss layout.
for epoch in range(num_epochs):

    real_a, real_b = _get_train_data(batch_size)
    # FIX: targets are shaped (batch_size, 1) to match the discriminator
    # output; the original (batch_size,) shape broadcast inside MSELoss,
    # which computes the wrong loss whenever batch_size > 1.
    target_real = torch.full((batch_size, 1), 1.0)
    target_fake = torch.full((batch_size, 1), 0.0)

    # ---- Step 1: train both generators jointly ----
    g_optimizer.zero_grad()

    # Identity losses: a generator fed an image already in its target
    # domain should return it (pixel-wise) unchanged.
    same_B = netG_A2B(real_b).float()
    loss_identity_B = criterion_identity(same_B, real_b) * 5.0
    same_A = netG_B2A(real_a).float()
    loss_identity_A = criterion_identity(same_A, real_a) * 5.0

    # Adversarial losses: each fake should be scored as real by its critic.
    fake_B = netG_A2B(real_a).float()
    pred_fake = netD_B(fake_B).float()
    loss_GAN_A2B = criterion_GAN(pred_fake, target_real)
    fake_A = netG_B2A(real_b).float()
    pred_fake = netD_A(fake_A).float()
    loss_GAN_B2A = criterion_GAN(pred_fake, target_real)

    # Cycle-consistency losses: A -> B -> A (and B -> A -> B) must land
    # back on the starting image.
    recovered_A = netG_B2A(fake_B).float()
    loss_cycle_ABA = criterion_cycle(recovered_A, real_a) * 10.0
    recovered_B = netG_A2B(fake_A).float()
    loss_cycle_BAB = criterion_cycle(recovered_B, real_b) * 10.0

    loss_G = (loss_identity_A + loss_identity_B + loss_GAN_A2B +
              loss_GAN_B2A + loss_cycle_ABA + loss_cycle_BAB)
    loss_G.backward()
    g_optimizer.step()

    # ---- Step 2: train the discriminators ----
    # Discriminator A: real_a judged real, buffered fake_A judged fake.
    da_optimizer.zero_grad()
    pred_real = netD_A(real_a).float()
    loss_D_real = criterion_GAN(pred_real, target_real)
    fake_A = fake_A_buffer.push_and_pop(fake_A)
    # detach() stops discriminator gradients flowing into the generator.
    pred_fake = netD_A(fake_A.detach()).float()
    loss_D_fake = criterion_GAN(pred_fake, target_fake)
    loss_D_A = (loss_D_real + loss_D_fake) * 0.5
    loss_D_A.backward()
    da_optimizer.step()

    # Discriminator B: real_b judged real, buffered fake_B judged fake.
    # (.float() added for consistency with the netD_A branch above.)
    db_optimizer.zero_grad()
    pred_real = netD_B(real_b).float()
    loss_D_real = criterion_GAN(pred_real, target_real)
    fake_B = fake_B_buffer.push_and_pop(fake_B)
    pred_fake = netD_B(fake_B.detach()).float()
    loss_D_fake = criterion_GAN(pred_fake, target_fake)
    loss_D_B = (loss_D_real + loss_D_fake) * 0.5
    loss_D_B.backward()
    db_optimizer.step()

    # Log losses; periodically dump sample images.
    print('Epoch[{}],loss_G:{:.6f} ,loss_D_A:{:.6f},loss_D_B:{:.6f}'
          .format(epoch, loss_G.data.item(), loss_D_A.data.item(),
                  loss_D_B.data.item()))
    if (epoch + 1) % 20 == 0 or epoch == 0:
        # FIX: save_image does not create missing directories.
        os.makedirs('tmp', exist_ok=True)
        b_fake = to_img(fake_B.data)
        a_fake = to_img(fake_A.data)
        a_real = to_img(real_a.data)
        b_real = to_img(real_b.data)
        save_image(a_fake, 'tmp/my_a_fake.png')
        save_image(b_fake, 'tmp/my_b_fake.png')
        save_image(a_real, 'tmp/my_a_real.png')
        save_image(b_real, 'tmp/my_b_real.png')