# encoding: utf-8
# https://blog.csdn.net/Lizhi_Tech/article/details/132108893
# desc: img2img watermark removal using a GAN-style (U-Net) generator

import datetime
from tqdm import tqdm
import torch, time, os
import numpy as np
import torch.nn as nn
import torch.optim as optim

from torchvision import transforms

from torch.utils.data import DataLoader
from torchvision.utils import save_image
import torch.nn.functional as F
from utils.images import batch_tensor_to_img

class DoubleConv(nn.Module):
    """Two 3x3 same-padding convolutions, each followed by BatchNorm and LeakyReLU(0.1)."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # First conv changes the channel count; the second refines in place.
        layers = [
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.1),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.1),
        ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        # Spatial size is preserved (padding=1 with kernel_size=3).
        return self.double_conv(x)


class Down(nn.Module):
    """Encoder step: halve the spatial resolution, then apply a DoubleConv block."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # Max-pooling halves H and W; DoubleConv then adjusts channel count.
        pool = nn.MaxPool2d(2)
        conv = DoubleConv(in_channels, out_channels)
        self.maxpool_conv = nn.Sequential(pool, conv)

    def forward(self, x):
        return self.maxpool_conv(x)


class Up(nn.Module):
    """Upscaling then double conv.

    With ``bilinear=False`` a transposed convolution both upsamples and halves
    the channel count, so after concatenation with the skip connection the
    DoubleConv receives exactly ``in_channels`` channels. The original
    ``bilinear=True`` path upsampled without reducing channels, so the
    concatenation produced ``in_channels + in_channels // 2`` channels and
    forward() crashed with a channel-mismatch error; a 1x1 convolution now
    halves the channels after the bilinear upsample so both paths match.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()

        if bilinear:
            # Parameter-light upsample followed by a 1x1 channel reduction,
            # mirroring what ConvTranspose2d does in the other branch.
            self.up = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),
                nn.Conv2d(in_channels, in_channels // 2, kernel_size=1),
            )
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)

        self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # Inputs are NCHW; sizes can differ by a pixel when the encoder pooled
        # an odd spatial dimension, so pad x1 to match the skip tensor x2.
        diffY = x2.size()[2] - x1.size()[2]
        diffX = x2.size()[3] - x1.size()[3]

        x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2,
                        diffY // 2, diffY - diffY // 2])
        # Skip connection: concatenate encoder features with upsampled ones.
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)


class OutConv(nn.Module):
    """Final 1x1 convolution mapping the feature maps to the output channels."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        # 1x1 kernel: per-pixel linear projection, spatial size unchanged.
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)


class Generator(nn.Module):
    """U-Net style generator for img2img watermark removal.

    The encoder/decoder (inc, down1-4, up1-4, outc) is used by forward().
    The extra channel-preserving blocks same1..same12 are only used by the
    alternative forward2() path.
    """

    def __init__(self, n_channels=3, n_classes=3, bilinear=False):
        """
        :param n_channels: number of input image channels
        :param n_classes: number of output image channels
        :param bilinear: use bilinear upsampling instead of transposed convs
        """
        super(Generator, self).__init__()

        # Encoder: each Down halves spatial size and doubles channels.
        self.inc = DoubleConv(n_channels, 64)
        self.down1 = Down(64, 128)
        self.down2 = Down(128, 256)
        self.down3 = Down(256, 512)
        self.down4 = Down(512, 1024)
        # Decoder: each Up doubles spatial size and consumes a skip connection.
        self.up1 = Up(1024, 512, bilinear)
        self.up2 = Up(512, 256, bilinear)
        self.up3 = Up(256, 128, bilinear)
        self.up4 = Up(128, 64, bilinear)
        self.outc = OutConv(64, n_classes)
        # Channel-preserving stack for forward2(); attribute names kept as
        # same1..same12 so existing state_dict checkpoints still load.
        for i in range(1, 13):
            setattr(self, f"same{i}", DoubleConv(n_channels, n_channels))

    def forward2(self, x):
        # Alternative path: 12 channel-preserving DoubleConv blocks in a row.
        for i in range(1, 13):
            x = getattr(self, f"same{i}")(x)
        return x

    def forward(self, x):
        enc1 = self.inc(x)
        enc2 = self.down1(enc1)
        enc3 = self.down2(enc2)
        enc4 = self.down3(enc3)
        bottleneck = self.down4(enc4)
        dec = self.up1(bottleneck, enc4)
        dec = self.up2(dec, enc3)
        dec = self.up3(dec, enc2)
        dec = self.up4(dec, enc1)
        return self.outc(dec)


class Discriminator(nn.Module):
    """Discriminator network.

    NOTE(review): forward() currently returns its input unchanged; the
    convolutional stack in ``self.conv`` is constructed but never applied,
    so this module is effectively an identity function (the adversarial
    part of training is disabled).
    """

    def __init__(self, n_channels=3):
        """
        :param n_channels: number of channels in the input images
        """
        super(Discriminator, self).__init__()
        self.n_channels = n_channels

        # Single 1x1 conv + BN + LeakyReLU; unused by forward() as written.
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels=self.n_channels, out_channels=self.n_channels,
                      kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(self.n_channels),
            nn.LeakyReLU(0.2),
        )

    def forward(self, x):
        # Identity pass-through: the conv stack is deliberately bypassed.
        return x


class ImageGenerator(object):
    """Training wrapper pairing the U-Net generator with the (inert) discriminator.

    The discriminator update is disabled in the original code, so training is
    effectively supervised pixel-wise MSE between generated and clean images.
    """

    def __init__(self, epochs=32, device="cuda:0", lr=0.01, save_model=False, save_best=False):
        """
        :param epochs: number of training epochs
        :param device: CUDA device string used when CUDA is available
        :param lr: learning rate for both RMSprop optimizers
        :param save_model: persist generator weights under ./models each epoch
        :param save_best: keep only the checkpoint with the lowest epoch loss
        """
        super(ImageGenerator, self).__init__()
        self.gen = Generator()
        self.dis = Discriminator()
        # Best generator loss seen so far. Must start at +inf: the original
        # initialized it to 0, so `epoch_loss_gen < self.loss_gen` was never
        # true and save_best never saved any checkpoint.
        self.loss_gen = float("inf")
        self.loss_dis = 0
        self.lr = lr
        self.epochs = epochs
        self.device_name = device
        self.device = torch.device("cpu")
        self.load_to_device()
        self.optim_g = torch.optim.RMSprop(self.gen.parameters(), lr=self.lr)
        self.optim_d = torch.optim.RMSprop(self.dis.parameters(), lr=self.lr)
        self.criterion = nn.MSELoss()
        self.save_model = save_model
        self.save_best = save_best

    def load_to_device(self):
        """Select CUDA when available and move both networks onto the device."""
        if torch.cuda.is_available():
            self.device = torch.device(self.device_name)
            print("using device:", self.device)
        self.gen.to(self.device)
        # The original moved only the generator; move the discriminator too.
        self.dis.to(self.device)

    def train(self, train_dataloader):
        """Run the training loop.

        :param train_dataloader: yields (input_images, clean_labels) batches
        """
        for epoch in range(self.epochs):
            print(f"training epoch {epoch}, {datetime.datetime.now()}")
            self.gen.train()
            self.dis.train()
            self.gen.to(self.device)
            self.dis.to(self.device)
            epoch_loss_gen = 0.0
            epoch_loss_real = 0.0
            for i, (images, labels) in enumerate(train_dataloader):
                images = images.to(self.device)
                labels = labels.to(self.device)

                # Real images through the discriminator. NOTE(review): the
                # discriminator backward/step was deliberately disabled in the
                # original code, so this only tracks a metric.
                self.optim_d.zero_grad()
                real_out = self.dis(labels)
                real_loss = self.criterion(real_out, labels)
                # .item() replaces the deprecated .cpu().data.numpy() pattern.
                epoch_loss_real += real_loss.item()

                # Generator step: pixel-wise MSE between the generated image
                # and the clean target (supervised img2img).
                self.optim_g.zero_grad()
                fake_out = self.gen(images)
                fake_loss = self.criterion(fake_out, labels)
                fake_loss.backward()
                self.optim_g.step()
                epoch_loss_gen += fake_loss.item()

                if i == 0:
                    # Dump the first batch of each epoch for visual inspection.
                    batch_tensor_to_img(epoch, fake_out)

            print(f"train real loss={epoch_loss_real}, gen loss={epoch_loss_gen}")
            if self.save_model:
                os.makedirs("./models", exist_ok=True)
                if self.save_best:
                    # Only overwrite the checkpoint on a new best epoch loss.
                    if epoch_loss_gen < self.loss_gen:
                        self.loss_gen = epoch_loss_gen
                        torch.save(self.gen.state_dict(), "./models/model_best.pt")
                else:
                    torch.save(self.gen.state_dict(), f"./models/model_epoch{epoch}_loss_{epoch_loss_gen}.pt")

    def eval(self):
        """Put the generator into evaluation mode."""
        self.gen.eval()

    def load_weights(self, weights_path: str):
        """Load generator weights from ``weights_path`` onto the current device.

        :param weights_path: path to a state_dict saved with torch.save
        """
        self.gen.to(torch.device("cpu"))
        self.gen.load_state_dict(torch.load(weights_path, map_location=lambda storage, loc: storage))
        self.gen.to(self.device)


if __name__ == '__main__':
    # Quick smoke test: the generator should map a 3-channel image batch to
    # an output with the same batch size, channel count, and spatial size.
    model = Generator(n_channels=3, n_classes=3)
    sample = torch.randn(2, 3, 600, 600)
    out = model(sample)
    print(out.shape)
