# encoding: utf-8

import functools

import os
import torch
import torch.nn as nn
from PIL import Image
import matplotlib.pyplot as plt

import numpy as np
from tqdm import tqdm

import torchvision.transforms as transforms
import torchvision.utils as vutils

from constant import *

IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    '.tif', '.TIF', '.tiff', '.TIFF',
]


def is_image_file(filename):
    """Return True if *filename* ends with one of the known image extensions.

    The check is deliberately case-sensitive: only the exact spellings
    listed in IMG_EXTENSIONS are accepted.
    """
    # str.endswith accepts a tuple of suffixes — one call instead of a
    # Python-level any() loop over the extension list.
    return filename.endswith(tuple(IMG_EXTENSIONS))


def make_dataset(_dir, max_dataset_size=float("inf")):
    """Recursively collect the paths of all image files under *_dir*.

    At most *max_dataset_size* paths are returned (all of them by default).
    """
    assert os.path.isdir(_dir), '%s is not a valid directory' % _dir

    paths = [
        os.path.join(root, fname)
        for root, _, fnames in sorted(os.walk(_dir))
        for fname in fnames
        if is_image_file(fname)
    ]
    return paths[:min(max_dataset_size, len(paths))]


def gen_transform(grayscale=False, resize=True):
    """Build the image preprocessing pipeline.

    Parameters:
        grayscale -- convert to a single channel and normalize per-channel
        resize    -- resize to `image_size` before center-cropping

    Returns a torchvision Compose that yields tensors normalized to [-1, 1].
    """
    steps = []
    if grayscale:
        steps.append(transforms.Grayscale(1))
    if resize:
        steps.append(transforms.Resize(image_size))
    steps.append(transforms.CenterCrop(image_size))

    # Random horizontal flip for data augmentation.
    steps.append(transforms.RandomHorizontalFlip())

    steps.append(transforms.ToTensor())

    # Map pixel values from [0, 1] to [-1, 1], one (mean, std) pair per channel.
    channels = 1 if grayscale else 3
    steps.append(transforms.Normalize((0.5,) * channels, (0.5,) * channels))

    return transforms.Compose(steps)


class AlignedDataset(torch.utils.data.Dataset):
    """Paired dataset where each image file contains A and B side by side."""

    def __init__(self, root, left_is_A=True, phase='train', max_dataset_size=float('inf')):
        # Directory holding the combined A|B images for this phase.
        self.dir_AB = os.path.join(root, phase)
        self.AB_paths = sorted(make_dataset(self.dir_AB, max_dataset_size))

        # One transform per side; the channel counts (input_nc / output_nc)
        # come from the `constant` star import.
        self.A_transform = gen_transform(grayscale=(input_nc == 1))
        self.B_transform = gen_transform(grayscale=(output_nc == 1))
        self.left_is_A = left_is_A

    def __getitem__(self, index):
        """Load one combined image and return the transformed (A, B) pair."""
        path = self.AB_paths[index]
        combined = Image.open(path).convert('RGB')

        # Split the combined image into its left and right halves.
        width, height = combined.size
        half = width // 2
        left = combined.crop((0, 0, half, height))
        right = combined.crop((half, 0, width, height))

        A, B = (left, right) if self.left_is_A else (right, left)
        return self.A_transform(A), self.B_transform(B)

    def __len__(self):
        return len(self.AB_paths)


class DataGen2(torch.utils.data.Dataset):
    """Eagerly loads every (origin, label) image pair into memory as tensors."""

    def __init__(self, file_path: str):
        self.data = []    # transformed origin images
        self.label = []   # transformed label images
        self.transform = gen_transform()
        self.load_data(file_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return self.data[index], self.label[index]

    def load_data(self, file_path):
        """Read every label image that has a matching origin image."""
        label_dir = os.path.join(file_path, "label")
        origin_dir = os.path.join(file_path, "origin")
        for file_name in tqdm(os.listdir(label_dir), desc="加载数据集"):
            origin_path = os.path.join(origin_dir, file_name)
            # Skip label files with no matching origin image.
            if not os.path.exists(origin_path):
                continue
            self.data.append(self.transform(Image.open(origin_path)))
            self.label.append(self.transform(Image.open(os.path.join(label_dir, file_name))))


class DataGen3(torch.utils.data.Dataset):
    """Lazily loads (origin, label) pairs; only file paths are kept in memory."""

    def __init__(self, file_path: str):
        self.data = []    # paths to origin images
        self.label = []   # paths to label images
        self.transform = gen_transform()
        self.load_data(file_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # Decode and transform on demand to keep memory usage low.
        origin = Image.open(self.data[index]).convert("RGB")
        target = Image.open(self.label[index]).convert("RGB")
        return self.transform(origin), self.transform(target)

    def load_data(self, file_path):
        """Collect paths of every label image with a matching origin image."""
        label_dir = os.path.join(file_path, "label")
        origin_dir = os.path.join(file_path, "origin")
        for file_name in os.listdir(label_dir):
            origin_path = os.path.join(origin_dir, file_name)
            if os.path.exists(origin_path):
                self.data.append(origin_path)
                self.label.append(os.path.join(label_dir, file_name))


class DataGen4(torch.utils.data.Dataset):
    """Single-image dataset: the given file serves as both input and target."""

    def __init__(self, file_path: str):
        self.data = []
        self.label = []
        # No Resize step: the image is only center-cropped to image_size.
        self.transform = gen_transform(resize=False)
        self.load_data(file_path)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        source = Image.open(self.data[index]).convert("RGB")
        target = Image.open(self.label[index]).convert("RGB")
        return self.transform(source), self.transform(target)

    def load_data(self, file_path):
        # The same file is registered as both the input and the label.
        self.data.append(file_path)
        self.label.append(file_path)


def make_aligned_img(A, B, normA=True):
    """Concatenate tensors A and B side by side into one displayable image.

    Parameters:
        A, B  -- image tensors of shape (C, H, W) or (N, C, H, W)
        normA -- passed to make_grid's `normalize`: shift/scale the grid
                 into the [0, 1] range (default True, preserving the old
                 hard-coded behavior)

    Returns an (H, W, C) array suitable for matplotlib's imshow.
    Note: B is placed on the LEFT and A on the RIGHT.
    """
    A, B = A.cpu(), B.cpu()
    # Promote single images to a batch of one so torch.cat/make_grid work.
    if A.dim() == 3:
        A = A[None, :, :, :]
    if B.dim() == 3:
        B = B[None, :, :, :]

    aligned = torch.cat((B, A), dim=3)  # concatenate along the width axis
    # BUG FIX: `normA` was previously ignored — `normalize` was hard-coded
    # to True. Wire the parameter through (default keeps old behavior).
    aligned = vutils.make_grid(aligned, padding=2, normalize=normA)
    # Reorder (C, H, W) -> (H, W, C) for plotting.
    aligned = np.transpose(aligned, (1, 2, 0))
    return aligned


# # Create the dataset
# dataset = AlignedDataset(dataroot, left_is_A=False)
# # Create the data loader
# dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
#                                          shuffle=True, num_workers=workers)


# Module-level side effect: the dataset and loader are built at import time.
# dataroot / batch_size / workers come from the `constant` star import.
dataset = DataGen3(file_path=dataroot)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         shuffle=True, num_workers=workers)

# Select the device to run on (first GPU when available and ngpu > 0).
device = torch.device("cuda:0" if (torch.cuda.is_available() and ngpu > 0) else "cpu")


def weights_init(m):
    """Custom weight initializer for netG and netD (DCGAN-style).

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias. Dispatch is by class *name* so look-alike
    subclasses are matched too.
    """
    layer_name = m.__class__.__name__
    if 'Conv' in layer_name:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif 'BatchNorm' in layer_name:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)


class UnetGenerator(nn.Module):
    """U-Net based generator built from nested skip-connection blocks."""

    def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """
        Parameters:
            input_nc (int)  -- number of channels in the input image
            output_nc (int) -- number of channels in the output image
            num_downs (int) -- number of downsamplings; e.g. with num_downs == 7
                               a 128x128 image becomes 1x1 at the bottleneck
            ngf (int)       -- base number of filters
            norm_layer      -- normalization layer
            use_dropout     -- whether middle blocks use dropout

        The network is constructed from the innermost block outwards,
        wrapping each block inside the next.
        """
        super(UnetGenerator, self).__init__()
        # Innermost (bottleneck) block.
        block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None,
                                        norm_layer=norm_layer, innermost=True)
        # Intermediate blocks keep ngf * 8 filters.
        for _ in range(num_downs - 5):
            block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=block,
                                            norm_layer=norm_layer, use_dropout=use_dropout)
        # Gradually reduce the filter count from ngf * 8 down to ngf.
        for mult in (4, 2, 1):
            block = UnetSkipConnectionBlock(ngf * mult, ngf * mult * 2, input_nc=None,
                                            submodule=block, norm_layer=norm_layer)
        # Outermost block maps to the requested number of output channels.
        self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc,
                                             submodule=block, outermost=True, norm_layer=norm_layer)

    def forward(self, input):
        return self.model(input)


class UnetSkipConnectionBlock(nn.Module):
    """One U-Net submodule with a skip connection.

    Structure: downsampling -- |submodule| -- upsampling. The block's input
    is concatenated with its output along the channel axis, except at the
    outermost level, which produces the final image directly.
    """

    def __init__(self, outer_nc, inner_nc, input_nc=None,
                 submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
        """Build one skip-connection U-Net block.

        Parameters:
            outer_nc (int) -- number of filters in the outer conv layer
            inner_nc (int) -- number of filters in the inner conv layer
            input_nc (int) -- channels of the input image/feature map
                              (defaults to outer_nc when None)
            submodule (UnetSkipConnectionBlock) -- previously built inner block
            outermost (bool)    -- whether this is the outermost block
            innermost (bool)    -- whether this is the innermost block
            norm_layer          -- normalization layer
            use_dropout (bool)  -- whether to append a Dropout layer
        """
        super(UnetSkipConnectionBlock, self).__init__()
        self.outermost = outermost
        # InstanceNorm has no learned bias, so the convs need their own.
        if type(norm_layer) == functools.partial:
            norm_func = norm_layer.func
        else:
            norm_func = norm_layer
        use_bias = norm_func == nn.InstanceNorm2d
        if input_nc is None:
            input_nc = outer_nc

        downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
                             stride=2, padding=1, bias=use_bias)
        downrelu = nn.LeakyReLU(0.2, True)
        downnorm = norm_layer(inner_nc)
        uprelu = nn.ReLU(True)
        upnorm = norm_layer(outer_nc)

        if outermost:
            # Final layer: map back to image channels and squash with Tanh.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1)
            layers = [downconv, submodule, uprelu, upconv, nn.Tanh()]
        elif innermost:
            # Bottleneck: no submodule, so the upconv sees only inner_nc channels.
            upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            layers = [downrelu, downconv, uprelu, upconv, upnorm]
        else:
            # The skip concat doubles the channel count seen by the upconv.
            upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
                                        kernel_size=4, stride=2,
                                        padding=1, bias=use_bias)
            layers = [downrelu, downconv, downnorm, submodule, uprelu, upconv, upnorm]
            if use_dropout:
                layers.append(nn.Dropout(0.5))

        self.model = nn.Sequential(*layers)

    def forward(self, x):
        if self.outermost:
            # The outermost block returns the final image without a skip.
            return self.model(x)
        # Skip connection: concatenate input with output along channels.
        return torch.cat([x, self.model(x)], 1)


class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator: emits one real/fake score per image patch."""

    def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
        """Build the PatchGAN discriminator.

        Parameters:
            input_nc (int) -- channels of the input image
            ndf (int)      -- filters in the first conv layer
            n_layers (int) -- number of stride-2 conv layers
                              (n_layers=3 gives the 70x70 PatchGAN)
            norm_layer     -- normalization layer
        """
        super(NLayerDiscriminator, self).__init__()
        # InstanceNorm has no learned bias, so the convs need their own.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        kernel, pad = 4, 1
        layers = [
            nn.Conv2d(input_nc, ndf, kernel_size=kernel, stride=2, padding=pad),
            nn.LeakyReLU(0.2, True),
        ]

        # Downsampling stack: double the filter multiplier each layer, capped at 8x.
        mult = 1
        for n in range(1, n_layers):
            prev, mult = mult, min(2 ** n, 8)
            layers += [
                nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kernel, stride=2, padding=pad, bias=use_bias),
                norm_layer(ndf * mult),
                nn.LeakyReLU(0.2, True),
            ]

        # One more conv at stride 1 before the prediction layer.
        prev, mult = mult, min(2 ** n_layers, 8)
        layers += [
            nn.Conv2d(ndf * prev, ndf * mult, kernel_size=kernel, stride=1, padding=pad, bias=use_bias),
            norm_layer(ndf * mult),
            nn.LeakyReLU(0.2, True),
        ]

        # Output a one-channel prediction map (one score per patch).
        layers.append(nn.Conv2d(ndf * mult, 1, kernel_size=kernel, stride=1, padding=pad))
        self.model = nn.Sequential(*layers)

    def forward(self, input):
        return self.model(input)
