import argparse
from pathlib import Path

import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.utils.data as data
from PIL import Image, ImageFile
from tensorboardX import SummaryWriter
from torchvision import transforms
from tqdm import tqdm

from models import net
from sampler import InfiniteSamplerWrapper

cudnn.benchmark = True
Image.MAX_IMAGE_PIXELS = None  # Disable DecompressionBombError
# Disable OSError: image file is truncated
ImageFile.LOAD_TRUNCATED_IMAGES = True


def train_transform():
    """Build the training augmentation pipeline.

    Resizes every image to 512x512, takes a random 256x256 crop, and
    converts the result to a float tensor.
    """
    return transforms.Compose([
        transforms.Resize(size=(512, 512)),
        transforms.RandomCrop(256),
        transforms.ToTensor(),
    ])


class FlatFolderDataset(data.Dataset):
    """Paired (content, style) image dataset from two flat folders.

    Collects every ``*.png`` directly under ``content_root`` and
    ``style_root`` and applies ``transform`` to both images of a pair.
    The dataset length equals the number of content images; style images
    are reused cyclically when the style folder holds fewer images, so a
    size mismatch between the two folders no longer raises IndexError.

    Args:
        content_root: directory containing content ``.png`` images.
        style_root: directory containing style ``.png`` images.
        transform: callable applied to each PIL image (e.g. the pipeline
            from ``train_transform``).

    Raises:
        ValueError: if either folder contains no ``.png`` files.
    """

    def __init__(self, content_root, style_root, transform):
        super(FlatFolderDataset, self).__init__()
        self.content_root = content_root
        self.style_root = style_root
        # Sort for deterministic ordering; glob order is filesystem-dependent.
        self.content_paths = sorted(Path(self.content_root).glob('*.png'))
        self.style_paths = sorted(Path(self.style_root).glob('*.png'))
        if not self.content_paths:
            raise ValueError('no .png content images found in {}'.format(content_root))
        if not self.style_paths:
            raise ValueError('no .png style images found in {}'.format(style_root))
        self.transform = transform

    def __getitem__(self, index):
        # content image for this index
        content_path = self.content_paths[index]
        content_img = Image.open(str(content_path)).convert('RGB')
        content_img = self.transform(content_img)
        # style image: wrap around so a smaller style folder never
        # produces an out-of-range index
        style_path = self.style_paths[index % len(self.style_paths)]
        style_img = Image.open(str(style_path)).convert('RGB')
        style_img = self.transform(style_img)
        return content_img, style_img

    def __len__(self):
        # Number of content images drives the epoch length.
        return len(self.content_paths)

    def name(self):
        return 'FlatFolderDataset'


def adjust_learning_rate(optimizer, iteration_count, lr=None, lr_decay=None):
    """Apply inverse-time learning-rate decay, imitating the original
    AdaIN implementation: ``lr / (1 + lr_decay * iteration_count)``.

    Args:
        optimizer: optimizer whose ``param_groups`` are updated in place.
        iteration_count: current step index used by the decay schedule.
        lr: base learning rate; falls back to the CLI ``args.lr`` when
            ``None`` (keeps the original call sites working unchanged).
        lr_decay: decay coefficient; falls back to ``args.lr_decay``.
    """
    if lr is None:
        lr = args.lr
    if lr_decay is None:
        lr_decay = args.lr_decay
    new_lr = lr / (1.0 + lr_decay * iteration_count)
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Basic options
    parser.add_argument('--content_dir', type=str, required=True,
                        help='Directory path to a batch of content images')
    parser.add_argument('--style_dir', type=str, required=True,
                        help='Directory path to a batch of style images')
    parser.add_argument('--vgg', type=str, default='models/vgg_normalised.pth')
    parser.add_argument('--decoder', type=str, default='adain/decoder.pth')

    # training options
    parser.add_argument('--save_dir', default='./experiments',
                        help='Directory to save the model')
    parser.add_argument('--log_dir', default='./logs',
                        help='Directory to save the log')
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--lr_decay', type=float, default=5e-5)
    parser.add_argument('--epoch', type=int, default=2000)
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--style_weight', type=float, default=10.0)
    parser.add_argument('--content_weight', type=float, default=1.0)
    parser.add_argument('--n_threads', type=int, default=4)
    parser.add_argument('--save_model_interval', type=int, default=100)
    args = parser.parse_args()

    # Fall back to CPU instead of crashing on machines without CUDA.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    save_dir = Path(args.save_dir)
    save_dir.mkdir(exist_ok=True, parents=True)
    log_dir = Path(args.log_dir)
    log_dir.mkdir(exist_ok=True, parents=True)
    writer = SummaryWriter(log_dir=str(log_dir))

    # -------------- model ---------------
    decoder = net.decoder
    vgg = net.vgg

    vgg.load_state_dict(torch.load(args.vgg))  # VGG weights stay fixed (encoder)
    decoder.load_state_dict(torch.load(args.decoder))  # decoder weights are trained

    # Keep only the first 31 child layers of VGG as the frozen encoder.
    vgg = nn.Sequential(*list(vgg.children())[:31])
    network = net.Net(vgg, decoder)
    network.train()
    network.to(device)
    # -------------- model ---------------

    # -------------- data --------------
    dataset = FlatFolderDataset(args.content_dir, args.style_dir, train_transform())

    # shuffle so each epoch visits content/style pairs in a new order
    dataloader = data.DataLoader(dataset, batch_size=args.batch_size,
                                 shuffle=True, num_workers=args.n_threads)
    # -------------- data --------------

    # -------------- optimizer ------------
    optimizer = torch.optim.Adam(network.decoder.parameters(), lr=args.lr)
    # -------------- optimizer ------------

    # start train
    print('start training!')
    global_step = 0  # counts optimizer steps across all epochs
    for e in range(0, args.epoch):
        pbar = tqdm(enumerate(dataloader))
        pbar.set_description('{}/{}'.format(e + 1, args.epoch))
        for idx, (content_images, style_images) in pbar:
            # Per-iteration inverse-time lr decay, as in the original AdaIN
            # implementation. (Passing the epoch index here would keep the
            # lr constant for a whole epoch and barely decay it at all.)
            adjust_learning_rate(optimizer, iteration_count=global_step)
            # move the batch to the training device
            content_images = content_images.to(device)
            style_images = style_images.to(device)
            # forward pass returns the content and style losses
            loss_c, loss_s = network(content_images, style_images)
            loss_c = args.content_weight * loss_c
            loss_s = args.style_weight * loss_s
            loss = loss_c + loss_s
            # backward pass updates the decoder parameters only
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # show the current losses in the progress bar
            pbar.set_postfix(
                {'loss_content': '{0:1.5f}'.format(loss_c.item()),
                 'loss_style': '{0:1.5f}'.format(loss_s.item())})
            # log losses to tensorboard at every step
            writer.add_scalar('loss_content', loss_c.item(), global_step)
            writer.add_scalar('loss_style', loss_s.item(), global_step)
            global_step += 1
        # periodic checkpoint: save the decoder weights only, moved to CPU
        # so the checkpoint loads on machines without a GPU
        if (e + 1) % args.save_model_interval == 0 or (e + 1) == args.epoch:
            state_dict = network.decoder.state_dict()
            for key in state_dict.keys():
                state_dict[key] = state_dict[key].to(torch.device('cpu'))
            torch.save(state_dict, save_dir /
                       'decoder_iter_{:d}.pth.tar'.format(e + 1))
    writer.close()
