# -*- coding: utf-8 -*-
# @Time    : 2021/7/7 14:46
# @Author  : LuoTianHang


# #################### train.py overview ##########################
# This script is the core training entry point for the model.


import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.backends import cudnn
from torch.utils.data import DataLoader
from tqdm import tqdm

from config.config import get_parser
from datalist import train_set, test_set
from model import discriminator, generator
from utils import showimg


class CGAN(object):
    """Conditional GAN trainer for class-conditioned image generation.

    The generator consumes a ``z_dimension``-dim vector made of 100 noise
    values concatenated with a 10-way one-hot class label, and emits
    3136 = 1*56*56 pixel values. The discriminator outputs 10 sigmoid units;
    real images are scored against their one-hot label, fakes against zeros.

    NOTE: constructing an instance runs the *entire* training loop
    immediately — kept for backward compatibility with ``model = CGAN()``.
    """

    def __init__(self):
        self.args = get_parser()
        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()

        # Seed the CPU RNG unconditionally: the original only seeded CUDA
        # when a GPU was present, leaving torch.randn(...) on CPU unseeded.
        torch.manual_seed(self.args.seed)
        if use_cuda:
            torch.cuda.manual_seed(self.args.seed)
            torch.cuda.manual_seed_all(self.args.seed)

        self.device = torch.device("cuda" if use_cuda else "cpu")

        # pin_memory only helps host->GPU transfers, so tie it to use_cuda.
        kwargs = {"num_workers": 0, "pin_memory": use_cuda}

        # Build the DataLoaders.
        self.train_dataloader = DataLoader(train_set, batch_size=self.args.train_batch_size, shuffle=True, **kwargs)
        self.test_dataloader = DataLoader(test_set, batch_size=self.args.test_batch_size, shuffle=False, **kwargs)

        # Model definition: 110 = 100 noise dims + 10 one-hot label dims.
        self.z_dimension = 110
        self.num_classes = 10
        self.count = 0  # running index for saved/shown sample grids
        self.D = discriminator().to(self.device)
        self.G = generator(self.z_dimension, 3136).to(self.device)  # 1*56*56

        # CUDA acceleration: wrap in DataParallel and enable cudnn autotuner.
        if use_cuda:
            self.D = torch.nn.DataParallel(self.D, device_ids=range(torch.cuda.device_count()))
            self.G = torch.nn.DataParallel(self.G, device_ids=range(torch.cuda.device_count()))
            cudnn.enabled = True
            cudnn.benchmark = True

        # Best-effort warm start from a pretrained checkpoint when NOT resuming.
        if not self.args.resume:
            self._load_pretrained(self.D)
            self._load_pretrained(self.G)

        # Loss, optimizers and LR schedules.
        self.criterion = torch.nn.BCELoss().to(self.device)
        self.optimizer_G = torch.optim.Adam(params=self.G.parameters(), lr=self.args.lr, weight_decay=1e-4)
        self.optimizer_D = torch.optim.Adam(params=self.D.parameters(), lr=self.args.lr, weight_decay=1e-4)
        # The milestone schedule is a key training knob — tune with care.
        self.scheduler_G = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_G, self.args.milestones, gamma=0.5)
        self.scheduler_D = torch.optim.lr_scheduler.MultiStepLR(self.optimizer_D, self.args.milestones, gamma=0.5)

        for epoch in range(self.args.epochs):
            self.train(epoch)
            self.test(epoch)
        if use_cuda:
            torch.cuda.empty_cache()
        print("finish model training")

    def _load_pretrained(self, model):
        """Load shape-compatible weights from ``args.pretrained_weight``.

        Keys missing from ``model`` or with mismatched shapes are skipped.
        Failure is non-fatal: the printed message promises training from
        scratch, so we log and continue instead of re-raising (the original
        raised, contradicting its own message).
        """
        try:
            print("load the weight from pretrained-weight file")
            model_dict = model.state_dict()
            checkpoint = torch.load(self.args.pretrained_weight, map_location=self.device)
            pretrained_dict = {
                k: v
                for k, v in checkpoint['model_state_dict'].items()
                # Guard `k in model_dict`: the original indexed model_dict[k]
                # directly, so one unknown key aborted the entire load.
                if k in model_dict and model_dict[k].shape == v.shape
            }
            model_dict.update(pretrained_dict)
            model.load_state_dict(model_dict, strict=True)
            print("Restoring the weight from pretrained-weight file \nFinished to load the weight")
        except Exception:
            print("can not load weight \n train the model from beginning")

    def _make_z(self, label_onehot):
        """Build a conditional latent batch: Gaussian noise ++ one-hot labels."""
        bs = label_onehot.shape[0]
        noise = torch.randn(bs, self.z_dimension - self.num_classes)
        z = np.concatenate((noise.numpy(), label_onehot), axis=1)
        return torch.from_numpy(z).float().to(self.device)

    def train(self, epoch):
        """Run one training epoch.

        Per batch: one discriminator update, then ``gepoch`` generator
        updates, each on a fresh conditional noise batch.
        """
        self.D.train()
        self.G.train()

        gepoch = 2  # generator updates per discriminator update

        pbar = tqdm(self.train_dataloader)
        for img, label in pbar:
            # Use the actual batch size: the final batch of an epoch may be
            # smaller than args.train_batch_size (the original crashed there).
            bs = label.size(0)
            label_onehot = np.zeros((bs, self.num_classes))
            label_onehot[np.arange(bs), label.numpy()] = 1
            img = img.to(self.device)
            real_label = torch.from_numpy(label_onehot).float().to(self.device)
            fake_label = torch.zeros(bs, self.num_classes).to(self.device)

            # ---- discriminator step ----
            real_out = self.D(img)
            d_loss_real = self.criterion(real_out, real_label)

            # Condition the fake samples on the batch labels, matching the
            # generator step below. (The original fed 110 dims of pure noise
            # here, so D never saw label-conditioned fakes.)
            z = self._make_z(label_onehot)
            fake_img = self.G(z)
            # detach(): no need to backprop through G during the D update.
            fake_out = self.D(fake_img.detach())
            d_loss_fake = self.criterion(fake_out, fake_label)

            d_loss = d_loss_fake + d_loss_real
            self.optimizer_D.zero_grad()
            d_loss.backward()
            self.optimizer_D.step()

            # ---- generator steps: fool D into labelling fakes as real ----
            for _ in range(gepoch):
                z = self._make_z(label_onehot)
                fake_img = self.G(z)
                output = self.D(fake_img)
                g_loss = self.criterion(output, real_label)
                self.optimizer_G.zero_grad()
                g_loss.backward()
                self.optimizer_G.step()
            pbar.set_description(f"Train Epoch:{epoch}\t D_loss:{d_loss.item()}\t G_loss:{g_loss.item()}")
        self.scheduler_G.step()
        self.scheduler_D.step()

    @torch.no_grad()
    def test(self, epoch):
        """Generate and display one batch of label-conditioned samples."""
        self.D.eval()
        self.G.eval()

        pbar = tqdm(self.test_dataloader)
        for img, label in pbar:
            bs = label.size(0)  # tolerate a short final batch
            label_onehot = np.zeros((bs, self.num_classes))
            label_onehot[np.arange(bs), label.numpy()] = 1
            z = self._make_z(label_onehot)

            fake_img = self.G(z)

            showimg(fake_img, count=self.count)
            self.count += 1
            plt.show()
            break  # only the first batch is visualized per epoch


if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. for its class)
    # does not immediately launch a full training run.
    model = CGAN()