from config import GetConfig
import torch
from datalist import AnimeDataset
from torch.utils.data import DataLoader
from model2 import AnimeGenerator,AnimeDiscriminator
from utils import LSGanLoss,ContentLoss,StyleLoss,denormalize,TVLoss,weights_init
from torch import nn
from torchvision.models import vgg19
from PIL import Image
from torch import optim
from tqdm import tqdm

class Train:
    """AnimeGAN-style trainer.

    Wires together the dataset, the generator/discriminator pair, a frozen
    VGG19 feature extractor, and the assorted losses (adversarial, content,
    style, color, total-variation), then runs the adversarial training loop
    and periodically checkpoints the generator.
    """

    def __init__(self) -> None:
        # Run configuration (project name, batch size, epochs, device flags, ...).
        self.args = GetConfig()

        print(f"-----------{self.args.project_name}-------------")
        use_cuda = self.args.use_cuda and torch.cuda.is_available()
        print("use_cuda:", use_cuda)

        self.device = torch.device('cuda' if use_cuda else 'cpu')

        train_kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}

        self.train_dataset = AnimeDataset()
        # drop_last=True keeps every batch full; get_image() asserts batch >= 4.
        self.train_dataloader = DataLoader(self.train_dataset, batch_size=self.args.train_batch_size, shuffle=True, drop_last=True,
                                           **train_kwargs)


        self.generate = AnimeGenerator()
        self.discriminate = AnimeDiscriminator()

        # Perceptual backbone: keep only the first 26 modules of VGG19
        # (through conv4_4) for content/style feature extraction.  eval()
        # fixes dropout behaviour; the weights are not explicitly frozen,
        # but no optimizer is ever created for them.
        self.vgg = vgg19(pretrained=True).to(self.device)
        self.vgg = self.vgg.features[:26]
        self.vgg.eval()
        self.criterion = LSGanLoss().to(self.device)        # least-squares adversarial loss
        self.content_loss = ContentLoss().to(self.device)   # perceptual content loss on VGG features
        self.huber_loss = nn.SmoothL1Loss().to(self.device) # used for the U/V colour terms
        self.gram_loss = StyleLoss().to(self.device)        # Gram-matrix style loss
        self.l1loss = nn.L1Loss().to(self.device)           # used for the Y (luma) colour term


        self.generate.train()
        self.discriminate.train()

        self.generate.to(self.device)
        self.discriminate.to(self.device)

        if use_cuda:
            # DataParallel wrapping prefixes state-dict keys with "module.",
            # so checkpoints saved here are only loadable under the same
            # wrapping (the resume branch below relies on this symmetry).
            self.generate = torch.nn.DataParallel(self.generate, device_ids=range(torch.cuda.device_count()))
            self.discriminate = torch.nn.DataParallel(self.discriminate, device_ids=range(torch.cuda.device_count()))
            self.vgg = torch.nn.DataParallel(self.vgg, device_ids=range(torch.cuda.device_count()))

        if self.args.resume:
            # Resume the generator from the content-pretraining checkpoint
            # written by init_train() (hard-coded path).
            self.generate.load_state_dict(torch.load("/home/pc/dataset/animeganv2/weights/generate_pretrain.pth")["model_state_dict"],strict=True)
        else:
            weights_init(self.generate,init_gain=0.02)
        weights_init(self.discriminate,init_gain=0.02)
        # TTUR-style setup: the discriminator learning rate is twice the generator's.
        self.optimizer_g = optim.Adam(self.generate.parameters(), lr=2e-4, betas=(0.5, 0.999))
        self.optimizer_d = optim.Adam(self.discriminate.parameters(), lr=4e-4, betas=(0.5, 0.999))

        # RGB -> YUV (BT.601) conversion coefficients, one weight per RGB channel.
        self.Y = torch.tensor([0.299, 0.587, 0.114]).to(self.device)
        self.U = torch.tensor([-0.14714119, -0.28886916, 0.43601035]).to(self.device)
        self.V = torch.tensor([0.61497538, -0.51496512, -0.10001026]).to(self.device)


        # Total-variation smoothness loss with weight 1.
        self.tvloss= TVLoss(1)


    def run(self) -> None:
        """Run the full training schedule (epochs 1..400)."""
        for epoch in range(1,400+1):
            # NOTE(review): epoch starts at 1, so `epoch < 0` is never true and
            # init_train (content-only generator warm-up) is effectively
            # disabled; raise the threshold to re-enable the warm-up phase.
            if epoch < 0:
                self.init_train(epoch)
            else:
                self.train(epoch)

        torch.cuda.empty_cache()
        print("finish model training")

    def test(self,epoch) -> None:
        # Evaluation hook; not implemented yet.
        pass


    def init_train(self,epoch: int) -> None:
        """One warm-up epoch: train the generator with content loss only.

        The generator learns to reproduce the VGG features of the input photo
        (no adversarial signal), then is checkpointed to the pretrain path
        consumed by the resume branch in __init__.
        """
        self.set_lr(self.optimizer_g,2e-4)
        self.generate.train()
        pbar = tqdm(self.train_dataloader,desc=f'Train Epoch {epoch} / {self.args.epochs}')
 
        for photo,cartoon,gray,smooth in pbar:
            # Only `photo` is used in this phase; the rest of the quadruple is moved anyway.
            photo, cartoon, gray, smooth = photo.to(self.device), cartoon.to(self.device), gray.to(self.device), smooth.to(self.device)

            self.optimizer_g.zero_grad()
            generator_img  = self.generate(photo)
            photo_content = self.vgg(photo)
     
            generator_content = self.vgg(generator_img)
            # content loss
            loss_content= 2.5*self.content_loss(photo_content,generator_content)


            loss_content.backward()

            self.optimizer_g.step()
            pbar.set_description(f"[Epoch]:{epoch}\t g_loss:{loss_content.item()}")

        torch.save({
            'model_state_dict': self.generate.state_dict()
        },
            '/home/pc/dataset/animeganv2/weights/generate_pretrain.pth')
        # Dump a side-by-side preview built from the last batch of the epoch.
        self.get_image(generator_img,photo)
    def train(self,epoch: int) -> None:
        """One adversarial epoch: alternate G/D updates batch by batch."""
        ###
        #
        #  g_loss = adv_g + content + style + color + tv
        #  d_loss = dav_d
        ###
        self.generate.train()
        self.discriminate.train()

        # Adversarial phase runs at 10x smaller learning rates than warm-up.
        self.set_lr(self.optimizer_g,2e-5)
        self.set_lr(self.optimizer_d,4e-5)

        # Placeholders so the progress bar can report both losses before each
        # branch has executed at least once.
        g_loss = torch.tensor(0,device=self.device)
        d_loss = torch.tensor(0,device=self.device)

        pbar = tqdm(self.train_dataloader,desc=f'Train Epoch {epoch} / {self.args.epochs}')
        count = 0
        for photo,cartoon,gray,smooth in pbar:
            # Batch quadruple: real-world photo, anime frame, grayscale anime
            # frame, edge-smoothed anime frame (assumed from usage -- confirm
            # against AnimeDataset).
            photo, cartoon, gray, smooth = photo.to(self.device), cartoon.to(self.device), gray.to(self.device), smooth.to(self.device)
            # Odd iterations update the generator, even iterations (including
            # the very first, count == 0) update the discriminator.
            if count % 2 != 0:
                # generator
                
                # photo cartoon gray smooth
                
                self.optimizer_g.zero_grad()
                
                generator_img  = self.generate(photo)
                fake_logit = self.discriminate(generator_img)
                
                # adv loss: push generated images toward the "real" label
                loss_adv_gen  = self.criterion._g_loss(fake_logit)
                
                photo_content = self.vgg(photo)
                
                _,c,h,w = photo_content.shape  # NOTE(review): c/h/w are unused below
                generator_content = self.vgg(generator_img)
                
                
                # content loss
                loss_content= self.content_loss(photo_content,generator_content)
                
                
                
                gray_cartoon_content = self.vgg(gray)
                 # style loss
                loss_gram = self.gram_loss(generator_content, gray_cartoon_content)
                
                # NOTE(review): the YUV coefficients are applied per RGB
                # channel with no sum over the channel axis, so these are
                # channel-weighted RGB images rather than true Y/U/V planes;
                # the colour term is also weighted 0 in g_loss below, so it is
                # currently disabled either way.
                y_generator = generator_img* self.Y[None, :, None, None] + 0
                y_photo = photo * self.Y[None, :, None, None] + 0

                u_generator = generator_img  * self.U[None, :, None, None] + 0.5
                u_photo = photo* self.U[None, :, None, None] + 0.5

                v_generator = generator_img  * self.V[None, :, None, None] + 0.5
                v_photo = photo * self.V[None, :, None, None] + 0.5
                # color loss
                loss_color_recon = self.l1loss(y_generator, y_photo) \
                                   + self.huber_loss(u_generator, u_photo) \
                                   + self.huber_loss(v_generator, v_photo)
                
                # tvloss
                tvloss = self.tvloss(generator_img)
                
                # Weighted sum of generator objectives (colour term weight is 0).
                g_loss = 300 * loss_adv_gen + 1.5 * loss_content + 2.5 * loss_gram + 0 * loss_color_recon + 1*tvloss
                # g_loss = 10 * loss_adv_gen + 1.5 * loss_content + 3 * loss_gram + 30 * loss_color_recon+ 1*tvloss

                g_loss.backward()

                self.optimizer_g.step()
                
            else:
                # discrimnator: real cartoon is labelled 1; generated, smooth
                # and grayscale variants are all labelled 0 (fake).
                self.optimizer_d.zero_grad()
                real_logit = self.discriminate(cartoon)
                fake_logit_smooth = self.discriminate(smooth)
                fake_logit_gray = self.discriminate(gray)
                generator_img = self.generate(photo)
                fake_logit = self.discriminate(generator_img)
                loss_adv1 = self.criterion._d_loss2(logit=fake_logit, a=0)
                loss_adv2 = self.criterion._d_loss2(logit=fake_logit_smooth, a=0)
                loss_adv3 = self.criterion._d_loss2(logit=fake_logit_gray, a=0)
                loss_adv4 = self.criterion._d_loss2(logit=real_logit, a=1)

                d_loss = 300 * (1.7*loss_adv4 + 1.7*loss_adv1 + 1.0*loss_adv2 + 1.7*loss_adv3)
                # d_loss = 10 * (loss_adv4 + loss_adv1 + 0.1 * loss_adv2 + loss_adv3)
                d_loss.backward()
                self.optimizer_d.step()
                
            # NOTE(review): this writes a preview JPEG on every single batch
            # (overwriting the same file), which is expensive; consider
            # sampling once per epoch instead.
            self.get_image(generator_img,photo)
            pbar.set_description(f"[Epoch]:{epoch}\td_loss:{d_loss.item()}\tg_loss:{g_loss.item()}")

            count+=1
        torch.save({
            'model_state_dict': self.generate.state_dict()
        },
            '/home/pc/dataset/animeganv2/weights/generate_best.pth')

    @staticmethod
    def get_image(image, photo, name="result"):
        """Save a side-by-side preview grid (generated | photo) to <name>1.jpg.

        Both tensors are expected in NCHW layout with matching spatial sizes
        and the normalisation that `denormalize` undoes.
        """
        # NOTE(review): the message says "greater than 4" but the check allows exactly 4.
        assert image.shape[0] >= 4, "image batchsize should greater than 4, or this function can not work "
        len_batch = image.shape[0]
        width = image.shape[-1]
        height = image.shape[-2]

        # NCHW -> NHWC, detach from the graph, undo normalisation, scale to 8-bit.
        output = (denormalize(image.permute((0, 2, 3, 1)).detach().to('cpu').numpy()) * 255).astype('uint8')
        photo = (denormalize(photo.permute((0, 2, 3, 1))).detach().to('cpu').numpy() * 255).astype('uint8')

       
        # One row per batch element: generated image left, source photo right.
        target = Image.new('RGB', (width + width, len_batch * height), (255, 255, 255))
        for i in range(len_batch):
            target.paste(Image.fromarray(output[i]), (0, i*height, width, (i+1)*height))
            target.paste(Image.fromarray(photo[i]), (width, i*height, 2 * width, (i+1)*height))

           
        target.save(name + "1.jpg")


    def set_lr(self,optimizer,lr) -> None:
        """Set the learning rate of every parameter group in `optimizer` to `lr`."""
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
            
        
if __name__ == "__main__":
    # Script entry point: construct the trainer and launch the full run.
    trainer = Train()
    trainer.run()

    
    
    
    
    
    
    
    
    
    