import torch
from read_config import read_file
from datalist import *
from torch.utils.data import DataLoader
from models import *
from utils import *
import os
from functools import partial
import datetime
import shutil
from PIL import Image
from torchvision import transforms
# Hard-case sample images rendered after every checkpoint save for a quick
# qualitative check of the generator.
_HARDCASE_DIR = 'C:\\Users\\luotianhang\\Desktop\\MagicGAN\\hardcase'
_HARDCASE_FILES = (
    '2.png',
    '20240313234445.jpg',
    'PICA0000.jpg',
    'PICA0001.jpg',
    'PICA00071.png',
    '2.jpg',
    '17.jpg',
    '38.jpg',
    '46.jpg',
    '50.jpg',
    '58.jpg',
    '60.jpg',
)
CASE = [_HARDCASE_DIR + '\\' + name for name in _HARDCASE_FILES]
class TrainBase:
    """Config-driven training scaffold shared by the GAN trainers.

    Reads a YAML config, builds dataloaders / models / optimizers / losses
    from it, snapshots the config into a timestamped workspace, and provides
    the save/inference plumbing.  Subclasses implement pretrain/train/test.

    NOTE(review): dataset, model, optimizer and loss classes are resolved
    with eval() on config strings -- this executes arbitrary code, so only
    trusted config files may be used.
    """

    def __init__(self, yml_file) -> None:
        self.args = read_file(yml_file)
        print(f"-----------{self.args['project_name']}-----------")

        use_cuda = self.args['use_cuda'] and torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')

        kwargs = {
            "num_workers": self.args['num_workers'],
            "pin_memory": self.args['pin_memory'],
        }

        # One DataLoader per entry in 'datasets'; each entry's 'type' names a
        # dataset class brought in via `from datalist import *`.
        self.train_dataloader = []
        for dc in self.args['datasets']:
            batch_size = dc.pop('batch_size')
            dataset = eval(dc.pop('type'))(**dc)
            self.train_dataloader.append(
                DataLoader(
                    dataset,
                    batch_size=batch_size,
                    shuffle=self.args['shuffle'],
                    collate_fn=eval(self.args['collate_fn']),
                    drop_last=self.args['drop_last'],
                    **kwargs,
                )
            )

        # Models are wrapped in DataParallel; optional checkpoint resume.
        self.models = {}
        for model_infor in self.args['models']:
            name = model_infor.pop('name')
            resume_checkpoint = model_infor.pop('resume_checkpoint', None)
            self.models[name] = torch.nn.DataParallel(
                eval(model_infor.pop('type'))(**model_infor))

            if resume_checkpoint is not None:
                # BUGFIX: torch.load returns a checkpoint dict, which has no
                # .to() method -- the old `.to(self.device)` call crashed on
                # every resume.  Load to CPU and let load_state_dict copy in.
                checkpoint = torch.load(resume_checkpoint, map_location='cpu')
                self.models[name].load_state_dict(
                    checkpoint['model_state_dict'], strict=False)
                # Pick up the saved epoch unless the config already sets one.
                if 'epoch' in checkpoint and 'epoch' not in self.args:
                    self.args['epoch'] = checkpoint['epoch']
            else:
                print(f"{self.models[name]} train from scratch")

        # One optimizer per entry; 'model_parameter' lists the model names
        # whose parameters form the optimizer's parameter groups.
        self.optims = {}
        for optim_info in self.args['optims']:
            name = optim_info.pop('name')
            model_names = optim_info.pop('model_parameter')
            optim_cls = eval(optim_info.pop('type'))
            # 'type' must be popped before spreading the remaining keys as
            # per-group hyper-parameters.
            self.optims[name] = optim_cls([
                {'params': self.models[model_name].parameters(), **optim_info}
                for model_name in model_names
            ])

        self.losses = {}
        for loss_info in self.args['losses']:
            name = loss_info.pop('name')
            self.losses[name] = eval(loss_info.pop('type'))(**loss_info)

        # Timestamp the workspace so every run gets a unique directory, then
        # snapshot the config next to the weights for reproducibility.
        stamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        self.args['workspace'] = self.args['workspace'] + f"_{stamp}"
        yml_file_name = yml_file.split('/')[-1]
        yml_save_path = os.path.join('weights', self.args['workspace'], yml_file_name)
        os.makedirs(os.path.dirname(yml_save_path), exist_ok=True)
        shutil.copy(yml_file, yml_save_path)

    def pretrain(self, epoch):
        """Run one warm-up epoch.  Implemented by subclasses."""
        raise NotImplementedError()

    def train(self, epoch):
        """Run one adversarial training epoch.  Implemented by subclasses."""
        raise NotImplementedError()

    def test(self, epoch):
        """Run evaluation.  Implemented by subclasses."""
        raise NotImplementedError()

    def save(self, epoch, extra_info, prefix='weights', ingores=['Discriminator']):
        """Checkpoint every model whose name contains no ignored substring,
        then render the hard-case inference grid next to each checkpoint.

        ingores: substrings of model names to skip (default: discriminators).
        The default list is read-only here, so the shared mutable default is
        harmless; the name is kept for interface compatibility.
        """
        for k in self.models:
            if any(ingore in k for ingore in ingores):
                continue

            save_path = os.path.join(os.getcwd(), prefix, self.args['workspace'],
                                     extra_info, str(epoch), k + '.pth')
            save_dir = os.path.dirname(save_path)
            os.makedirs(save_dir, exist_ok=True)

            torch.save(
                {'model_state_dict': self.models[k].state_dict(),
                 'epoch': epoch},
                save_path
            )
            # Qualitative check: run the generator on the hard cases and keep
            # the grids alongside this checkpoint.
            self.inference(save_dir=save_dir)

    @torch.no_grad()
    def inference(self, save_dir):
        """Run the generator on every CASE image and save side-by-side
        (generated | input) grids under `save_dir/test_result`."""
        result_dir = os.path.join(save_dir, 'test_result')
        os.makedirs(result_dir, exist_ok=True)
        self.models['UnetGenerator'].eval()
        # Input-independent transform, hoisted out of the loop.
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        for index, case_path in enumerate(CASE):
            image = Image.open(case_path).convert('RGB')
            image = image.resize([256, 256])
            image = T(image).unsqueeze(0).to(self.device)
            # .module bypasses DataParallel to reach the custom forward_gan.
            result = self.models['UnetGenerator'].module.forward_gan(image)
            save_path = os.path.join(result_dir, str(index) + '.png')
            self.get_image(result, image, save_path)

    @staticmethod
    def get_image(gen_image, real_image, save_path):
        """Write a grid image: generated outputs in the left column, the
        corresponding input images in the right column, one row per batch
        element.  Inputs are NCHW tensors in the normalized [-1, 1] range."""
        if gen_image.shape[0] < 1:
            return
        len_batch = gen_image.shape[0]
        width = gen_image.shape[-1]
        height = gen_image.shape[-2]

        # NCHW -> NHWC, undo normalization, scale to uint8 for PIL.
        gen_output = ((denormalize(gen_image.permute((0, 2, 3, 1))).detach().to('cpu').numpy()) * 255).astype('uint8')
        real_output = ((denormalize(real_image.permute((0, 2, 3, 1))).detach().to('cpu').numpy()) * 255).astype('uint8')

        target = Image.new('RGB', (width + width, len_batch * height), (255, 255, 255))
        for i in range(len_batch):
            target.paste(Image.fromarray(gen_output[i]), (0, i * height, width, (i + 1) * height))
            # BUGFIX: the real image belongs in the fixed right-hand column.
            # The old x-offset of (i+1)*width pushed every row after the
            # first off the 2*width canvas, so those pastes were lost.
            target.paste(Image.fromarray(real_output[i]), (width, i * height, 2 * width, (i + 1) * height))

        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        target.save(save_path)

    def work(self):
        """Full schedule: baseline snapshot, warm-up epochs, adversarial epochs."""
        # Save everything (no ignores) once before training as a baseline.
        self.save(-1, extra_info='train', ingores=[])
        resume_pretrain_epochs = self.args.pop('resume_pretrain_epochs', 0)
        for epoch in range(resume_pretrain_epochs, self.args['pretrain_epochs']):
            self.pretrain(epoch)
            self.save(epoch, extra_info='pretrain')
        resume_train_epochs = self.args.pop('resume_train_epochs', 0)
        for epoch in range(resume_train_epochs, self.args['epochs']):
            self.train(epoch)
            self.save(epoch, extra_info='train', ingores=[])
            

from tqdm import tqdm

class WhiteBoxCartoonGan(TrainBase):
    """White-box cartoonization trainer: LSGAN losses on a blurred "surface"
    view and a random-gray "texture" view, plus VGG content and TV losses,
    with optional face-landmark and segmentation heads."""

    def __init__(self, yml_file) -> None:
        super().__init__(yml_file)
        # Random channel re-weighting used to build the texture (gray) view.
        self.color_shift = ColorShift(device=self.device)
        # Superpixel helper; not used in these loops but kept for callers.
        self.superpixel_fn = partial(sscolor, seg_num=200)

    def _collect(self, mem, only_keys=None):
        """Merge the per-dataloader batches of one step into a single dict
        keyed by task name, concatenating 'data'/'gt' tensors along the
        batch dimension.  `only_keys` optionally whitelists task names."""
        collected = {}
        for m in mem:
            for k in m:
                if only_keys is not None and k not in only_keys:
                    continue
                data = m[k]['data'].to(self.device)
                gt = m[k]['gt'].to(self.device)
                if k not in collected:
                    collected[k] = {'data': data, 'gt': gt}
                else:
                    collected[k]['data'] = torch.cat([collected[k]['data'], data], dim=0)
                    collected[k]['gt'] = torch.cat([collected[k]['gt'], gt], dim=0)
        return collected

    def pretrain(self, epoch):
        """Reconstruction-only warm-up: the generator learns to reproduce
        the 'gan' input images under an MSE objective."""
        self.models['UnetGenerator'].train()

        pbar = tqdm(zip(*self.train_dataloader),
                    desc=f"PreTrain Epoch:{epoch}/{self.args['pretrain_epochs']}")

        for mem in pbar:
            all_mem_collect = self._collect(mem, only_keys=('gan',))

            self.optims['g_optim'].zero_grad()
            gan, _, _ = self.models['UnetGenerator'](all_mem_collect)
            image = all_mem_collect['gan']['data']
            loss = self.losses['MSELoss'](gan, image)
            loss.backward()
            self.optims['g_optim'].step()
            # BUGFIX: progress previously reported .../args['epochs'] even
            # though this loop runs to pretrain_epochs.
            pbar.set_description(
                f"PreTrain Epoch: {epoch}/{self.args['pretrain_epochs']} " +
                f"Gloss:{round(loss.item(), 4)} "
            )

    def train(self, epoch):
        """One adversarial epoch: a generator step, then a discriminator
        step on blurred and grayscale views of fake vs. ground-truth."""
        self.models['UnetGenerator'].train()
        self.models['Discriminator_blur'].train()
        self.models['Discriminator_texture'].train()

        # BUGFIX: description previously said "PreTrain .../pretrain_epochs".
        pbar = tqdm(zip(*self.train_dataloader),
                    desc=f"Train Epoch:{epoch}/{self.args['epochs']}")

        for mem in pbar:
            all_mem_collect = self._collect(mem)

            # ---- generator step ----
            total_generator_loss = torch.tensor(0., device=self.device)
            # BUGFIX: initialise the per-head losses up front so the
            # set_description below cannot raise NameError when a head is
            # inactive (the old code only bound them inside the if-branches).
            generator_loss = torch.tensor(0., device=self.device)
            content_loss = torch.tensor(0., device=self.device)
            self.optims['g_optim'].zero_grad()
            gan, facelandmark, seg = self.models['UnetGenerator'](all_mem_collect)
            if gan is not None:
                fake_image = gan
                image = all_mem_collect['gan']['data']

                # Surface view: edge-preserving smoothing of the fake.
                blur_fake = guided_filter(fake_image, fake_image, r=5, eps=2e-1)
                d_blur_fake = self.models['Discriminator_blur'](blur_fake)

                # Texture view: randomly weighted grayscale of the fake.
                gray_fake = self.color_shift(fake_image)
                d_gray_fake = self.models['Discriminator_texture'](gray_fake)

                fake_content = self.losses['VGGloss'](fake_image)
                gt_content = self.losses['VGGloss'](image)

                # Generator wants both discriminators to output 1 (LSGAN).
                generator_blur_loss = self.args['generator_blur_weight'] * self.losses['MSELoss'](d_blur_fake, torch.ones_like(d_blur_fake))
                generator_texture_loss = self.args['generator_texture_weight'] * self.losses['MSELoss'](d_gray_fake, torch.ones_like(d_gray_fake))

                # Content loss normalised by the VGG feature-map size.
                _, c, h, w = fake_content.shape
                content_loss = self.args['content_weight'] * self.losses['L1Loss'](fake_content, gt_content) / (c * h * w)
                tv_loss = self.args['tv_weight'] * self.losses['TVLoss'](fake_image)
                generator_loss = generator_blur_loss + generator_texture_loss + content_loss + tv_loss

                total_generator_loss = total_generator_loss + generator_loss

            facelandmark_loss = torch.tensor(0., device=self.device)
            if facelandmark is not None:
                landmark_gt = all_mem_collect['facelandmark']['gt']
                facelandmark_loss = self.losses['L1Loss'](facelandmark, landmark_gt)
                total_generator_loss = total_generator_loss + facelandmark_loss

            segloss = torch.tensor(0., device=self.device)
            if seg is not None:
                segloss = self.losses['CrossEntropyLoss'](seg, all_mem_collect['seg']['gt'].long())
                total_generator_loss = total_generator_loss + segloss

            total_generator_loss.backward()
            self.optims['g_optim'].step()

            # ---- discriminator step ----
            self.optims['d_optim'].zero_grad()
            # NOTE(review): a fresh (non-detached) generator forward is run
            # here; gradients also flow into the generator graph, but only
            # discriminator parameters are stepped.  Assumes the 'gan' head
            # is active -- confirm configs always enable it for train().
            gan, facelandmark, seg = self.models['UnetGenerator'](all_mem_collect)
            fake_image = gan
            gt = all_mem_collect['gan']['gt']
            blur_fake = guided_filter(fake_image, fake_image, r=5, eps=2e-1)
            blur_gt = guided_filter(gt, gt, r=5, eps=2e-1)
            d_blur_fake = self.models['Discriminator_blur'](blur_fake)
            d_blur_gt = self.models['Discriminator_blur'](blur_gt)
            gray_fake = self.color_shift(fake_image)
            gray_real = self.color_shift(gt)

            d_gray_fake = self.models['Discriminator_texture'](gray_fake)
            d_gray_real = self.models['Discriminator_texture'](gray_real)

            # LSGAN targets: real -> 1, fake -> 0.
            discrimnator_blur_loss = self.losses['MSELoss'](d_blur_fake, torch.zeros_like(d_blur_fake)) + self.losses['MSELoss'](d_blur_gt, torch.ones_like(d_blur_gt))
            discrimnator_gray_loss = self.losses['MSELoss'](d_gray_fake, torch.zeros_like(d_gray_fake)) + self.losses['MSELoss'](d_gray_real, torch.ones_like(d_gray_real))

            discrimnator_loss = discrimnator_blur_loss + discrimnator_gray_loss

            discrimnator_loss.backward()
            self.optims['d_optim'].step()

            pbar.set_description(
                f"Train Epoch: {epoch}/{self.args['epochs']} \t"
                f"Gloss:{round(total_generator_loss.item(), 2)} \t"
                f"Dloss:{round(discrimnator_loss.item(), 2)} \t"
                f"Generator_loss:{round(generator_loss.item(), 2)}\t"
                f"Segment_loss:{round(segloss.item(), 2)}\t"
                f"Landmarkloss:{round(facelandmark_loss.item(), 2)}\t"
                f"ContentLoss:{round(content_loss.item(), 8)} \t"
            )
      
      
class WhiteBoxCartoonGan2(WhiteBoxCartoonGan):
    """Variant trainer whose batches additionally carry an 'other' tensor
    (a per-image segmentation target consumed by the SegLoss term)."""

    def __init__(self, yml_file) -> None:
        super().__init__(yml_file)

    def _collect_with_other(self, mem, only_keys=None):
        """Merge per-dataloader batches into one dict keyed by task name,
        concatenating the 'data', 'gt' and 'other' tensors along the batch
        dimension.  `only_keys` optionally whitelists task names."""
        collected = {}
        for m in mem:
            for k in m:
                if only_keys is not None and k not in only_keys:
                    continue
                batch = {field: m[k][field].to(self.device)
                         for field in ('data', 'gt', 'other')}
                if k not in collected:
                    collected[k] = batch
                else:
                    for field, value in batch.items():
                        collected[k][field] = torch.cat([collected[k][field], value], dim=0)
        return collected

    def pretrain(self, epoch):
        """Reconstruction-only warm-up on the 'gan' task (MSE to the input)."""
        self.models['UnetGenerator'].train()

        pbar = tqdm(zip(*self.train_dataloader),
                    desc=f"PreTrain Epoch:{epoch}/{self.args['pretrain_epochs']}")

        for mem in pbar:
            all_mem_collect = self._collect_with_other(mem, only_keys=('gan',))

            self.optims['g_optim'].zero_grad()
            gan, _, _ = self.models['UnetGenerator'](all_mem_collect)
            image = all_mem_collect['gan']['data']
            loss = self.losses['MSELoss'](gan, image)
            loss.backward()
            self.optims['g_optim'].step()
            # BUGFIX: progress previously reported .../args['epochs'] even
            # though this loop runs to pretrain_epochs.
            pbar.set_description(
                f"PreTrain Epoch: {epoch}/{self.args['pretrain_epochs']} " +
                f"Gloss:{round(loss.item(), 4)} "
            )

    def train(self, epoch):
        """One adversarial epoch: generator step (with a currently
        zero-weighted SegLoss term), then the discriminator step."""
        self.models['UnetGenerator'].train()
        self.models['Discriminator_blur'].train()
        self.models['Discriminator_texture'].train()

        # BUGFIX: the progress-bar label previously said "PreTrain" here.
        pbar = tqdm(zip(*self.train_dataloader),
                    desc=f"Train Epoch:{epoch}/{self.args['epochs']}")

        for mem in pbar:
            all_mem_collect = self._collect_with_other(mem)
            loss_result_dict = {}

            # ---- generator step ----
            total_generator_loss = torch.tensor(0., device=self.device)
            self.optims['g_optim'].zero_grad()
            gan, facelandmark, seg = self.models['UnetGenerator'](all_mem_collect)
            if gan is not None:
                fake_image = gan
                image = all_mem_collect['gan']['data']

                # Surface view: edge-preserving smoothing of the fake.
                blur_fake = guided_filter(fake_image, fake_image, r=5, eps=2e-1)
                d_blur_fake = self.models['Discriminator_blur'](blur_fake)

                # Texture view: randomly weighted grayscale of the fake.
                gray_fake = self.color_shift(fake_image)
                d_gray_fake = self.models['Discriminator_texture'](gray_fake)

                fake_content = self.losses['VGGloss'](fake_image)
                gt_content = self.losses['VGGloss'](image)

                generator_blur_loss = self.args['generator_blur_weight'] * self.losses['MSELoss'](d_blur_fake, torch.ones_like(d_blur_fake))
                generator_texture_loss = self.args['generator_texture_weight'] * self.losses['MSELoss'](d_gray_fake, torch.ones_like(d_gray_fake))

                # Content loss normalised by the VGG feature-map size.
                _, c, h, w = fake_content.shape
                content_loss = self.args['content_weight'] * self.losses['L1Loss'](fake_content, gt_content) / (c * h * w)
                tv_loss = self.args['tv_weight'] * self.losses['TVLoss'](fake_image)
                generator_loss = generator_blur_loss + generator_texture_loss + content_loss + tv_loss

                fake_seg = self.losses['SegLoss'](fake_image)[0]
                image_seg = all_mem_collect['gan']['other']
                # NOTE(review): multiplied by 0. so it contributes nothing to
                # the objective -- looks deliberately disabled; confirm
                # before deleting the term entirely.
                seg_loss = self.losses['CrossEntropyLoss'](fake_seg, image_seg.long()) * 0.

                total_generator_loss = total_generator_loss + generator_loss + seg_loss
                loss_result_dict['content loss'] = round(content_loss.item(), 8)
                loss_result_dict['generator loss'] = round(total_generator_loss.item(), 2)

            facelandmark_loss = torch.tensor(0., device=self.device)
            if facelandmark is not None:
                landmark_gt = all_mem_collect['facelandmark']['gt']
                facelandmark_loss = self.losses['L1Loss'](facelandmark, landmark_gt)
                total_generator_loss = total_generator_loss + facelandmark_loss
                # BUGFIX: was facelandmark_loss.items() -- tensors have no
                # .items(), so this raised AttributeError whenever the
                # landmark head was active; .item() is the scalar accessor.
                loss_result_dict['facelandmark loss'] = round(facelandmark_loss.item(), 2)

            segloss = torch.tensor(0., device=self.device)
            if seg is not None:
                segloss = self.losses['CrossEntropyLoss'](seg, all_mem_collect['seg']['gt'].long())
                total_generator_loss = total_generator_loss + segloss
                loss_result_dict['segloss'] = round(segloss.item(), 2)

            total_generator_loss.backward()
            self.optims['g_optim'].step()

            # ---- discriminator step ----
            self.optims['d_optim'].zero_grad()
            # NOTE(review): fresh (non-detached) generator forward; gradients
            # also reach the generator graph but only discriminator params
            # are stepped.  Assumes the 'gan' head is active -- confirm.
            gan, facelandmark, seg = self.models['UnetGenerator'](all_mem_collect)
            fake_image = gan
            gt = all_mem_collect['gan']['gt']
            blur_fake = guided_filter(fake_image, fake_image, r=5, eps=2e-1)
            blur_gt = guided_filter(gt, gt, r=5, eps=2e-1)
            d_blur_fake = self.models['Discriminator_blur'](blur_fake)
            d_blur_gt = self.models['Discriminator_blur'](blur_gt)
            gray_fake = self.color_shift(fake_image)
            gray_real = self.color_shift(gt)

            d_gray_fake = self.models['Discriminator_texture'](gray_fake)
            d_gray_real = self.models['Discriminator_texture'](gray_real)

            # LSGAN targets: real -> 1, fake -> 0.
            discrimnator_blur_loss = self.losses['MSELoss'](d_blur_fake, torch.zeros_like(d_blur_fake)) + self.losses['MSELoss'](d_blur_gt, torch.ones_like(d_blur_gt))
            discrimnator_gray_loss = self.losses['MSELoss'](d_gray_fake, torch.zeros_like(d_gray_fake)) + self.losses['MSELoss'](d_gray_real, torch.ones_like(d_gray_real))

            discrimnator_loss = discrimnator_blur_loss + discrimnator_gray_loss

            discrimnator_loss.backward()
            self.optims['d_optim'].step()
            loss_result_dict['discrimnator loss'] = round(discrimnator_loss.item(), 2)

            loss_output = ''.join(f"{k} {v}\t" for k, v in loss_result_dict.items())
            pbar.set_description(
                f"Train Epoch: {epoch}/{self.args['epochs']} \t" + loss_output
            )
if __name__ == "__main__":
    yml_file = 'configs/pixar/9.yml'
    # SECURITY FIX: resolve the architecture name through an explicit
    # registry instead of eval(), so a config string cannot execute
    # arbitrary code.  KeyError on an unknown architecture name.
    architectures = {
        'TrainBase': TrainBase,
        'WhiteBoxCartoonGan': WhiteBoxCartoonGan,
        'WhiteBoxCartoonGan2': WhiteBoxCartoonGan2,
    }
    model = architectures[read_file(yml_file)['architecture']](yml_file)
    model.work()