import torch
from read_config import read_file
from torch.utils.data import DataLoader
import datetime
import os
import shutil
from torchvision import transforms
from PIL import Image
from utils import normalize,denormalize,check_creat_path,mv_path


class TrainBase:
    """Config-driven training scaffold.

    Reads a YAML config and assembles dataloaders, models, optimizers and
    losses from it, then drives the pretrain/train/save schedule via
    ``work()``.  Subclasses must implement ``pretrain``, ``train`` and
    ``test``.
    """

    def __init__(self, yml_file) -> None:
        """Parse *yml_file* and build every training component.

        Args:
            yml_file: path to a YAML config understood by ``read_file``.
        """
        self.args = read_file(yml_file)
        print(f"-----------{self.args['project_name']}-----------")

        use_cuda = self.args['use_cuda'] and torch.cuda.is_available()
        self.device = torch.device('cuda' if use_cuda else 'cpu')

        num_workers = self.args['num_workers']
        pin_memory = self.args['pin_memory']
        kwargs = {"num_workers": num_workers, "pin_memory": pin_memory}

        # Build the dataloader pipeline.
        # SECURITY NOTE(review): eval() on config-supplied type names executes
        # arbitrary code -- only ever feed trusted config files to this class.
        datasets_config = self.args['datasets']
        self.train_dataloader = []
        for dc in datasets_config:
            batch_size = dc.pop('batch_size')
            dataset = eval(dc.pop('type'))(**dc)
            self.train_dataloader.append(
                DataLoader(
                    dataset,
                    batch_size=batch_size,
                    shuffle=self.args['shuffle'],
                    collate_fn=eval(self.args['collate_fn']),
                    drop_last=self.args['drop_last'],
                    **kwargs,
                )
            )

        # Build the model pipeline.
        # NOTE(review): models are wrapped in DataParallel but never moved to
        # self.device here -- presumably a subclass handles that; confirm.
        self.models = {}
        for model_infor in self.args['models']:
            name = model_infor.pop('name')
            resume_checkpoint = model_infor.pop('resume_checkpoint', None)
            self.models[name] = torch.nn.DataParallel(
                eval(model_infor.pop('type'))(**model_infor)
            )

            if resume_checkpoint is not None:
                # Second positional arg is strict=False: checkpoints with
                # missing/extra keys still load.
                self.models[name].load_state_dict(
                    torch.load(resume_checkpoint, map_location='cpu')['model_state_dict'],
                    False,
                )
            else:
                # Was printing the full module repr; the model *name* is the
                # useful piece of information here.
                print(f"{name} train from scratch")

        # Build the optimizer pipeline: one optimizer may own the parameters
        # of several models, one param-group per model.
        self.optims = {}
        for optim_info in self.args['optims']:
            name = optim_info.pop('name')
            model_names = optim_info.pop('model_parameter')
            parameters = [self.models[model_name].parameters() for model_name in model_names]
            self.optims[name] = eval(optim_info.pop('type'))(
                [
                    {'params': parameter, **optim_info}
                    for parameter in parameters
                ]
            )

        # Build the loss pipeline.
        self.losses = {}
        for loss_info in self.args['losses']:
            name = loss_info.pop('name')
            self.losses[name] = eval(loss_info.pop('type'))(**loss_info)

        # Copy the config file into a timestamped workspace so each run keeps
        # a record of the exact configuration it was launched with.
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
        self.args['workspace'] = self.args['workspace'] + f"_{timestamp}"
        yml_file_name = os.path.basename(yml_file)
        yml_save_path = os.path.join('weights', self.args['workspace'], yml_file_name)
        check_creat_path(yml_save_path)
        mv_path(yml_file, yml_save_path)

    def pretrain(self, epoch):
        """Run one pre-training epoch. Must be implemented by subclasses."""
        raise NotImplementedError()

    def train(self, epoch):
        """Run one training epoch. Must be implemented by subclasses."""
        raise NotImplementedError()

    def test(self, epoch):
        """Run one evaluation pass. Must be implemented by subclasses."""
        raise NotImplementedError()

    def save(self, epoch, extra_info, prefix='weights', ingores=('Discriminator',)):
        """Checkpoint every model whose name matches none of *ingores*.

        Args:
            epoch: epoch index stored in the checkpoint and the directory name.
            extra_info: sub-directory label (e.g. 'pretrain' / 'train').
            prefix: root directory for checkpoints.
            ingores: substrings; any model whose name contains one of them is
                skipped (default changed from a mutable list to a tuple).
        """
        for k in self.models.keys():
            # Skip models (e.g. GAN discriminators) excluded from saving.
            if any(ingore in k for ingore in ingores):
                continue

            save_path = os.path.join(
                os.getcwd(), prefix, self.args['workspace'], extra_info, str(epoch), k + '.pth'
            )

            save_dir = os.path.dirname(save_path)
            os.makedirs(save_dir, exist_ok=True)

            torch.save(
                {'model_state_dict': self.models[k].state_dict(),
                 'epoch': epoch},
                save_path
            )
            # BUG FIX: this call used to omit the required CASE argument and
            # raised TypeError on every save(); CASE now defaults to None and
            # inference() is a no-op without sample images.
            self.inference(save_dir=save_dir)

    @torch.no_grad()
    def inference(self, save_dir, CASE=None):
        """Render generator outputs for a list of sample images.

        Args:
            save_dir: directory under which ``test_result/`` is created.
            CASE: iterable of image file paths; when falsy, nothing is done.
        """
        if not CASE:
            return
        result_dir = os.path.join(save_dir, 'test_result')
        check_creat_path(result_dir)
        self.models['UnetGenerator'].eval()
        # The transform is loop-invariant; build it once.
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        for index, case_path in enumerate(CASE):
            image = Image.open(case_path).convert('RGB')
            image = image.resize([256, 256])
            image = T(image).unsqueeze(0).to(self.device)
            result = self.models['UnetGenerator'].module.forward_gan(image)
            save_path = os.path.join(result_dir, str(index) + '.png')

            self.get_image(result, image, save_path)

    @staticmethod
    def get_image(gen_image, real_image, save_path):
        """Save a side-by-side strip: generated (left) vs real (right), one row
        per batch element.  Inputs are NCHW tensors normalized to [-1, 1]."""
        if gen_image.shape[0] < 1:
            return
        len_batch = gen_image.shape[0]
        width = gen_image.shape[-1]
        height = gen_image.shape[-2]

        # NCHW -> NHWC, undo [-1,1] normalization, scale to uint8 pixels.
        gen_output = ((denormalize(gen_image.permute((0, 2, 3, 1))).detach().to('cpu').numpy()) * 255).astype('uint8')
        real_output = ((denormalize(real_image.permute((0, 2, 3, 1))).detach().to('cpu').numpy()) * 255).astype('uint8')

        target = Image.new('RGB', (width + width, len_batch * height), (255, 255, 255))
        for i in range(len_batch):
            target.paste(Image.fromarray(gen_output[i]), (0, i * height, width, (i + 1) * height))
            # BUG FIX: the real image was pasted at x = (i+1)*width, which is
            # off-canvas (silently clipped) for every row after the first --
            # the canvas is only 2*width wide.  The right column is always at
            # x = width.
            target.paste(Image.fromarray(real_output[i]), (width, i * height, 2 * width, (i + 1) * height))

        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        target.save(save_path)

    def work(self):
        """Full schedule: initial save, pretrain epochs, then train epochs."""
        self.save(-1, extra_info='train', ingores=[])
        for epoch in range(self.args['resume_pretrain_epochs'], self.args['pretrain_epochs']):
            self.pretrain(epoch)
            self.save(epoch, extra_info='pretrain')
        for epoch in range(self.args['resume_train_epochs'], self.args['epochs']):
            self.train(epoch)
            self.save(epoch, extra_info='train', ingores=[])