import torch
import time
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
import random
import os

def seed_all(seed_value):
    """Seed every RNG (Python, NumPy, torch CPU and GPU) for reproducibility.

    Args:
        seed_value: integer seed applied to all generators.
    """
    random.seed(seed_value)        # Python stdlib RNG
    np.random.seed(seed_value)     # NumPy RNG (CPU)
    torch.manual_seed(seed_value)  # torch CPU RNG

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed_value)
        torch.cuda.manual_seed_all(seed_value)  # all visible GPUs
        torch.backends.cudnn.enabled = True
        torch.backends.cudnn.deterministic = True  # force deterministic cuDNN kernels
        # Fixed: benchmark=True lets cuDNN auto-tune kernels per input shape,
        # which makes kernel selection nondeterministic and defeats the
        # deterministic=True setting above — it must be off for reproducibility.
        torch.backends.cudnn.benchmark = False

seed_all(42)

import datasets
import Evaluation
from networks.gan import Generator
from networks.myloss import my_MSELoss
import utils.utils as utils
from utils.drawer import LossHistory
from utils.logger import setup_logger
from utils.lr_scheduler import get_scheduler


class Trainer:
    """Training / evaluation driver for the DIBR generator network.

    Wires together the dataset loaders, the generator model, its optimizer
    and LR scheduler, the loss, and the logging utilities described by the
    configuration object passed to the constructor.
    """

    def __init__(self, cfg):
        """Build model, optimizer, loss and logging from the config.

        Args:
            cfg: configuration namespace; fields used here include
                ``default_gpu``, ``lr``, ``momentum``, ``weight_decay``,
                ``optimizer``, ``train``, ``log_dir`` and ``loss``.
        """
        self.cfg = cfg

        # Pin the process to the configured GPU before any CUDA context exists.
        os.environ["CUDA_VISIBLE_DEVICES"] = str(self.cfg.default_gpu)
        self.device = torch.device('cuda')

        # model
        self.generator = Generator(self.cfg).cuda()

        # Select the optimizer by name; an unknown name raises KeyError.
        self.optim_G = {
            'adam':  optim.Adam(self.generator.parameters(), lr=self.cfg.lr),
            'sgd':   optim.SGD(self.generator.parameters(), lr=self.cfg.lr,
                               momentum=self.cfg.momentum,
                               weight_decay=self.cfg.weight_decay),
            'adamw': optim.AdamW(self.generator.parameters(), lr=self.cfg.lr,
                                 weight_decay=self.cfg.weight_decay),
        }[self.cfg.optimizer]

        # Separate log files for training and testing runs.
        if cfg.train:
            self.logger = setup_logger(output=cfg.log_dir+'/train_log.log', name="dibr")
            self.draw = LossHistory(cfg.log_dir)
        else:
            self.logger = setup_logger(output=cfg.log_dir+'/test_log.log', name="dibr")

        # TODO add auto loss
        if self.cfg.loss == 'mse':
            self.mseloss = torch.nn.MSELoss()
        else:
            self.mseloss = my_MSELoss()

        self.logger.info(self.generator.__class__.__name__)

        total_params = sum(p.numel() for p in self.generator.parameters())
        self.logger.info(f'{total_params:,} total parameters.')

        total_trainable_params = sum(
            p.numel() for p in self.generator.parameters() if p.requires_grad)
        self.logger.info(f'{total_trainable_params:,} training parameters.')

    def logtime(self, batch_idx, loss, psnr, ssim):
        """Log running averages for one reporting interval.

        NOTE(review): despite its name, ``ssim`` receives the input-picture
        PSNR (``avg_psnrin``) from run_epoch and is printed under "psnrin";
        the parameter name is kept for interface compatibility.
        """
        print_string = "batch:{:>2}|loss:{:.5f}|psnr:{:.5f}|psnrin:{:.5f}"
        self.logger.info(print_string.format(batch_idx, loss, psnr, ssim))

    def run_epoch(self):
        """Run one training epoch over ``self.train_loader``.

        Returns:
            Tuple ``(mean loss, mean output PSNR)`` over the epoch.
        """
        avg_loss = 0
        avg_psnr = 0
        avg_psnrin = 0
        for batch_idx, inputs in enumerate(self.train_loader):
            for key, ipt in inputs.items():
                inputs[key] = ipt.to(self.device)

            self.start_time = time.time()
            outputs, loss_G = self.process_batch_G(inputs)

            targets = inputs['target']
            # Reference picture: mean of the two warped source views
            # (channels 0-2 and 3-5 of the middle view) — TODO confirm layout.
            picture = inputs['inputs'][:, 1, 0:3, ...]*0.5 + inputs['inputs'][:, 1, 3:6, ...]*0.5

            avg_loss += loss_G
            avg_psnr += Evaluation.psnr(outputs, targets).item()
            avg_psnrin += Evaluation.psnr(picture, targets).item()

            # Report running averages every 100 batches (skip batch 0).
            if batch_idx % 100 == 0 and batch_idx != 0:
                self.logtime(
                    batch_idx, avg_loss/(batch_idx+1), avg_psnr/(batch_idx+1), avg_psnrin/(batch_idx+1))

            # Per-iteration schedulers step every batch; 'Plateau' instead
            # steps once per validation inside train().
            if self.cfg.lr_scheduler != 'Plateau':
                self.G_lr_scheduler.step()

        return avg_loss/len(self.train_loader), avg_psnr/len(self.train_loader)

    def process_batch_G(self, inputs):
        """Run one optimizer step on the generator.

        Returns:
            Tuple ``(generated images, scalar loss value)``.
        """
        self.optim_G.zero_grad()

        fake_imgs = self.generator(inputs)
        loss_G = self.compute_losses(fake_imgs, inputs['target'])

        loss_G.backward()
        self.optim_G.step()

        return fake_imgs, loss_G.item()

    def datasetselect(self, dataname, flag):
        """Build the dataset and DataLoader for ``dataname``.

        Args:
            dataname: one of 'Ballet', 'Shark', 'Rena', 'Akko', 'Eth3D'.
            flag: 'train' for the shuffled batched loader; 'test'/'all' for a
                sequential batch-size-1 loader.

        Returns:
            Tuple ``(DataLoader, Dataset)``.

        Raises:
            KeyError: for an unknown ``dataname``.
            ValueError: for an unknown ``flag`` (previously raised a confusing
                UnboundLocalError instead).
        """
        dataset_by_name = {
            'Ballet': datasets.Ballet,
            'Shark': datasets.Shark,
            'Rena': datasets.Rena,
            'Akko': datasets.AkkoandKayo,
            'Eth3D': datasets.Eth3D,
        }
        dataset1 = dataset_by_name[dataname](self.cfg, flag)

        # NOTE(review): 'All_data' is not a key of the mapping above, so the
        # lookup would already have raised before this check — dead branch
        # kept only to preserve the original intent.
        shuffle_1 = dataname != 'All_data'

        # Fixed: this helper was defined twice verbatim; keep one copy.
        def seed_worker(worker_id):
            # Derive a per-worker NumPy seed from torch's seed so worker-side
            # randomness stays reproducible across runs.
            worker_seed = torch.initial_seed() % 2**32
            np.random.seed(worker_seed)

        if flag == 'train':
            data_loader = DataLoader(
                dataset1, self.cfg.batch_size, shuffle=shuffle_1, worker_init_fn=seed_worker,
                num_workers=self.cfg.num_workers, pin_memory=True, drop_last=True)
        elif flag == 'test' or flag == 'all':
            data_loader = DataLoader(
                dataset1, 1, shuffle=False, num_workers=self.cfg.num_workers,
                worker_init_fn=seed_worker, pin_memory=True, drop_last=True)
        else:
            raise ValueError("unknown dataset flag: {!r}".format(flag))

        return data_loader, dataset1

    def compute_losses(self, outputs, targets):
        """Reconstruction loss between generated images and the target."""
        loss1 = self.mseloss(outputs, targets)
        return loss1

    def train(self):
        """Full training loop with periodic validation and checkpointing."""
        dataset = self.cfg.dataset
        self.train_loader, traindataset = self.datasetselect(dataset, 'train')
        self.logger.info("training set:" + dataset + "| len of loader: {}".format(len(traindataset)))

        self.G_lr_scheduler = get_scheduler(self.optim_G, len(self.train_loader), self.cfg)
        self.logger.info("loaded lr_scheduler{}".format(self.cfg.lr_scheduler))
        maxpsnr = 0
        # Last validation PSNR; reused by the loss plot between validations.
        # Initialized defensively although epoch 0 always triggers a val.
        psnr = 0

        if self.cfg.checkpoint_path is not None:
            self.load_model_v2()
        else:
            self.generator.train()

        for num_epoch in range(self.cfg.max_epoch):
            self.logger.info("epoch: {}/{}".format(num_epoch, self.cfg.max_epoch))
            avg_loss, avg_psnr = self.run_epoch()

            if num_epoch % self.cfg.val_freq == 0 or num_epoch == 399:
                psnr = self.val(dataset)
                if maxpsnr < psnr:
                    maxpsnr = psnr
                    self.logger.info('epoch:{} | maxpsnr:{} '.format(num_epoch, maxpsnr))
                    self.logger.info('current learning rate {}'.format(self.optim_G.param_groups[0]['lr']))
                    self.save_model('generator_best')
                else:
                    # Plateau scheduler reacts only when validation stops improving.
                    if self.cfg.lr_scheduler == 'Plateau':
                        self.G_lr_scheduler.step(maxpsnr)
                    self.logger.info('current learning rate {}'.format(self.optim_G.param_groups[0]['lr']))

            # Skip the first epochs so the plot is not dominated by warm-up noise.
            if num_epoch > 2:
                self.draw.append_loss(avg_psnr, avg_loss, psnr)

        self.save_model('generator_last')
        self.logger.info("train over")

    def save_model(self, name='generator'):
        """Save the generator weights to ``<log_dir>/<name>.pth``."""
        torch.save(self.generator.state_dict(), self.cfg.log_dir+"/{}.pth".format(name))

    def load_model(self, name='generator'):
        """Load generator weights from the checkpoint file ``name``."""
        self.generator.load_state_dict(torch.load(name, map_location='cpu'))

    def load_model_v2(self):
        """Partially load a checkpoint, keeping only shape-compatible tensors.

        Keys missing from the current model, or whose shapes differ, are
        skipped and reported instead of raising.

        Raises:
            FileNotFoundError: if ``cfg.checkpoint_path`` is empty or missing.
        """
        path = self.cfg.checkpoint_path
        # Explicit error instead of `assert`, which is stripped under -O.
        if not path or not os.path.exists(path):
            raise FileNotFoundError("checkpoint not found: {!r}".format(path))

        model_dict = self.generator.state_dict()
        pretrained_dict = torch.load(path, map_location=self.device)

        # Keep only keys that exist in the current model with matching shapes.
        load_key, no_load_key, temp_dict = [], [], {}
        for k, v in pretrained_dict.items():
            if k in model_dict.keys() and np.shape(model_dict[k]) == np.shape(v):
                temp_dict[k] = v
                load_key.append(k)
            else:
                no_load_key.append(k)
        model_dict.update(temp_dict)
        self.generator.load_state_dict(model_dict)

        # Report which keys loaded and which did not (truncated for log size).
        self.logger.info("\nSuccessful Load Key:{},……\nSuccessful Load Key Num:{}".format(str(load_key)[:500], len(load_key)))
        self.logger.info("\nFail To Load Key:{}, ……\nFail To Load Key num:{}".format(str(no_load_key)[:500], len(no_load_key)))

        self.generator.train()

    def val(self, dataset):
        """Validate on up to 601 test batches.

        Returns:
            Mean PSNR of generator outputs against the targets.
        """
        test_loder, testdataset = self.datasetselect(dataset, 'test')
        self.logger.info("val start")

        psnr = 0
        self.generator.eval()
        for batch_idx, inputs in enumerate(test_loder):
            for key, ipt in inputs.items():
                inputs[key] = ipt.to(self.device)
            target = inputs['target']
            with torch.no_grad():
                fake_imgs = self.generator(inputs)
            psnr += Evaluation.psnr(fake_imgs, target).item()
            if batch_idx == 600:  # cap validation cost on large sets
                break

        psnr1 = psnr/(batch_idx+1)
        self.logger.info("psnr:{:.5f}".format(psnr1))

        # Restore training mode for the caller's next epoch.
        self.generator.train()

        return psnr1

    def test_pre(self, dataset, flag='test'):
        """Evaluate PSNR/SSIM of the generator and of the raw warped input."""
        # NOTE(review): loaders read cfg.num_workers, so this assignment has
        # no visible effect here — kept for compatibility.
        self.num_workers = 0
        test_loder, testdataset = self.datasetselect(dataset, flag)
        self.logger.info("testing set: " + dataset + "| len of loader: {}".format(len(testdataset)))
        try:
            self.load_model(name=self.cfg.checkpoint_path)
        except Exception:
            # Fixed: bare `except:` also swallowed KeyboardInterrupt/SystemExit.
            # Fall back to a checkpoint named 'generator' in the CWD.
            self.load_model(name='generator')
        self.generator.eval()
        psnrin = 0
        psnr = 0
        ssim = 0
        ssimin = 0
        for batch_idx, inputs in enumerate(test_loder):
            for key, ipt in inputs.items():
                inputs[key] = ipt.to(self.device)
            inputs1 = inputs['inputs'][:, 1, ...]
            target = inputs['target']
            # Raw input baseline: mean of the two warped views.
            picture = inputs1[:, 0:3, ...]*0.5 + inputs1[:, 3:6, ...]*0.5
            with torch.no_grad():
                fake_imgs = self.generator(inputs)
            psnrin += Evaluation.psnr(picture, target).item()
            ssimin += Evaluation.ssim(picture, target).item()
            psnr += Evaluation.psnr(fake_imgs, target).item()
            ssim += Evaluation.ssim(fake_imgs, target).item()
            if batch_idx % 100 == 0:
                # Periodic progress report with running averages.
                psnrin2 = psnrin/(batch_idx+1)
                ssimin2 = ssimin/(batch_idx+1)
                psnr2 = psnr/(batch_idx+1)
                ssim2 = ssim/(batch_idx+1)
                self.logger.info("batch:{:>2}|psnr:{:.5f}  ssim:{:.5f}".format(batch_idx, psnr2, ssim2))
                self.logger.info("batch:{:>2}|psnrin:{:.5f}  ssimin:{:.5f}".format(batch_idx, psnrin2, ssimin2))
        psnr1 = psnr/(batch_idx+1)
        ssim1 = ssim/(batch_idx+1)
        psnrin1 = psnrin/(batch_idx+1)
        ssimin1 = ssimin/(batch_idx+1)
        self.logger.info("psnr:{:.5f}  ssim:{:.5f}".format(psnr1, ssim1))
        self.logger.info("psnrin:{:.5f}  ssimin:{:.5f}".format(psnrin1, ssimin1))

    def test_show(self, dataset, flag='test'):
        """Save a few sample outputs alongside their inputs and targets."""
        self.num_workers = 0
        test_loder, testdataset = self.datasetselect(dataset, flag)
        self.logger.info("testing set:" + dataset + "  {}".format(len(testdataset)))
        try:
            self.load_model(name=self.cfg.checkpoint_path)
        except Exception:
            self.load_model(name='generator')
        self.generator.eval()
        psnr = 0
        ssim = 0
        index = [9, 19, 29]  # fixed sample indices to visualize
        psnrsum = []
        for i in index:
            inputs = testdataset[i]
            for key, ipt in inputs.items():
                # Dataset items are unbatched; add the batch dimension.
                inputs[key] = ipt.to(self.device).unsqueeze(0)
            inputs1 = inputs['inputs'][:, 1, ...]
            target = inputs['target']
            picture = inputs1[:, 0:3, ...]
            with torch.no_grad():
                fake_imgs = self.generator(inputs)
            utils.tensor_to_PIL(picture, 'pictures{}'.format(i))
            utils.tensor_to_PIL(fake_imgs, 'fake_imgs_time{}'.format(i))
            utils.tensor_to_PIL(target, 'target{}'.format(i))
            # Per-sample PSNR (logged and collected); SSIM is accumulated
            # but currently unreported — preserved as-is.
            psnr = Evaluation.psnr(fake_imgs, target).item()
            ssim += Evaluation.ssim(fake_imgs, target).item()
            psnrsum.append(psnr)
            self.logger.info(psnr)

    def evaluation_in(self, picture, target):
        """Return the PSNR of the raw input picture against the target."""
        psnrin = Evaluation.psnr(picture, target).item()
        return psnrin

    def evaluation_in_test(self, dataset):
        """Measure the input-picture PSNR over up to 201 test batches."""
        test_loder, testdataset = self.datasetselect(dataset, flag='test')
        self.logger.info("testing set:" + dataset + "  {}".format(len(testdataset)))
        psnrin = 0
        for batch_idx, inputs in enumerate(test_loder):
            for key, ipt in inputs.items():
                inputs[key] = ipt.to(self.device)
            inputs1 = inputs['inputs']
            # Fixed: the dataset key is 'target' (singular) everywhere else
            # in this class; 'targets' raised a KeyError here.
            target = inputs['target']
            # NOTE(review): other methods select the middle view with
            # [:, 1, ...] before slicing channels — confirm whether that
            # index is also needed here.
            picture = inputs1[:, 0:3, ...]
            psnrin += self.evaluation_in(picture, target)
            if batch_idx == 200:
                break
        psnrin = psnrin/(batch_idx+1)
        self.logger.info("batch:{:>2}|psnrin:{:.5f}".format(batch_idx, psnrin))

    def test(self):
        """Evaluation entry point: run test_pre on one or all datasets."""
        dataset = self.cfg.dataset
        mytest = self.test_pre

        if dataset != 'All_data':
            mytest(dataset)
        else:
            # 'All_data' fans out over every dataset name it yields.
            datasets1 = datasets.All_data('test')
            self.logger.info(datasets1)
            for dataset in datasets1:
                mytest(dataset, flag='test')
        self.logger.info('testing over')