#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import os
from datetime import datetime

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision
from torch.optim import Adam, lr_scheduler
from torchvision import utils

# from unet import UNet
#from swin_unet import UNet_emb #UNet
#from swin_unet_transformer import enhance_net_nopool#,UNet_emb
from EnhanceN_arch import InteractNet as UHD_Net
# from EnhanceN_arch7 import InteractNet as UHD_Net
#from UHD_Net_noreuse import UHD_Net_noreuse as UHD_Net
#from swin_unet import transformer_Unet #UNet
import networks
import pytorch_ssim
from utils import *


#from torchvision.models import vgg16
#from perceptual import LossNetwork



class Noise2Noise(object):
    """Implementation of Noise2Noise from Lehtinen et al. (2018)."""

    def __init__(self, params, trainable):
        """Initializes model."""

        self.p = params
        self.trainable = trainable
        self._compile()


    
 
    def _compile(self):
        """Compiles model (architecture, loss function, optimizers, etc.)."""

        print('Noise2Noise: Learning Image Restoration without Clean Data (Lethinen et al., 2018)')

        # Model (3x3=9 channels for Monte Carlo since it uses 3 HDR buffers)
        if self.p.noise_type == 'mc':
            self.is_mc = True
            # self.model = UNet(in_channels=9)
            # = UHD_Net()#UNet_emb()#UNet_emb()#UNet()
            self.model=UHD_Net()#self.model.apply(self.weights_init) #########
        else:
            self.is_mc = False
            #self.DCE_Net = enhance_net_nopool()#UNet_emb()#UNet_emb()#UNet()
            self.model=UHD_Net()
        # Set optimizer and loss, if in training mode
        if self.trainable: 
            self.optim = Adam(self.model.parameters(),
                              lr=self.p.learning_rate, 
                              betas=self.p.adam[:2],
                              eps=self.p.adam[2]) 

            # Learning rate adjustment
            #self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim,
            #    patience=self.p.nb_epochs/4, factor=0.5, verbose=True)
            self.scheduler =torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(self.optim,T_0=2,T_mult=2) #CosineAnnealingLR
            
             
            
            # Loss function 
            if self.p.loss == 'hdr':
                assert self.is_mc, 'Using HDR loss on non Monte Carlo images'
                self.loss = HDRLoss()
            elif self.p.loss == 'l2':
                self.loss = nn.MSELoss()
            else:
                self.loss = nn.L1Loss()

        # CUDA support
        self.use_cuda = torch.cuda.is_available() and self.p.cuda
        if self.use_cuda:
            self.model = self.model.cuda()
            #self.DCE_Net=self.DCE_Net.cuda()
            if self.trainable:
                self.loss = self.loss.cuda()
        self.model = torch.nn.DataParallel(self.model)
        #self.DCE_Net = torch.nn.DataParallel(self.DCE_Net)

    def _print_params(self):
        """Formats parameters to print when training."""

        print('Training parameters: ')
        self.p.cuda = self.use_cuda
        param_dict = vars(self.p)
        pretty = lambda x: x.replace('_', ' ').capitalize()
        print('\n'.join('  {} = {}'.format(pretty(k), str(v)) for k, v in param_dict.items()))
        print()


    def save_model(self, epoch, stats, first=False):
        """Saves model to files; can be overwritten at every epoch to save disk space."""

        # Create directory for model checkpoints, if nonexistent
        if first:
            if self.p.clean_targets:
                ckpt_dir_name = f'{datetime.now():{self.p.noise_type}-clean-%H%M}'
            else:
                ckpt_dir_name = f'{datetime.now():{self.p.noise_type}-%m%d-%H%M}'
            if self.p.ckpt_overwrite:
                if self.p.clean_targets:
                    ckpt_dir_name = f'{self.p.noise_type}-clean'
                else:
                    ckpt_dir_name = self.p.noise_type

            self.ckpt_dir = os.path.join(self.p.ckpt_save_path, ckpt_dir_name)
            if not os.path.isdir(self.p.ckpt_save_path):
                os.mkdir(self.p.ckpt_save_path)
            if not os.path.isdir(self.ckpt_dir):
                os.mkdir(self.ckpt_dir)

        # Save checkpoint dictionary
        if self.p.ckpt_overwrite:
            fname_unet = '{}/n2n-{}.pt'.format(self.ckpt_dir, self.p.noise_type)
        else:
            valid_loss = stats['valid_loss'][epoch]
            fname_unet = '{}/n2n-epoch{}-{:>1.5f}.pt'.format(self.ckpt_dir, epoch + 1, valid_loss)
        print('Saving checkpoint to: {}\n'.format(fname_unet))
        torch.save(self.model.state_dict(), fname_unet)

        # Save stats to JSON
        fname_dict = '{}/n2n-stats.json'.format(self.ckpt_dir)
        with open(fname_dict, 'w') as fp:
            json.dump(stats, fp, indent=2)


    def load_model(self, ckpt_fname):
        """Loads model from checkpoint file."""

        print('Loading checkpoint from: {}'.format(ckpt_fname))
        if self.use_cuda:
            self.model.load_state_dict(torch.load(ckpt_fname))
        else:
            self.model.load_state_dict(torch.load(ckpt_fname, map_location='cpu'))


    def _on_epoch_end(self, stats, train_loss, epoch, epoch_start, valid_loader):
        """Tracks and saves starts after each epoch."""
        # import pdb;pdb.set_trace()
        # Evaluate model on validation set
        print('\rTesting model on validation set... ', end='')
        epoch_time = time_elapsed_since(epoch_start)[0]
        valid_loss, valid_time, valid_psnr = self.eval(valid_loader)
        show_on_epoch_end(epoch_time, valid_time, valid_loss, valid_psnr)

        # Decrease learning rate if plateau
        self.scheduler.step(valid_loss)

        # Save checkpoint
        stats['train_loss'].append(train_loss)
        stats['valid_loss'].append(valid_loss)
        stats['valid_psnr'].append(valid_psnr)
        self.save_model(epoch, stats, epoch == 0)

        

        # Plot stats
        if self.p.plot_stats:
            loss_str = f'{self.p.loss.upper()} loss'
            plot_per_epoch(self.ckpt_dir, 'Valid loss', stats['valid_loss'], loss_str)
            plot_per_epoch(self.ckpt_dir, 'Valid PSNR', stats['valid_psnr'], 'PSNR (dB)')

        #记录中途的
        train_val_folder = 'UHD-Net/log/train/val.txt'
        with open(train_val_folder, 'a') as f:
            f.write("EPOCH {:d} / {:d} \n".format(epoch + 1, self.p.nb_epochs))
            f.write("PSNR {:4f} \n".format(valid_psnr))

            
    def test(self, test_loader, show):
        """Evaluates denoiser on test set."""

        self.model.train(False)

        source_imgs = []
        denoised_imgs = []
        clean_imgs = []

        # Create directory for denoised images
        denoised_dir = os.path.dirname(self.p.data)
        save_path = os.path.join(denoised_dir, 'denoised')
        if not os.path.isdir(save_path):
            os.mkdir(save_path)

        for batch_idx, (source, target) in enumerate(test_loader):
            # Only do first <show> images
            # if show == 0 or batch_idx >= show:
            #     break
            #factor=torch.mean(target)/torch.mean(source)
            #factor=torch.full_like(target[0:1,:,:],factor)
            source_imgs.append(source)
            clean_imgs.append(target)

            if self.use_cuda:
                source = source.cuda()
                factor = factor.cuda() 
            # Denoise
            light_result,A,denoise_result = self.model(source).detach()
            #denoised_img=denoised_img-source #
            denoised_imgs.append(denoise_result)

        # Squeeze tensors 
        source_imgs = [t.squeeze(0) for t in source_imgs]
        denoised_imgs = [t.squeeze(0) for t in denoised_imgs]
        clean_imgs = [t.squeeze(0) for t in clean_imgs]

        # Create montage and save images
        print('Saving images and montages to: {}'.format(save_path))
        for i in range(len(source_imgs)):
            img_name = test_loader.dataset.data_list[i]
            image_path = img_name.replace('input','output')
            if not os.path.exists(image_path.replace('/'+image_path.split("/")[-1],'')):
                os.makedirs(image_path.replace('/'+image_path.split("/")[-1],''))
            result_path = image_path
            torchvision.utils.save_image(denoised_imgs[i:i+1], result_path)
            # import pdb;pdb.set_trace()
            # create_montage(img_name, self.p.noise_type, save_path, source_imgs[i], denoised_imgs[i], clean_imgs[i], show)

    @torch.no_grad()
    def eval(self, valid_loader):
        """Evaluates denoiser on validation set."""

        #self.DCE_Net.load_state_dict(torch.load('/mnt/lustre/cyli/pyuser/CVPR2020_Zero-DCE/CVPR2020_lowlight/snapshots_pretrained/Epoch99.pth'))
        #self.DCE_Net.eval()
        self.model.train(False) 

        valid_start = datetime.now() 
        loss_meter = AvgMeter()
        psnr_meter = AvgMeter()

        for batch_idx, (source, target1,haze_name) in enumerate(valid_loader):

            if self.use_cuda:
                source = source.cuda()
                #source_down = source_down.cuda()
                target1 = target1.cuda()
                #target2 = target2.cuda()
                #target3 = target3.cuda()
                #target_down = target_down.cuda()
                #factor = factor.cuda()

            # Denoise
           # light_enhanced,r=self.DCE_Net(source)
            final_result,final_result_down = self.model(source)

            # Update loss
            loss1 = self.loss(final_result, target1)
            #loss2 = self.loss(final_result2, target2)
            #loss3 = self.loss(final_result3, target3)
            loss=loss1#+loss2+loss3
            loss_meter.update(loss.item())

            # Compute PSRN
            if self.is_mc: 
                final_result = reinhard_tonemap(final_result)
            # TODO: Find a way to offload to GPU, and deal with uneven batch sizes
            #for i in range(self.p.batch_size):
            for i in range(1):
            #import pdb;pdb.set_trace()
                final_result = final_result.cpu()
                target1 = target1.cpu()
                psnr_meter.update(psnr(final_result[i], target1[i]).item())

        valid_loss = loss_meter.avg
        valid_time = time_elapsed_since(valid_start)[0] 
        psnr_avg = psnr_meter.avg

        return valid_loss, valid_time, psnr_avg
 

    def train(self, train_loader, valid_loader):  
        """Trains denoiser on training set.""" 
        #self.DCE_Net.load_state_dict(torch.load('/mnt/lustre/cyli/pyuser/CVPR2020_Zero-DCE/CVPR2020_lowlight/snapshots_pretrained/Epoch99.pth'))
        #self.DCE_Net.eval()
         
        self.model.train(True)  
   
        self._print_params() 
        num_batches = len(train_loader)
        #self.model.load_state_dict(torch.load('/mnt/lustre/cyli/pyuser/UHD-Net/ckpts_UHD/gaussian-0918-2132/n2n-epoch86-0.04895.pt'))
        #import pdb;pdb.set_trace()
        assert num_batches % self.p.report_interval == 0, 'Report interval must divide total number of batches'

        # vgg_model = vgg16(pretrained=True).features[:16]
        # vgg_model = vgg_model.cuda()
        # for param in vgg_model.parameters():
        #     param.requires_grad = False
        # loss_network = LossNetwork(vgg_model)
            
        # loss_network.eval()
        
        
        
        # Dictionaries of tracked stats
        stats = {'noise_type': self.p.noise_type, 
                 'noise_param': self.p.noise_param,
                 'train_loss': [],
                 'valid_loss': [], 
                 'valid_psnr': []}
                 
                 
        # L_color = Myloss.L_color() 
        # L_color_pair=Myloss.L_color_pair() 
        # L_spa = Myloss.L_spa()
        # L_exp = Myloss.L_exp_contr(8,0.6) 
        # #L_exp = Myloss.L_exp(16,0.6)
        # L_VGG = Myloss.perception_loss()
        # L_TV = Myloss.L_TV().cuda()
        # L_style_patch=Myloss.L_style_patch(16)   
        # L_style=Myloss.L_style()     
        L2=nn.MSELoss()
        VGG = networks.VGG19(init_weights='/mnt/data/cyf20/code/UHDFour_code_new_toref/pre_trained_VGG19_model/vgg19.pth', feature_mode=True)
        VGG.cuda() 
        VGG.eval()       
        # Main training loop  
        train_start = datetime.now() 
        for epoch in range(self.p.nb_epochs):
            print('EPOCH {:d} / {:d}'.format(epoch + 1, self.p.nb_epochs))
            

            # Some stats trackers
            epoch_start = datetime.now()
            train_loss_meter = AvgMeter()
            loss_meter = AvgMeter()
            time_meter = AvgMeter()

            # Minibatch SGD
            for batch_idx, (source, target) in enumerate(train_loader):
                batch_start = datetime.now()
                progress_bar(batch_idx, num_batches, self.p.report_interval, loss_meter.val)
                #factor=torch.mean(target)/torch.mean(source)

                if self.use_cuda:
                    source = source.cuda()
                    target = target.cuda()

                    #source_down = source_down.cuda()
                    #target_down = target_down.cuda()
                    #factor = factor.cuda()

                # Denoise image

                # utils.save_image(source_down, '/mnt/lustre/cyli/pyuser/UHD-Net/src/test/source_down.png')
                # utils.save_image(source, '/mnt/lustre/cyli/pyuser/UHD-Net/src/test/source.png')
                # utils.save_image(target_down, '/mnt/lustre/cyli/pyuser/UHD-Net/src/test/target_down.png')
                # utils.save_image(target, '/mnt/lustre/cyli/pyuser/UHD-Net/src/test/target.png')


                final_result,final_result_down = self.model(source)
                
 
                loss_l1 = 5*F.smooth_l1_loss(final_result, target) 
                loss_l1down = 0.5*F.smooth_l1_loss(final_result_down, F.interpolate(target,scale_factor=0.125,mode='bilinear')) 
                result_feature = VGG(final_result_down)
                target_feature = VGG(F.interpolate(target,scale_factor=0.125,mode='bilinear')) 
                loss_per = 0.001*L2(result_feature, target_feature) 
                loss_ssim=0.002*(1-pytorch_ssim.ssim(final_result, target))
                #perceptual_loss = loss_network(denoise_result, target)
                loss = loss_l1+loss_ssim+loss_per+loss_l1down #+ 0.01*perceptual_loss 

                       
                loss_final= loss  
                   
                loss_meter.update(loss_final.item())   

                # Zero gradients, perform a backward pass, and update the weights
                self.optim.zero_grad()
                loss_final.backward()  
                #torch.nn.utils.clip_grad_norm(self.model.parameters(),0.1) ###########new added
                self.optim.step()
                       
                # Report/update statistics
                time_meter.update(time_elapsed_since(batch_start)[1])
                if (batch_idx + 1) % self.p.report_interval == 0 and batch_idx:
                    show_on_report(batch_idx, num_batches, loss_meter.avg, time_meter.avg)
                    train_loss_meter.update(loss_meter.avg)
                    loss_meter.reset() 
                    time_meter.reset() 
                #if batch_idx==10:
                #    break

                print("total", ":", loss_final.item(),  "loss_l1", ":", loss_l1.item(),"loss_ssim", ":", loss_ssim.item())
                train_folder = 'UHD-Net/log/train/3.txt'
                with open(train_folder, 'a') as f:
                    f.write("EPOCH {:d} / {:d} \n".format(epoch + 1, self.p.nb_epochs))
                    f.write("batch {:d} / {:d} \n".format(batch_idx, num_batches))
                    f.write("total:  {:4f} loss_l1 {:4f} loss_ssim {:4f}\n".format(loss_final.item(), loss_l1.item(),loss_ssim.item()))
                #import pdb
                #pdb.set_trace() 
            # Epoch end, save and reset tracker
            self._on_epoch_end(stats, train_loss_meter.avg, epoch, epoch_start, valid_loader)
            train_loss_meter.reset()
            #import pdb
            #pdb.set_trace() 
        train_elapsed = time_elapsed_since(train_start)[0]
        print('Training done! Total elapsed time: {}\n'.format(train_elapsed))
 

class HDRLoss(nn.Module):
    """High dynamic range loss.

    Relative squared error: (denoised - target)^2 / (denoised + eps)^2,
    averaged over every element of the buffer.
    """

    def __init__(self, eps=0.01):
        """Initializes loss with numerical stability epsilon."""
        super(HDRLoss, self).__init__()
        self._eps = eps

    def forward(self, denoised, target):
        """Computes loss by unpacking render buffer."""
        squared_error = (denoised - target) ** 2
        # eps keeps the normalizer away from zero for dark pixels.
        normalizer = (denoised + self._eps) ** 2
        return (squared_error / normalizer).mean()

 
