import os
import cv2
import math
import time
import torch
import random
import matplotlib
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.utils.data as data
import torchvision.transforms as transforms
toGray=transforms.Grayscale()

from vgg import *
from utils import *
from option import args
from model import DenseNet, DenseNetNew
from pytorch_msssim import ssim
from dataset import MEFdataset, TestData, VISIRData, enhancedDotDataset
from torch.optim import Adam, lr_scheduler
from torch.utils.tensorboard import SummaryWriter
from pytorch_wavelets import DWTForward, DWTInverse
from tqdm import tqdm,trange
# Small constant to avoid division by zero in the chrominance-weight
# normalisation (see Test.test).
EPS = 1e-8
# Divisor applied to the VGG-feature-gradient weights in Train.train()
# before the softmax that balances the two source images.
c = 3500

# Loss-term weights consumed by loss_fun.  The trailing numeric comments
# appear to be earlier values of each coefficient — TODO confirm.
para={
    'coeff_edge': 6,  # 0.01
    'coeff_fus_swin': 1.2,  # 4.88
    'coeff_vgg': 2.4,  # 2.46
    'coeff_decom': 0,  # 0.97
    'coeff_wavelet': 2  # 0.01
    }


'''
# model2_1 — earlier coefficient set, kept for reference
    para={
    'coeff_edge': 0.05,           
    'coeff_fus_swin': 1.2, # initially 16
    'coeff_vgg': 0.2, # initially 1.6
    'coeff_decom': 4.5, # initially 0.5
    }'''
'''
# earlier coefficient set, kept for reference
para={
    'coeff_edge': 0.05,           
    'coeff_fus_swin': 1.2, # initially 16
    'coeff_vgg': 0.2, # initially 1.6
    'coeff_decom': 5.5, # initially 0.5
    }
'''

# Hard-coded GPU index for this training run; falls back to CPU (with a
# warning) when CUDA is unavailable.  `device` is read by the wavelet
# helpers and by get_vgg_model below.
gpu = 5
if torch.cuda.is_available():
        torch.cuda.set_device(gpu)
        device = 'cuda'
else:
        print('WARNING: [CUDA unavailable] Using CPU instead!')
        device = 'cpu'

from kornia.losses import SSIMLoss
from kornia.filters import SpatialGradient
from kornia.color import rgb_to_grayscale,ycbcr_to_rgb, rgb_to_ycbcr,grayscale_to_rgb
from kornia.enhance import equalize, invert
from SwinFusion.models.loss_vif import fusion_loss_vif,fusion_loss_vifv1
from vgg_loss import vgg_loss_fun,get_vgg_model
from trans_loss import dcom_loss

"===定义wavelet_Loss==="
def dwt2(image, wavelet='db1', mode='zero', level=2, device=device):
    """进行二维离散小波变换"""
    dwt_fn = DWTForward(J=level, wave=wavelet, mode=mode).to(device)
    return dwt_fn(image)


# Cache of DWTInverse modules keyed by configuration, mirroring the
# DWTForward cache: build each inverse transform once and reuse it.
_DWT_INVERSE_CACHE = {}


def idwt2(coeffs, wavelet='db1', mode='zero', device=device):
    """Apply the 2-D inverse discrete wavelet transform to (yl, yh) coeffs."""
    key = (wavelet, mode, str(device))
    if key not in _DWT_INVERSE_CACHE:
        _DWT_INVERSE_CACHE[key] = DWTInverse(wave=wavelet, mode=mode).to(device)
    return _DWT_INVERSE_CACHE[key](coeffs)


def image_fusion(vi_coeffs, ir_coeffs):
    """Fuse two sets of DWT coefficients by absolute-maximum selection.

    At every coefficient position the value with the larger absolute
    magnitude wins; on ties the infrared coefficient is kept.  Returns a
    (fused_yl, fused_yh) pair where fused_yh is a tuple of per-level tuples.
    """
    def pick_abs_max(a, b):
        # Elementwise: keep a where |a| > |b|, otherwise b.
        return torch.where(a.abs() > b.abs(), a, b)

    yl_vi, yh_vi = vi_coeffs
    yl_ir, yh_ir = ir_coeffs

    # Low-frequency (approximation) sub-band.
    fused_yl = pick_abs_max(yl_vi, yl_ir)

    # High-frequency sub-bands: fuse every sub-band of every level.
    fused_yh = tuple(
        tuple(pick_abs_max(sub_vi, sub_ir) for sub_vi, sub_ir in zip(level_vi, level_ir))
        for level_vi, level_ir in zip(yh_vi, yh_ir)
    )

    return (fused_yl, fused_yh)


def wavelet_loss(coeffs1, coeffs2, level):
    """Sum of mean-squared errors between two sets of DWT coefficients.

    Compares the low-frequency sub-band plus the first `level` high-
    frequency levels; each sub-band within a level contributes one MSE term.
    """
    # Low-frequency (yl) term.
    total = torch.mean((coeffs1[0] - coeffs2[0]) ** 2)

    # High-frequency (yh) terms — coeffs[1] holds one entry per level, and
    # only the first `level` levels are taken into account.
    for lvl1, lvl2 in zip(coeffs1[1][:level], coeffs2[1][:level]):
        for band1, band2 in zip(lvl1, lvl2):
            total = total + torch.mean((band1 - band2) ** 2)

    return total


def compute_wavelet_loss(vi_img_tensor, ir_img_tensor, fu_img_tensor, level=2, device=device):
    """Wavelet-domain fusion loss.

    Decomposes the visible, infrared and fused images with a `level`-deep
    DWT (used here in place of NSCT), builds a reference fusion of the
    visible/infrared coefficients by absolute-maximum selection, and
    returns the summed MSE between that reference and the fused image's
    coefficients.
    """
    vi_coeffs = dwt2(vi_img_tensor, wavelet='db1', mode='zero', level=level, device=device)
    ir_coeffs = dwt2(ir_img_tensor, wavelet='db1', mode='zero', level=level, device=device)
    fu_coeffs = dwt2(fu_img_tensor, wavelet='db1', mode='zero', level=level, device=device)

    # Reference fusion in the wavelet domain.
    fused_coeffs = image_fusion(vi_coeffs, ir_coeffs)

    # BUG FIX: the loss previously hard-coded level=2, silently ignoring the
    # `level` argument whenever a different decomposition depth was requested.
    return wavelet_loss(fused_coeffs, fu_coeffs, level=level)

# Module-level loss functions / models shared by loss_fun.
MSELoss = nn.MSELoss()  
L1Loss = nn.L1Loss()
Loss_ssim = SSIMLoss(11, reduction='mean')  # kornia SSIM loss, 11x11 window
#criteria_fusion = Fusionloss()
swinLoss=fusion_loss_vif()
swinLossv1=fusion_loss_vifv1()
vgg_model=get_vgg_model(device=device).eval()  # VGG feature extractor in eval mode (perceptual loss)
 
#print(vgg_model)
def loss_fun(ref_vis,ref_ir,ref_edge,int_out,edge_out):
    """Combined training loss for the fusion network.

    Args:
        ref_vis:  visible reference image (treated as grayscale here).
        ref_ir:   infrared reference image.
        ref_edge: reference edge map.
        int_out:  fused intensity output of the network.
        edge_out: edge output of the network.

    Returns:
        Weighted sum of the edge, SwinFusion, wavelet, decomposition and
        VGG losses, using the coefficients in `para`.
    """
    gray_vis = ref_vis

    # Recompose a fused image: edge regions take the inverted intensity
    # output, non-edge regions take the intensity output itself.
    joint_in_rvEdge = (1 - int_out) * ref_edge
    joint_in_noEdge = int_out * (1 - ref_edge)
    fus_edge = torch.clamp(joint_in_noEdge + joint_in_rvEdge, 0, 1)

    # Individual loss terms.
    edge_loss = L1Loss(ref_edge, edge_out) + MSELoss(ref_edge, edge_out)
    swin_loss, _, _, _ = swinLoss(gray_vis, ref_ir, fus_edge)
    vgg_loss = vgg_loss_fun(grayscale_to_rgb(equalize(gray_vis)), grayscale_to_rgb(int_out), vgg_model)
    decom_loss = dcom_loss(fus_edge, gray_vis) + dcom_loss(fus_edge, invert(ref_ir))
    # Renamed from `wavelet_loss`, which shadowed the module-level function
    # of the same name.
    wavelet_loss_val = compute_wavelet_loss(gray_vis, ref_ir, int_out, level=2)

    # Weighted combination.  The original `loss =+ ...` was a typo for `=`
    # (numerically identical, unary plus is a no-op); it also ended with a
    # trailing backslash continuing onto a blank line, which was fragile.
    loss = para['coeff_edge'] * edge_loss \
         + para['coeff_fus_swin'] * swin_loss \
         + para['coeff_wavelet'] * wavelet_loss_val \
         + para['coeff_decom'] * decom_loss \
         + para['coeff_vgg'] * vgg_loss

    return loss
class Train(object):
    """Training driver: builds the model, dataset and optimiser, then runs
    the SSIM + MSE weighted training loop and saves checkpoints/curves."""

    def __init__(self):
        self.num_epochs = args.epochs
        self.lr = args.lr

        #self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.5], std=[0.5])])
        self.transform = transforms.Compose([transforms.ToTensor()])
        # self.train_set = VISIRData(set_prefix='/home/ubuntu/workspace/data/msrs',split='train')
        #self.train_set = enhancedDotDataset(data_pth='/home/ubuntu/workspace/data/msrs',split='train')
        self.train_set = VISIRData(set_prefix='/home/ubuntu/workspace/data/roadscene',split='train')
        self.train_loader = data.DataLoader(self.train_set, batch_size=args.batch_size,
                                            shuffle=True, num_workers=4, pin_memory=False,drop_last=True)

        self.model = DenseNetNew().cuda()
        # VGG16 used (under no_grad) to derive per-image loss weights.
        self.feature_model = vgg16().cuda()
        self.feature_model.load_state_dict(torch.load('vgg16.pth'))
        self.optimizer = Adam(self.model.parameters(), lr=self.lr)
        self.scheduler = lr_scheduler.ExponentialLR(self.optimizer, gamma=0.9)
        #self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', patience=lr_patience,factor=lr_factor)
        self.loss_mse = nn.MSELoss(reduction='mean').cuda()
        self.train_loss_1 = []  # per-epoch mean SSIM loss
        self.train_loss_2 = []  # per-epoch mean MSE loss
        if args.validation:
            self.val_list = []
            self.best_psnr = 0

    def train(self):
        """Run the full training loop, logging to TensorBoard and saving a
        checkpoint after every epoch plus final loss-curve plots."""
        seed = args.seed
        random.seed(seed)
        torch.manual_seed(seed)
        writer = SummaryWriter(log_dir=args.log_dir, filename_suffix='train_loss')
        running_loss = 0.0  # kept for the commented-out ReduceLROnPlateau path
        '''if os.path.exists(args.model_path + args.model):
            print('===> Loading pre-trained model......')
            state = torch.load(args.model_path + args.model)
            self.model.load_state_dict(state['model'])
            self.train_loss_1 = state['train_loss_1']
            self.train_loss_2 = state['train_loss_2']'''
            # self.lr = state['lr']

        for ep in trange(self.num_epochs):
            ep_loss_1 = []
            ep_loss_2 = []
            train_acc_loss = 0
            for batch, (over, under, refedge) in enumerate(self.train_loader):
                # Both `over` and `under` are expected to lie in [0, 1].
                over = over[:, -3:, :, :]
                under = under[:, :1, :, :]
                over = toGray(over).cuda()
                under = under.cuda()

                # Derive per-image softmax weights from the squared VGG
                # feature gradients of each source (no grad needed here).
                with torch.no_grad():
                    feat_1 = torch.cat((over, over, over), dim=1)
                    feat_1 = self.feature_model(feat_1)
                    feat_2 = torch.cat((under, under, under), dim=1)
                    feat_2 = self.feature_model(feat_2)

                    for i in range(len(feat_1)):
                        m1 = torch.mean(features_grad(feat_1[i]).pow(2), dim=[1, 2, 3])
                        m2 = torch.mean(features_grad(feat_2[i]).pow(2), dim=[1, 2, 3])
                        if i == 0:
                            w1 = torch.unsqueeze(m1, dim=-1)
                            w2 = torch.unsqueeze(m2, dim=-1)
                        else:
                            w1 = torch.cat((w1, torch.unsqueeze(m1, dim=-1)), dim=-1)
                            w2 = torch.cat((w2, torch.unsqueeze(m2, dim=-1)), dim=-1)
                    weight_1 = torch.mean(w1, dim=-1) / c
                    weight_2 = torch.mean(w2, dim=-1) / c
                    weight_list = torch.cat((weight_1.unsqueeze(-1), weight_2.unsqueeze(-1)), -1)
                    weight_list = F.softmax(weight_list, dim=-1)

                over = over.cuda()
                under = under.cuda()
                refedge = refedge.cuda()
                self.optimizer.zero_grad()
                torch.cuda.synchronize()
                start_time = time.time()
                fused_img, edgeout = self.model(over, under)
                torch.cuda.synchronize()
                end_time = time.time()
                # Network output is in [-1, 1]; map to [0, 1] for the losses.
                adjfused_img = (fused_img + 1) / 2

                # adjover = (over + 1) / 2
                # adjunder = (under + 1) / 2
                adjover = over
                adjunder = under

                loss_1 = weight_list[:, 0] * (1 - ssim(adjfused_img, adjover, nonnegative_ssim=True)) \
                         + weight_list[:, 1] * (1 - ssim(adjfused_img, adjunder, nonnegative_ssim=True))
                loss_1 = torch.mean(loss_1)

                loss_2 = weight_list[:, 0] * self.loss_mse(adjfused_img, adjover) \
                         + weight_list[:, 1] * self.loss_mse(adjfused_img, adjunder)
                loss_2 = torch.mean(loss_2)
                #loss_3 = loss_fun(over, under, refedge, fused_img, edgeout)
                loss = loss_1 + 20*loss_2  # loss origin
                #loss = loss_1 + 20*loss_2 + 0.1*loss_3  # loss new
                ep_loss_1.append(loss_1.item())
                ep_loss_2.append(loss_2.item())
                loss.backward()
                self.optimizer.step()
                train_acc_loss = train_acc_loss + loss.item()
                if batch % 50 == 0 and batch != 0:
                    # BUG FIX: the original format string also printed
                    # loss_3.item(), but loss_3 is only defined when the
                    # (currently commented-out) loss_fun term is enabled,
                    # so logging raised a NameError every 50 batches.
                    print('Epoch:{}\tcur/all:{}/{}\tLoss_1:{:.4f}\tLoss_2:{:.4f}\t'
                          'Time:{:.2f}s'.format(ep + 1, batch,
                                                len(self.train_loader),
                                                loss_1.item(),
                                                loss_2.item(),
                                                end_time - start_time))

            self.scheduler.step()
            #self.scheduler.step(running_loss)
            self.train_loss_1.append(np.mean(ep_loss_1))
            self.train_loss_2.append(np.mean(ep_loss_2))

            # Checkpoint every epoch (overwrites the same file).
            state = {
                'model': self.model.state_dict(),
                'train_loss_1': self.train_loss_1,
                'train_loss_2': self.train_loss_2,
                'lr': self.optimizer.param_groups[0]['lr']
            }
            torch.save(state, args.model_path + args.model)

            lr = self.optimizer.param_groups[0]['lr']
            writer.add_scalar('ssim_loss', np.mean(ep_loss_1), ep)
            writer.add_scalar('mse_loss', np.mean(ep_loss_2), ep)
            writer.add_scalar('lr', lr, ep)

            if args.train_test:
                t = Test(ep)
                t.test()

        # Save loss curves as images (Agg backend: no display required).
        matplotlib.use('Agg')
        plt.figure()
        plt.plot(self.train_loss_1)
        plt.savefig('train_loss_curve_1.png')
        plt.figure()
        plt.plot(self.train_loss_2)
        plt.savefig('train_loss_curve_2.png')
        print('===> Finished Training!')


class Test(object):
    """Inference driver: fuses each Y-channel pair with the trained model,
    merges weighted chrominance channels, and writes BGR images to disk."""

    def __init__(self, ep=None):
        # Epoch index (None when run standalone); selects the save directory.
        self.ep = ep
        self.transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean=[0.5, 0.5, 0.5],
                                                                                         std=[0.5, 0.5, 0.5])])
        self.batch_size = 1
        self.test_set = TestData(self.transform)
        self.test_loader = data.DataLoader(self.test_set, batch_size=1, shuffle=False,
                                           num_workers=0, pin_memory=False)
        self.model = DenseNet().cuda()
        self.state = torch.load(args.model_path + args.model)
        self.model.load_state_dict(self.state['model'])

    def test(self):
        self.model.eval()
        with torch.no_grad():
            for batch, imgs in enumerate(self.test_loader):
                print('Processing picture No.{}'.format(batch + 1))
                imgs = torch.squeeze(imgs, dim=0)
                # Y (luma) channels of the two source images.
                img1_y = imgs[0:1, 0:1, :, :].cuda()
                img2_y = imgs[1:2, 0:1, :, :].cuda()

                # Chrominance fusion: weighted average across the two images
                # with weights proportional to |channel| (EPS avoids
                # division by zero).
                img_cr = imgs[:, 1:2, :, :].cuda()
                img_cb = imgs[:, 2:3, :, :].cuda()
                w_cr = (torch.abs(img_cr) + EPS) / torch.sum(torch.abs(img_cr) + EPS, dim=0)
                w_cb = (torch.abs(img_cb) + EPS) / torch.sum(torch.abs(img_cb) + EPS, dim=0)
                fused_img_cr = torch.sum(w_cr * img_cr, dim=0, keepdim=True).clamp(-1, 1)
                fused_img_cb = torch.sum(w_cb * img_cb, dim=0, keepdim=True).clamp(-1, 1)

                fused_img_y = self.model(img1_y, img2_y)
                fused_img = torch.cat((fused_img_y, fused_img_cr, fused_img_cb), dim=1)
                # Map [-1, 1] back to [0, 255] and convert YCrCb -> BGR.
                fused_img = (fused_img + 1) * 127.5
                fused_img = fused_img.squeeze(0)
                fused_img = fused_img.cpu().numpy()
                fused_img = np.transpose(fused_img, (1, 2, 0))
                fused_img = fused_img.astype(np.uint8)
                fused_img = cv2.cvtColor(fused_img, cv2.COLOR_YCrCb2BGR)

                # BUG FIX: `if self.ep:` treated epoch 0 as "no epoch" and
                # wrote its results into the base save directory.
                if self.ep is not None:
                    save_path = args.save_dir + str(self.ep) + '_epoch/'
                else:
                    save_path = args.save_dir

                # exist_ok avoids a race when the directory already exists.
                os.makedirs(save_path, exist_ok=True)
                cv2.imwrite((save_path + str(batch + 1) + args.ext), fused_img)
            print('Finished testing!')
