import os
import importlib
import pickle
import json
import torch
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
# Grayscale transform applied to visible-light images before they enter the model.
toGray=transforms.Grayscale()
from kornia.color import rgb_to_ycbcr,ycbcr_to_rgb
# from ignite.metrics import SSIM
from model import DenseNet, DenseNetNew
from dataset import VISIRData, enhancedDotDataset
from einops import rearrange

# Select the compute device: a specific GPU if CUDA is available, CPU otherwise.
gpu = 3
if torch.cuda.is_available():
    torch.cuda.set_device(gpu)
    device = 'cuda'
else:
    print('WARNING: [CUDA unavailable] Using CPU instead!')
    device = 'cpu'


# Path to the saved model checkpoint (replace with the actual file path).
model_path = '/home/ubuntu/workspace/U2Fusion-pytorch/model/model_road.pth'

# Load the saved state dict.
# map_location is required so a checkpoint saved on a GPU still loads when the
# CPU fallback above is taken (without it, torch.load raises on CUDA-less hosts).
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
state = torch.load(model_path, map_location=device)

# Instantiate the model and move it to the selected device.
model = DenseNetNew().to(device)
# Apply the loaded parameters to the model (checkpoint stores them under 'model').
model.load_state_dict(state['model'])
# model.load_state_dict(state)
import torchvision.transforms as transforms  # NOTE(review): duplicate of the top-of-file import; redundant but harmless
# ------------------- Load the datasets
'''vis_image = torch.cat((vis_image_raw,vis_sobel,vis_laplace,vis_image_equalized),dim=0)'''
# dataset_msrs=VISIRData(set_prefix='/home/ubuntu/workspace/data/msrs',split='test')
#dataset_msrs=enhancedDotDataset(data_pth='/home/ubuntu/workspace/data/msrs',split='test')
dataset_road = VISIRData(set_prefix='/home/ubuntu/workspace/data/roadscene',split='test')
#dataset_m3fd=AuxDatasetTest(data_pth='../data/M3FD_Fusion')
#dataset_road=AuxDatasetTest(data_pth='../data/roadscene')
#dataset_tno=AuxDatasetTest(data_pth='../data/tno')

# Map dataset display names to dataset objects; metrics() looks datasets up here.
set_dic={
    #'MSRS': dataset_msrs,
    'ROAD': dataset_road,

    }

"===可视化==="
from kornia import filters, enhance

canny = filters.Canny(low_threshold=0.55, high_threshold=0.6)
plot_title = ['Origin VIS',
              'Origin IR',
              'Recons Intensity',
              'Recons Edge',
              'Edge Fusion',
              'Recolor', ]


# NOTE(review): dead code — the entire function below is wrapped in a string
# literal (i.e. commented out). It visualized one sample's fusion and recolor
# outputs. Kept byte-identical; consider deleting or restoring it properly.
'''def ouput_visual(data_idx, set_name: str, set_dic: dict, plot_title: list, model):
    dataset = set_dic.get(set_name)
    vis_image, ir_image, refedge = dataset[data_idx]
    vis_image_gray = toGray(vis_image[:3, :, :])
    ir_image = ir_image[:1, :, :]
    # print(vis_image.shape)  # [8, 480, 640]
    # print(ir_image.shape)  # [3, 480, 640]
    # if set_name == 'TNO':
    #     data_VIS = data_VIS.expand(3, -1, -1)  # tno的vis只有一个维度

    # device = model.device  # 模型所在的设备
    print("Model is on device:", device)

    print("Dataset: ", set_name)

    # Convert to single channel if necessary
    # if vis_image.shape[0] == 3:  # Check if it is a 3-channel image
    #     vis_image = torch.mean(vis_image, dim=0, keepdim=True)  # Convert to grayscale by taking the mean

    # if ir_image.shape[0] == 3:  # Check if it is a 3-channel image
    #     ir_image = torch.mean(ir_image, dim=0, keepdim=True)  # Convert to grayscale by taking the mean：[3, 480, 640]→[1, 480, 640]

    # print(vis_image.shape)  # [8, 480, 640]
    # print(ir_image.shape)  # [1, 480, 640]
    # 将输入数据放置到模型的设备上
    vis_image = vis_image.to(device)
    ir_image = ir_image.to(device)

    with torch.no_grad():
        # 设置输入数据到模型中
        # print(vis_image.shape)  # [8, 480, 640]
        # print(ir_image.shape)  # [1, 480, 640]
        # vis_image = vis_image.unsqueeze(dim=0)
        # ir_image = ir_image.unsqueeze(dim=0)
        fused_img, edge_out = model(vis_image_gray.unsqueeze(dim=0).to(device),
                                    ir_image.unsqueeze(dim=0).to(device))  # 模型输出
        # fused_img = (fused_img + 1) / 2
        # edge_out = (edge_out + 1) / 2

        # 验证fused_img和edge_out的结果是否在[0,1]上
        print('fused_img - max:', torch.max(fused_img).item(), 'min:', torch.min(fused_img).item())
        print('edge_out - max:', torch.max(edge_out).item(), 'min:', torch.min(edge_out).item())

    vis_image = vis_image.cpu()
    ir_image = ir_image.cpu()
    int_out = fused_img.cpu()
    edge_out = edge_out.cpu()

    joint_in_rvEdge = ((1 - int_out) * edge_out)
    joint_in_noEdge = int_out * (1 - edge_out)

    fus_edge = joint_in_noEdge + joint_in_rvEdge

    vis_image = vis_image.squeeze(dim=0)
    ir_image = ir_image.squeeze(dim=0)
    # 获取ycbcr格式的vis
    ycbcr_vis = rgb_to_ycbcr(vis_image[-3:, :, :].unsqueeze(dim=0))
    # ycbcr_vis[:, 1:, :, :]选择ycbcr_vis张量的第2个通道（Cb）及第3个通道（Cr）的数据
    # 并与fus_edge进行拼接，即“上色”
    recolor = torch.cat((fus_edge.cpu(), ycbcr_vis[:, 1:, :, :]), dim=1)
    # recolor之后再转换成rgb,clamp的范围是[0,1]
    # rgb_recolor = torch.clamp(ycbcr_to_rgb(recolor), 0, 1)
    rgb_recolor = ycbcr_to_rgb(recolor)

    #print('fused_img - max:', torch.max(int_out).item(), 'min:', torch.min(int_out).item())

    # 网络输出对比
    # plt.figure(figsize=(12, 5))
    # plt.subplot(1, 2, 1)
    # plt.title('Edge Fusion')
    # # print(fus_edge.shape)
    # plt.imshow(fus_edge.squeeze(),cmap='gray')
    # plt.axis('off')

    # plt.subplot(1, 2, 2)
    # plt.title('Recolor')
    plt.imshow(rearrange(rgb_recolor.squeeze(), 'c w h -> w h c'))
    plt.axis('off')
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # 调整图像边距
    plt.savefig('images_output/wave_176.png')
    plt.show()
    # plt_net_out_v3(plot_title,
    #                vis_image[:3, :, :],
    #                ir_image[:1, :, :],
    #                int_out,
    #                edge_out,
    #                fus_edge,
    #                rgb_recolor)
    return None'''

# Output the eval result and intermediate visuals (disabled — dead code in a string literal).
'''ouput_visual(data_idx=176, set_name='MSRS',
             set_dic=set_dic, plot_title=plot_title, model=model)'''

# Report which device the evaluation below will run on.
print('device',device)
from tqdm import trange
import torch
import numpy as np
from torchvision import transforms
import skimage as ski
from Evaluator import Evaluator
# Grayscale transform (re-created here; identical to the top-of-file definition).
toGray=transforms.Grayscale()


def metrics(set_name: str, set_dic: dict, model):
    """Evaluate a fusion model on one dataset and print the averaged metrics.

    For every sample the visible image is converted to grayscale, the IR image
    is reduced to its first channel, and the model's intensity output is scored
    against both sources with eight standard fusion metrics.

    Args:
        set_name: Key into ``set_dic`` selecting the dataset to evaluate.
        set_dic: Mapping of dataset display names to dataset objects.
        model: Fusion network taking (gray visible, IR) batches and returning
            (fused intensity, edge) outputs.

    Returns:
        np.ndarray with the eight metric values (EN, SD, SF, MI, SCD, VIFF,
        Qabf, SSIM) averaged over the dataset. (The original returned None;
        returning the array is backward-compatible — the caller ignores it.)
    """
    dataset = set_dic.get(set_name)
    len_set = len(dataset)
    # Put the model in inference mode so dropout/batch-norm layers behave
    # deterministically during evaluation (the original never called this).
    model.eval()
    metric_names = ('EN', 'SD', 'SF', 'MI', 'SCD', 'VIFF', 'Qabf', 'SSIM')
    with torch.no_grad():
        metric_result = np.zeros(len(metric_names))
        for i in trange(len_set):
            vis_image, ir_image, refedge = dataset[i]
            vis_image_gray = toGray(vis_image[:3, :, :])
            ir_image = ir_image[:1, :, :]
            fused_img, edge_out = model(vis_image_gray.unsqueeze(dim=0).to(device),
                                        ir_image.unsqueeze(dim=0).to(device))
            # edge_out and the original fus_edge recombination were never used
            # by the scored metrics below — that dead per-sample work is omitted.
            ir_image = ir_image.cpu()
            int_out = fused_img.cpu()
            # Convert to uint8-range float32 arrays, the format Evaluator expects.
            vi = ski.util.img_as_ubyte(vis_image_gray.squeeze().numpy()).astype('float32')
            ir = ski.util.img_as_ubyte(ir_image.squeeze().numpy()).astype('float32')
            fi = ski.util.img_as_ubyte(int_out.squeeze().numpy()).astype('float32')
            metric_result += np.array([Evaluator.EN(fi), Evaluator.SD(fi),
                                       Evaluator.SF(fi), Evaluator.MI(fi, ir, vi),
                                       Evaluator.SCD(fi, ir, vi), Evaluator.VIFF(fi, ir, vi),
                                       Evaluator.Qabf(fi, ir, vi), Evaluator.SSIM(fi, ir, vi)])
        metric_result /= len_set
    print('Dataset: ', set_name)
    # Identical output format to the original hand-concatenated string.
    print('\t' + ', '.join(f'{name}: {np.round(val, 2)}'
                           for name, val in zip(metric_names, metric_result)))
    return metric_result

# Run the evaluation on the RoadScene test split.
metrics(set_name='ROAD',set_dic=set_dic,model=model)