import os
import importlib
import pickle
import json
import torch
import plotly.graph_objs as go
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
toGray=transforms.Grayscale()  # RGB -> single-channel grayscale transform; reused by ouput_visual below
from kornia.color import rgb_to_ycbcr,ycbcr_to_rgb
# from ignite.metrics import SSIM
from model import DenseNet, DenseNetNew
from dataset import VISIRData, enhancedDotDataset
from einops import rearrange

# Select the compute device: pin GPU `gpu` when CUDA is present, otherwise
# warn and fall back to the CPU.
gpu = 3
if not torch.cuda.is_available():
    print('WARNING: [CUDA unavailable] Using CPU instead!')
    device = 'cpu'
else:
    torch.cuda.set_device(gpu)
    device = 'cuda'


# Path of the checkpoint holding the trained model state.
model_path = '/home/ubuntu/workspace/U2Fusion-pytorch/model/model_msrs_origin.pth'

# Load the saved state dict. `map_location=device` lets a checkpoint that was
# saved from CUDA tensors be restored on a CPU-only machine instead of
# raising a device-deserialization error.
state = torch.load(model_path, map_location=device)

# Instantiate the network and restore its trained parameters.
model = DenseNetNew().to(device)
model.load_state_dict(state['model'])

# Test split of the MSRS dataset, keyed by name so ouput_visual can look it up.
dataset_msrs = enhancedDotDataset(data_pth='/home/ubuntu/workspace/data/msrs', split='test')

set_dic = {'MSRS': dataset_msrs}

# === Visualization ===
# (was a bare string literal acting as a section marker — a no-op expression
# statement; a comment is the idiomatic form)
from kornia import filters, enhance

# Canny edge detector; not referenced elsewhere in this chunk — presumably
# kept for experiments. TODO(review): confirm it is still needed.
canny = filters.Canny(low_threshold=0.55, high_threshold=0.6)

# Subplot titles for the comparison grid (most of which is currently
# disabled inside ouput_visual).
plot_title = ['Origin VIS',
              'Origin IR',
              'Recons Intensity',
              'Recons Edge',
              'Edge Fusion',
              'Recolor', ]


def ouput_visual(data_idx, set_name: str, set_dic: dict, plot_title: list, model):
    """Run `model` on one dataset sample and save/show the recolored fusion.

    Args:
        data_idx: Index of the sample within the chosen dataset.
        set_name: Key into `set_dic` selecting the dataset.
        set_dic: Mapping of dataset name -> dataset object.
        plot_title: Subplot titles (kept for interface compatibility; unused
            since the multi-panel plot below was disabled).
        model: Fusion network taking (gray visible, infrared) batches and
            returning (intensity, edge) maps in [-1, 1].

    Side effects:
        Writes 'images_output/origin_176.png' and shows the figure.
    """
    dataset = set_dic.get(set_name)
    # Dataset items: (visible RGB, infrared, joint intensity, reference edge);
    # the last two are not needed for this visualization.
    vis_image, ir_image, _joint_int, _refedge = dataset[data_idx]
    # assumes images are channel-first (C, H, W) tensors — TODO confirm
    vis_image_gray = toGray(vis_image[:3, :, :])
    ir_image = ir_image[:1, :, :]

    print("Model is on device:", device)
    print("Dataset: ", set_name)

    with torch.no_grad():
        # Model outputs are in [-1, 1]; rescale to [0, 1] for display.
        fused_img, edge_out = model(vis_image_gray.unsqueeze(dim=0).to(device),
                                    ir_image.unsqueeze(dim=0).to(device))
        fused_img = (fused_img + 1) / 2
        edge_out = (edge_out + 1) / 2

    int_out = fused_img.cpu()
    edge_out = edge_out.cpu()

    # Edge blend: keep the intensity where there is no edge, invert it on
    # edges. Computed but currently not plotted (panel disabled).
    fus_edge = int_out * (1 - edge_out) + (1 - int_out) * edge_out

    # Recolor: use the fused intensity as luma (Y) and reuse the visible
    # image's chroma (Cb/Cr), then convert back to RGB.
    ycbcr_vis = rgb_to_ycbcr(vis_image[-3:, :, :].unsqueeze(dim=0))
    recolor = torch.cat((int_out, ycbcr_vis[:, 1:, :, :]), dim=1)
    rgb_recolor = ycbcr_to_rgb(recolor)

    plt.title('Recolor')
    # NOTE(review): the pattern 'c w h -> w h c' names the spatial axes
    # oddly, but the permutation (channels last) is what imshow needs.
    plt.imshow(rearrange(rgb_recolor.squeeze(), 'c w h -> w h c'))
    plt.axis('off')
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)  # remove margins
    # TODO(review): hard-coded output name; consider deriving from data_idx.
    plt.savefig('images_output/origin_176.png')
    plt.show()

    return None

# Run the evaluation/visualization pipeline on one sample.
ouput_visual(176, 'MSRS', set_dic=set_dic,
             plot_title=plot_title, model=model)

# --- Evaluation section: imports for metric computation that follows ---
print('device',device)
from tqdm import trange
import torch  # NOTE(review): duplicate of the top-of-file import; harmless but redundant.
import numpy as np
from torchvision import transforms
import skimage as ski
from Evaluator import Evaluator
toGray=transforms.Grayscale()  # NOTE(review): rebinds the toGray already defined near the top.
