from flask import Blueprint, request, send_file
import os
from PIL import Image
import torch
import torchvision
import torch.optim
import os
import numpy as np
from PIL import Image
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader
from tqdm import tqdm
import pandas as pd
import datetime
import requests

from entity.Response import success,error

from DCENet import model as Dce

from MambaNet.Models.OTEModel import Model
from MambaNet.Config import config
from MambaNet.utils.APIs.APIDataset import APIDataset
from MambaNet.utils.APIs.APIEncode import api_encode
from MambaNet.utils.APIs.APIMetric import api_metric
from MambaNet.utils.DataProcess import Processor
from MambaNet.utils.common import train_val_split,save_model
from MambaNet.Trainer import Trainer

from pytorch_grad_cam import GradCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image

from db import db
from config import BaseConfig
import pymysql.cursors
from entity.Configuration import Configuration

from socketIO import socketIO as socketio
from threading import Lock
# Handle of the background training thread, guarded by thread_lock.
SocketThread = None
thread_lock = Lock()
# May the model keep training? (set False by the client to stop early)
modelContinue = False

modelController = Blueprint('modelController',__name__)

BATCH_IMAGE_UPLOAD_FOLDER = './controller/LocalBatchImage/'
IMAGE_UPLOAD_FOLDER = './controller/LocalImage/'
CSV_UPLOAD_FOLDER =  './controller/LocalCsv/'

IMAGE_RESULT_FOLDER = './controller/ResultImage/'
CSV_RESULT_FOLDER = './controller/ResultCsv/'

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# Build the brightness-enhancement (Zero-DCE) network up front.
scale_factor = 1
DECNet_weight = './DCENet/snapshot_epochs/Epoch49.pth'
DCE_net = Dce.enhance_net_nopool(scale_factor).to(device)
# map_location keeps GPU-saved checkpoints loadable on CPU-only hosts.
DCE_net.load_state_dict(torch.load(DECNet_weight, map_location=device))
DCE_net.eval()

# Build the multimodal network up front and load its weights.

# Use raw pymysql here instead of flask_sql (no app context at import time).
pyDB = pymysql.connect(host=BaseConfig.HOST,
                       user=BaseConfig.USERNAME,
                       password=BaseConfig.PASSWORD,
                       database=BaseConfig.DBNAME)
try:
    # Context-managed cursor so it is closed even if the query fails.
    with pyDB.cursor() as cursor:
        cursor.execute('select * from configuration')
        modelConfiguratino = cursor.fetchone()
finally:
    pyDB.close()

mambaModel = Model(config).to(device)
# Column 1 of the configuration row holds the weight file name.
# NOTE(review): assumes the configuration table is non-empty — confirm.
load_model_path = './MambaNet/save_models/CMAT/' + modelConfiguratino[1]
mambaModel.load_state_dict(torch.load(load_model_path, map_location=device))
mambaModel.eval()
# Grab the image-only (single-modality) sub-network for Grad-CAM use.
cv_model = mambaModel.img_model

# User-supplied custom training configuration (filled in via socket).
customConfig = {}

@socketio.on('connect', namespace='/model')
def model_connect():
    """Log a successful client connection on the /model namespace."""
    print('namespace=model连接成功')

@socketio.on('receiveParam', namespace='/model')
def receive_param(args):
    """Store the client's training form and launch the background training
    thread when one is not already running."""
    global customConfig, modelContinue, SocketThread
    customConfig = args['configForm']
    modelContinue = True
    with thread_lock:
        already_running = SocketThread is not None
        if not already_running:
            SocketThread = socketio.start_background_task(target=background_thread)

@socketio.on('modelStop', namespace='/model')
def model_stop():
    """Flip the global flag so the training loop stops at the next epoch check."""
    global modelContinue
    modelContinue = False
    print('namespace=model停止训练')

@socketio.on('startBatchDiagnosis',namespace='/model')
def start_batch_diagnosis(args):
    """Run the multimodal model over every row of an uploaded CSV/XLSX file.

    Emits one 'batch_diagnosis_response' event per patient row (with base64
    images plus 0/1 flags for the 8 disease columns) and finally "[OVER]".
    The per-row predictions are also appended to the sheet and saved under
    CSV_RESULT_FOLDER.

    args['csvFilename'] names a file previously stored in CSV_UPLOAD_FOLDER;
    rows must contain 'Left-Fundus'/'Right-Fundus' image names and the eight
    label columns N,D,G,C,A,H,M,O.
    """
    print('start batch diagnosis')
    csvFilename = args['csvFilename']
    # Use the LAST dot-separated token so names like 'a.b.csv' still work
    # (matches the extension check in uploadCsv).
    fileType = csvFilename.split('.')[-1]
    csv_path = CSV_UPLOAD_FOLDER + csvFilename
    if fileType == 'csv':
        df = pd.read_csv(csv_path)
    elif fileType == 'xlsx':
        # Read the workbook and use its first sheet.
        xls = pd.ExcelFile(csv_path)
        exchanges = xls.sheet_names
        df = pd.read_excel(csv_path, sheet_name=exchanges[0])
    else:
        return error(message='请上传CSV/XLSX文件')

    # dfResult keeps every original column and gains 8 'predict X' columns.
    dfResult = df
    predCol = [[] for _ in range(8)]
    # Probability cut-off for calling a disease positive.
    threshold = 0.8
    # Hoisted out of the loop: the original defined this inside the row loop,
    # raising NameError after the loop when the dataframe was empty.
    abbreviationList = ['N','D','G','C','A','H','M','O']
    print('----- [Batch Loading]')
    for index, row in tqdm(df.iterrows()):
        data = []
        left_image_filename, right_image_filename = row['Left-Fundus'], row['Right-Fundus']

        left_image_path = BATCH_IMAGE_UPLOAD_FOLDER + '/' + left_image_filename
        right_image_path = BATCH_IMAGE_UPLOAD_FOLDER + '/' + right_image_filename

        # Fetch the textual description of the two fundus images from the
        # remote captioning service.
        formData = {
            "leftFilename": left_image_filename,
            "rightFilename": right_image_filename
        }
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.post('http://yiyuy.nat300.top/getImageDescription', json=formData, headers=headers)
        response = response.json()
        describe = response['data']
        print(describe)

        label = df.loc[index, ['N','D','G','C','A','H','M','O']]
        label = label.to_numpy(dtype=np.float32)
        label = torch.tensor(label)

        data.append((left_image_path, right_image_path, describe, label))
        ################## wrap the single sample in a DataLoader ####################
        dataset_inputs = api_encode(data,config,'test')
        dataset = APIDataset(*dataset_inputs)
        dataloader = DataLoader(dataset=dataset, **config.checkout_params, collate_fn=dataset.collate_fn, drop_last=False)

        ################## predict ###############################
        # Walks through the 8 disease positions of the current row.
        finalIndex = 0
        for batch in tqdm(dataloader, desc='----- [Batch Predicting] '):
            with torch.no_grad():
                texts, texts_mask, imgs, labels = batch
                texts, texts_mask, imgs = texts.to(device), texts_mask.to(device), imgs.to(device)
                pred_vec = torch.sigmoid(mambaModel(texts, texts_mask, imgs))
                pred_labels = pred_vec.cpu().detach().numpy().tolist()

                obj = {
                    'leftFundus': '',
                    'rightFundus': '',
                    'N': '',
                    'D': '',
                    'G': '',
                    'C': '',
                    'A': '',
                    'H': '',
                    'M': '',
                    'O': ''
                }

                for prob_list in pred_labels:
                    obj['leftFundus'] = return_img_stream(left_image_path)
                    obj['rightFundus'] = return_img_stream(right_image_path)
                    for prob in prob_list:
                        have = 1 if prob >= threshold else 0
                        obj[abbreviationList[finalIndex]] = have
                        predCol[finalIndex].append(have)
                        finalIndex = finalIndex + 1
                    socketio.emit('batch_diagnosis_response', obj, namespace='/model')

    # Append the per-disease prediction columns and persist the result sheet.
    for abbreviationIndex, abbreviation in enumerate(abbreviationList):
        dfResult['predict ' + abbreviation] = predCol[abbreviationIndex]
    dfResult.to_csv(CSV_RESULT_FOLDER + csvFilename)
    socketio.emit('batch_diagnosis_response', "[OVER]", namespace='/model')

@socketio.on('uniappStartBatchDiagnosis',namespace='/model')
def uniapp_start_batch_diagnosis(args):
    """Batch diagnosis entry point for the uniapp client.

    args carries parallel lists 'leftFilename'/'rightFilename'; for every pair
    a 'uniapp_batch_diagnosis_response' event with the raw probability list is
    emitted, followed by a terminating "[OVER]" event.
    """
    print('uniapp start batch diagnosis')
    leftFilenameList = args['leftFilename']
    rightFilenameList = args['rightFilename']

    for leftname,rightname in zip(leftFilenameList,rightFilenameList):
        data = []

        left_image_path = BATCH_IMAGE_UPLOAD_FOLDER + '/' + leftname
        right_image_path = BATCH_IMAGE_UPLOAD_FOLDER + '/' + rightname

        # Fetch the textual description from the remote captioning service.
        formData = {
            "leftFilename": leftname,
            "rightFilename": rightname
        }
        headers = {
            'Content-Type': 'application/json'
        }
        response = requests.post('http://yiyuy.nat300.top/getImageDescription', json=formData, headers=headers)
        response = response.json()
        describe = response['data']

        # The label is ignored at inference time; any placeholder values work.
        label = [0,0,0,0,0,7,2,1]
        label = np.array(label, dtype=np.float32)
        label = torch.tensor(label)

        data.append((left_image_path, right_image_path, describe, label))

        ################## wrap the single sample in a DataLoader ####################
        dataset_inputs = api_encode(data,config,'test')
        dataset = APIDataset(*dataset_inputs)
        dataloader = DataLoader(dataset=dataset, **config.checkout_params, collate_fn=dataset.collate_fn, drop_last=False)

        for batch in tqdm(dataloader, desc='----- [Batch Predicting] '):
            with torch.no_grad():
                texts, texts_mask, imgs, labels = batch
                texts, texts_mask, imgs = texts.to(device), texts_mask.to(device), imgs.to(device)
                pred_vec = torch.sigmoid(mambaModel(texts, texts_mask, imgs))
                pred_labels = pred_vec.cpu().detach().numpy().tolist()

                # Emit one event per sample in the batch. The original emitted
                # only after this loop, dropping all but the last sample and
                # raising NameError when pred_labels was empty.
                for prob_list in pred_labels:
                    obj = {
                        'leftFilename': leftname,
                        'rightFilename': rightname,
                        'prob': prob_list
                    }
                    socketio.emit('uniapp_batch_diagnosis_response', obj, namespace='/model')
    socketio.emit('uniapp_batch_diagnosis_response', "[OVER]", namespace='/model')
    
# The background task socketio runs for training/testing.
def background_thread():
    """Train and/or test the multimodal model according to customConfig.

    Emits 'dataset_end' once data is loaded and 'model_end' when finished
    (payload "[early over]" when stopped by the client, "[over]" otherwise).
    """
    global modelContinue, SocketThread
    model_name = datetime.datetime.now().strftime('%Y-%m-%d  %H:%M:%S')
    # ---- build the run configuration from the client's form ----
    weightName = customConfig['weightName']
    load_model_path = './MambaNet/save_models/CMAT/' + weightName

    lr = customConfig['learningRate']
    weight_decay = 1e-2
    epoch = customConfig['epoch']
    threshold = customConfig['threshold']

    text_pretrained_model = 'huawei-noah/TinyBERT_General_4L_312D'
    cv_pretrained_model = 'WAVM'
    fuse_model_type = 'CMAT'
    text_only = False
    img_only = False

    config.learning_rate = lr
    config.weight_decay = weight_decay
    config.epoch = epoch
    config.bert_name = text_pretrained_model
    config.resnet_name = cv_pretrained_model
    config.fuse_model_type = fuse_model_type
    config.load_model_path = load_model_path
    config.threshold = threshold
    # Single-modality switch: use 'img'/'text' when exactly one flag is set.
    # (The original's second assignment unconditionally clobbered the first,
    # so img_only could never take effect.)
    if img_only and not text_only:
        config.only = 'img'
    elif text_only and not img_only:
        config.only = 'text'
    else:
        config.only = None
    print('TextModel: {}, ImageModel: {}, FuseModel: {}'.format(config.bert_name,config.resnet_name, config.fuse_model_type))

    # Dataset processing and device.
    processor = Processor(config)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    model = Model(config)
    trainer = Trainer(config, processor, model, device)

    # Dataset
    print('开始加载数据集')
    image_path = '../dataset/FundusDataset/Image'
    if customConfig['doTrain']:
        train_csv_path = '../dataset/FundusDataset/Csv/train_data.csv'
        train_df = pd.read_csv(train_csv_path)
        data = read_from_file(train_df,image_path)
        train_data, val_data = train_val_split(data)
        train_loader = processor(train_data, config.train_params, 'train')
        val_loader = processor(val_data, config.val_params, 'train')
        print('训练验证数据集加载成功')
        socketio.emit('dataset_end', namespace='/model')

        best_acc = 0
        for e in range(int(epoch)):
            # Client requested a stop: report and end the thread. The original
            # emitted "[early over]" but fell through and kept training.
            if modelContinue == False:
                socketio.emit('model_end', {"data": "[early over]"}, namespace='/model')
                SocketThread = None
                return None

            _ = trainer.train(train_loader,socketio)
            _, vacc = trainer.valid(val_loader,socketio)
            if vacc > best_acc:
                best_acc = vacc
                save_path = './MambaNet/save_models'
                save_model(save_path, config.fuse_model_type, model,model_name=model_name)
                print('Update best model!')

    if customConfig['doTest']:
        test_csv_path = '../dataset/FundusDataset/Csv/test_data.csv'
        test_df = pd.read_csv(test_csv_path)
        test_data = read_from_file(test_df,image_path)
        test_loader = processor(test_data, config.test_params,'test')
        print('测试验证数据集加载成功')
        socketio.emit('dataset_end', namespace='/model')

        if config.load_model_path is not None:
            # map_location keeps GPU-saved checkpoints loadable on CPU hosts.
            model.load_state_dict(torch.load(config.load_model_path, map_location=device))

        _ = trainer.predict(test_loader,socketio)

    # Finished normally.
    socketio.emit('model_end', {"data": "[over]"}, namespace='/model')
    SocketThread = None
    return None

"""
input:
获取所有权重
output:
所有权重文件的名字
"""
@modelController.route('/getAllWeight',methods=['GET'])
def getAllWeight():
    """Return the names of all weight files under the CMAT save directory."""
    folder_path = './MambaNet/save_models/CMAT/'
    # Keep regular files only; skip sub-directories.
    files = [
        entry for entry in os.listdir(folder_path)
        if os.path.isfile(os.path.join(folder_path, entry))
    ]
    return success(message='获取权重成功', data=files)

"""
获取默认配置
input:
none
output:
默认配置
"""
@modelController.route('/getDefaultConfig',methods=['GET'])
def getDefaultConfig():
    """Return the first stored configuration row as a dict."""
    row = Configuration.query.first()
    return success(message='获取默认配置', data=row.to_dict())

"""
保存并使用当前配置
input:
none
output:
none
"""
@modelController.route('/saveConfig',methods=['POST'])
def saveConfig():
    """Persist the submitted configuration and hot-swap the active weights.

    Required query params: weightName, learningRate, epoch, threshold.
    Returns an error response naming the first missing parameter.
    """
    weightName = request.args.get('weightName')
    learningRate = request.args.get('learningRate')
    epoch = request.args.get('epoch')
    threshold = request.args.get('threshold')

    # 'is None' instead of '== None' (PEP 8 identity comparison).
    if weightName is None:
        return error(message='请选择模型权重')

    if learningRate is None:
        return error(message='请填写学习率')

    if epoch is None:
        return error(message='请填写训练轮数')

    if threshold is None:
        return error(message='请填写阈值')

    Configuration.query.filter_by(id=1).update({'weight_name': weightName,'learning_rate': learningRate, 'epoch': epoch, 'threshold': threshold})
    db.session.commit()

    # Apply the configuration immediately: reload the fused model weights.
    global mambaModel,cv_model
    load_model_path = './MambaNet/save_models/CMAT/' + weightName
    # map_location keeps GPU-saved checkpoints loadable on CPU-only hosts.
    mambaModel.load_state_dict(torch.load(load_model_path, map_location=device))
    # Refresh the image-only sub-network used for Grad-CAM.
    cv_model = mambaModel.img_model

    return success(message='保存配置成功')

"""
上传CSV/XLSX文件
input:
CSV/XLSX文件
output:
文件名称
"""
@modelController.route('/uploadCsv',methods=['POST'])
def uploadCsv():
    """Accept a CSV/XLSX upload and store it in CSV_UPLOAD_FOLDER."""
    csvFile = request.files['file']
    fileType = csvFile.filename.split('.')[-1]
    # Reject anything that is not a spreadsheet file.
    if fileType not in ('csv', 'xlsx'):
        return error(message='只允许上传CSV/XLSX文件')

    csvFile.save(CSV_UPLOAD_FOLDER + csvFile.filename)
    return success(message='CSV/XLSX文件上传成功', data=csvFile.filename)

"""
uniapp 上传单张图片（重写的原因都是手机端默认的文件名太长 需要我们自己指定一个文件名 和下面这个batchupload只有路径不同）
input:
单张图片
output:
None
"""
@modelController.route('/uniappUploadImage',methods=['POST'])
def uniappUploadImage():
    """Save a single uniapp-uploaded image under a caller-chosen filename
    (phone-side default names are too long, so the client supplies one)."""
    uploaded = request.files['file']
    target_name = request.args.get('filename')
    uploaded.save(IMAGE_UPLOAD_FOLDER + target_name)
    return success(message='文件上传成功')


"""
uniapp 批量上传图片（只能前端循环调用 所以后端每次还是保存一个文件 和uploadImage区别就是路径不同）
input:
image file
output:
文件名称
"""
@modelController.route('/uniappUploadBatchImage',methods=['POST'])
def uniappUploadBatchImage():
    """Save one image of a uniapp batch upload (the client loops over files);
    differs from uniappUploadImage only in the destination folder."""
    uploaded = request.files['file']
    target_name = request.args.get('filename')
    uploaded.save(BATCH_IMAGE_UPLOAD_FOLDER + target_name)
    return success(message='文件上传成功', data=target_name)

"""
批量上传图片
input:
image file list
output:
none
"""
@modelController.route('/uploadBatchImage',methods=['POST'])
def uploadBatchImage():
    """Save every image of a multi-file upload into the batch folder."""
    for uploaded in request.files.getlist('files'):
        uploaded.save(BATCH_IMAGE_UPLOAD_FOLDER + uploaded.filename)

    return success(message='图像批量上传成功')

"""
上传图片
input:
单张图像
sign: left/right 代表上传左眼或者右眼
output:
文件名
"""
@modelController.route('/uploadImage',methods=['POST'])
def uploadImage():
    """Save a single eye image; query param 'sign' marks left/right.

    Returns the sign, the stored filename and a base64 preview of the file.
    """
    sign = request.args.get('sign')
    uploaded = request.files['file']
    save_path = IMAGE_UPLOAD_FOLDER + uploaded.filename
    uploaded.save(save_path)
    return success(message='文件上传成功', data={
        'sign': sign,
        'filename': uploaded.filename,
        '64Url': return_img_stream(save_path),
    })

"""
下载CSV文件功能
input:
csv filename
output:
result
"""
@modelController.route('/downloadCsv',methods=['GET'])
def downloadCsv():
    """Send a previously generated result CSV as an attachment named result.csv."""
    csv_filename = request.args.get('filename')
    csv_path = CSV_RESULT_FOLDER + csv_filename
    try:
        return send_file(csv_path, download_name='result.csv', as_attachment=True)
    except OSError:
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit. FileNotFoundError is an OSError.
        return error(message='文件不存在')

"""
图像亮度增强
input:
左眼眼底名称
右眼眼底名称
output:
增强后左眼眼底图像 base64
增强后右眼眼底图像 base64
"""
@modelController.route('/enhanceImage',methods=['POST'])
def enhanceImage():
    """Brightness-enhance the uploaded left/right fundus images with the
    Zero-DCE network and return both results as base64 strings."""
    body = request.get_json()
    leftFilename = body.get('leftFilename')
    rightFilename = body.get('rightFilename')

    # Filename stems (extension stripped) for the result files.
    leftStem = leftFilename.split('.')[0]
    rightStem = rightFilename.split('.')[0]

    enhancedLeftImage, enhancedRightImage = lowlight([
        IMAGE_UPLOAD_FOLDER + leftFilename,
        IMAGE_UPLOAD_FOLDER + rightFilename,
    ])

    leftSavePath = IMAGE_RESULT_FOLDER + leftStem + '_enhance_result.jpg'
    rightSavePath = IMAGE_RESULT_FOLDER + rightStem + '_enhance_result.jpg'

    torchvision.utils.save_image(enhancedLeftImage, leftSavePath)
    torchvision.utils.save_image(enhancedRightImage, rightSavePath)

    return success(message='亮度增强成功', data={
        'leftEnhanceImage': return_img_stream(leftSavePath),
        'rightEnhanceImage': return_img_stream(rightSavePath),
    })

"""
获取左右眼热力图
input:
左眼眼底名称
右眼眼底名称
output:
左眼眼底热力图 base64
右眼眼底热力图 base64
"""
@modelController.route('/makeGradCam',methods=['POST'])
def makeGradCam():
    """Produce Grad-CAM heat-maps for the left/right fundus images and return
    them as base64 strings.

    Reads the '_enhance_result.jpg' files written by enhanceImage, so that
    endpoint must have been called for these filenames first.
    """
    leftFilename = request.get_json().get('leftFilename')
    rightFilename = request.get_json().get('rightFilename')

    leftFilenameWithoutJpg = leftFilename.split('.')[0]
    rightFilenameWithoutJpg = rightFilename.split('.')[0]

    leftImagePath = IMAGE_RESULT_FOLDER + leftFilenameWithoutJpg + '_enhance_result.jpg'
    rightImagePath = IMAGE_RESULT_FOLDER + rightFilenameWithoutJpg + '_enhance_result.jpg'

    leftSavePath = IMAGE_RESULT_FOLDER + leftFilenameWithoutJpg + '_grad_cam.jpg'
    rightSavePath = IMAGE_RESULT_FOLDER + rightFilenameWithoutJpg + '_grad_cam.jpg'

    img_paths = [leftImagePath,rightImagePath]
    leftGradCam, rightGradCam = visualization(img_paths)

    # Use a dedicated figure per image and close it afterwards: drawing into
    # pyplot's implicit current figure leaks memory across server requests.
    for cam_img, save_path in ((leftGradCam, leftSavePath), (rightGradCam, rightSavePath)):
        fig = plt.figure()
        plt.imshow(cam_img)
        plt.axis("off")
        plt.savefig(save_path, bbox_inches="tight", pad_inches=0)
        plt.close(fig)

    data = {
        'leftGradCam': return_img_stream(leftSavePath),
        'rightGradCam': return_img_stream(rightSavePath)
    }

    return success(message='热力图制作成功',data=data)

"""
获取模型预测疾病改率
input:
左眼眼底名称
右眼眼底名称
左眼眼底关键词
右眼眼底关键词
output:
疾病概率列表
"""
@modelController.route('/getProbList',methods=['POST'])
def getProbList():
    """Predict the probability of each of the 8 disease classes for one
    left/right fundus image pair plus its textual description.

    JSON body: leftFilename, rightFilename, description.
    Returns a list of {abbreviation, disease, prob} dicts.
    """
    payload = request.get_json()
    leftFilename = payload.get('leftFilename')
    rightFilename = payload.get('rightFilename')
    description = payload.get('description')

    leftImagePath = IMAGE_UPLOAD_FOLDER + leftFilename
    rightImagePath = IMAGE_UPLOAD_FOLDER + rightFilename

    # The label is ignored at inference time; any placeholder values work.
    label = [0,0,0,0,0,7,2,1]
    label = np.array(label, dtype=np.float32)
    label = torch.tensor(label)

    data = []
    data.append((leftImagePath, rightImagePath, description, label))

    ################## wrap the single sample in a DataLoader ####################
    dataset_inputs = api_encode(data,config,'test')
    dataset = APIDataset(*dataset_inputs)
    dataloader = DataLoader(dataset=dataset, **config.checkout_params, collate_fn=dataset.collate_fn, drop_last=False)

    ################## predict ###############################
    finalResult = []
    finalIndex = 0
    abbreviationList = ['N','D','G','C','A','H','M','O']
    diseaseList = ['正常','糖尿病','青光眼','白内障','AMD','高血压','近视','其他疾病/异常']
    for batch in tqdm(dataloader, desc='----- [Predicting] '):
        with torch.no_grad():
            texts, texts_mask, imgs, labels = batch
            texts, texts_mask, imgs = texts.to(device), texts_mask.to(device), imgs.to(device)
            pred_vec = torch.sigmoid(mambaModel(texts, texts_mask, imgs))
            # Gotcha: pred_labels is 2-D — the first dimension is batch size.
            pred_labels = pred_vec.cpu().detach().numpy().tolist()
            for prob_list in pred_labels:
                for prob in prob_list:
                    finalResult.append({
                        'abbreviation': abbreviationList[finalIndex],
                        'disease': diseaseList[finalIndex],
                        'prob': prob
                    })
                    finalIndex = finalIndex + 1
    # The original returned inside the loop body, so an empty dataloader fell
    # through and returned None (HTTP 500). With one input sample there is
    # exactly one batch, so moving the return here preserves behavior.
    return success(message='模型预测成功',data=finalResult)

"""
工具函数 读取表格文件
"""
def read_from_file(df,image_path):
    """Turn a dataframe of fundus records into model samples.

    :param df: dataframe with 'Left-Fundus', 'Right-Fundus', 'Description'
               and the 8 label columns N,D,G,C,A,H,M,O
    :param image_path: directory that holds the fundus images
    :return: list of (left_path, right_path, description, label_tensor) tuples
    """
    print('----- [Loading]')
    samples = []
    label_cols = ['N','D','G','C','A','H','M','O']
    for idx, record in tqdm(df.iterrows()):
        left_path = image_path + '/' + record['Left-Fundus']
        right_path = image_path + '/' + record['Right-Fundus']
        describe = record['Description']

        label_tensor = torch.tensor(df.loc[idx, label_cols].to_numpy(dtype=np.float32))

        samples.append((left_path, right_path, describe, label_tensor))

    return samples

def return_img_stream(img_local_path):
    """
        Utility:
        Read a local image file and return its contents base64-encoded.
        :param img_local_path: local path of a single image file
        :return: base64 string of the file bytes
        """
    import base64
    with open(img_local_path, 'rb') as img_f:
        return base64.b64encode(img_f.read()).decode()

def lowlight(image_paths):
    """Run the Zero-DCE enhancement network on two images.

    :param image_paths: [left_path, right_path]
    :return: (enhanced_left, enhanced_right) tensors produced by DCE_net
    """
    enhanced = []
    with torch.no_grad():
        for path in image_paths:
            raw = np.asarray(Image.open(path)) / 255.0
            tensor = torch.from_numpy(raw).float()

            # Crop height and width down to multiples of scale_factor.
            h = (tensor.shape[0] // scale_factor) * scale_factor
            w = (tensor.shape[1] // scale_factor) * scale_factor
            tensor = tensor[0:h, 0:w, :]
            # HWC -> CHW, then add the batch dimension.
            tensor = tensor.permute(2, 0, 1).to(device).unsqueeze(0)

            result, _param_maps = DCE_net(tensor)
            enhanced.append(result)

    return enhanced[0], enhanced[1]

def visualization(img_paths):
    """Compute a Grad-CAM overlay for each of the two images using the image
    branch (cv_model) of the multimodal network.

    :param img_paths: [left_path, right_path]
    :return: (left_overlay, right_overlay) RGB arrays from show_cam_on_image
    """

    def tensor2img(tensor,heatmap=False,shape=(224,224)):
        """Convert a CxHxW tensor into an HxWx3 float array in [0, 1]."""
        arr = tensor.detach().numpy()
        # Min-max normalize only when values fall outside [0, 1].
        if arr.max() > 1 or arr.min() < 0:
            arr = arr - arr.min()
            arr = arr / arr.max()
        # Replicate a single channel three times to fake RGB.
        if arr.shape[0] == 1:
            arr = np.concatenate([arr, arr, arr], axis=0)
        return arr.transpose((1, 2, 0))

    # Hook the sk_conv of the image backbone's last block.
    # NOTE(review): indexing mirrors the MambaNet layer layout — confirm if
    # the backbone architecture changes.
    target_layers = [cv_model.mamba_model.layers[-1][-2][1].sk_conv]
    overlays = []

    for img_path in img_paths:
        raw_bytes = torchvision.io.read_file(img_path)
        img = torchvision.io.decode_image(raw_bytes) / 255
        input_tensor = torchvision.transforms.functional.resize(img.unsqueeze(0), [224, 224])

        with GradCAM(model=cv_model, target_layers=target_layers) as cam:
            grayscale_cams = cam(input_tensor=input_tensor)
            for grayscale_cam, tensor in zip(grayscale_cams, input_tensor):
                # Blend the heat-map with the (normalized) source image.
                rgb_img = tensor2img(tensor)
                overlays.append(show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True))
    return overlays[0], overlays[1]