import torch
import torch.nn.functional as F
import numpy as np
import cv2
from sklearn.metrics import accuracy_score, roc_auc_score
import os
import json
from PIL import Image
from models.Vilref import ViLref
from args import parse_args
from data.datamodule import FundusDataModule

def get_feature_maps_and_grads(model, imgs, target_label):
    """
    获取模型最后一个 Transformer 块的特征图和梯度。
    """
    model.eval()
    feature_maps = []
    gradients = []

    def forward_hook(module, input, output):
        feature_maps.append(output)

    def backward_hook(module, grad_in, grad_out):
        gradients.append(grad_out[0])

    # 可以使用最后一个 Transformer 层，因为 CLS token 的梯度通常更强
    last_block = model.backbone.blocks[-1]  # 修改为最后一个层
    handle_forward = last_block.register_forward_hook(forward_hook)
    handle_backward = last_block.register_backward_hook(backward_hook)

    logits = model(imgs)
    probs = F.softmax(logits, dim=1)
    target_score = probs[0, target_label]

    model.zero_grad()
    target_score.backward()

    handle_forward.remove()
    handle_backward.remove()

    return feature_maps[0], gradients[0]

def compute_gradcam(feature_maps, gradients):
    """
    Compute a Grad-CAM mask by weighting patch-token features with the
    CLS token's gradient.

    Parameters:
        feature_maps: token features, shape (batch_size, num_tokens, embed_dim).
        gradients: gradients w.r.t. the same tokens, same shape.

    Returns:
        cam: non-negative Grad-CAM mask, shape (batch_size, num_patches),
        where num_patches = num_tokens - 1 (the CLS token is excluded).
    """
    # Gradient of the CLS token acts as the channel-importance weights.
    cls_grad = gradients[:, 0, :]                      # (B, D)
    # Patch tokens only — drop the leading CLS token.
    patch_tokens = feature_maps[:, 1:, :]              # (B, P, D)

    # Weighted sum over the embedding dimension: (B, P, D) @ (B, D, 1) -> (B, P).
    raw_cam = torch.bmm(patch_tokens, cls_grad.unsqueeze(-1)).squeeze(-1)

    # Keep only positive contributions, as in standard Grad-CAM.
    return raw_cam.clamp(min=0)

def generate_visualization(img, cam, img_size=224):
    """
    Overlay a Grad-CAM mask onto the original image.

    Parameters:
        img: original BGR image (numpy array, as read by cv2).
        cam: Grad-CAM mask tensor of shape (1, num_patches); num_patches is
            assumed to be a perfect square (e.g. 196 for a 14x14 patch grid).
        img_size: side length of the output visualisation in pixels.

    Returns:
        (cam_img, mask): the blended heat-map overlay and the raw uint8 mask,
        both of size (img_size, img_size).
    """
    # Fold the flat patch scores back into a square grid.
    side = int(np.sqrt(cam.shape[1]))
    grid = cam.reshape(side, side)

    # Upsample the patch grid to the target image resolution.
    grid = cv2.resize(grid.detach().cpu().numpy(), (img_size, img_size), interpolation=cv2.INTER_LINEAR)

    # Min-max normalise to [0, 1]; epsilon guards a constant (flat) map.
    lo = grid.min()
    hi = grid.max()
    grid = (grid - lo) / (hi - lo + 1e-8)

    # Convert to a uint8 mask and colorise it.
    mask = (grid * 255).astype(np.uint8)
    heatmap = cv2.applyColorMap(mask, cv2.COLORMAP_JET)

    # Blend heat map and (resized) source image 50/50.
    resized_img = cv2.resize(img, (img_size, img_size))
    cam_img = cv2.addWeighted(resized_img, 0.5, heatmap, 0.5, 0)

    return cam_img, mask
# Function to extract regions from mask


def extract_regions_from_mask(mask, image_path, crop_info, resize=(224, 224), threshold=128, padding=10, region_save_dir='./experiments/regions_extract'):
    """
    Extract regions of interest from a Grad-CAM mask and save them as crops
    of the original (full-resolution) image.

    Parameters:
        mask: Grad-CAM mask as a uint8 numpy array (values 0-255).
        image_path: path to the original image on disk.
        crop_info: dict describing the preprocessing crop/resize; must
            provide 'cropped_size' and 'crop_box'.
        resize: size the cropped image was resized to before inference.
        threshold: binarisation threshold applied to the mask.
        padding: padding (in mask pixels) added around each bounding box.
        region_save_dir: root directory where region crops are written.

    Returns:
        regions: dict with 'region_dir' (save directory) and
        'region_record', a list of {'save_path', 'bbox'} entries with
        bboxes in original-image coordinates.
    """
    # Binarise the mask and find connected high-activation areas.
    binary_mask = (mask > threshold).astype(np.uint8) * 255
    contours, _ = cv2.findContours(
        binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    original_image = Image.open(image_path)
    original_array = np.array(original_image)
    # One sub-directory per source image, named after the file stem.
    save_dir = os.path.join(region_save_dir,
                            os.path.basename(image_path).split('.')[0])
    os.makedirs(save_dir, exist_ok=True)
    regions = {
        'region_dir': save_dir,
        'region_record': []
    }

    for i, contour in enumerate(contours):
        x, y, w, h = cv2.boundingRect(contour)
        # Pad the box, clamped to the mask bounds.
        x_pad = max(0, x - padding)
        y_pad = max(0, y - padding)
        w_pad = min(mask.shape[1] - x_pad, w + 2 * padding)
        h_pad = min(mask.shape[0] - y_pad, h + 2 * padding)
        # Grow the shorter side so the padded box becomes square (centred
        # where possible; the max(0, ...) clamp can leave it off-centre or
        # partially outside the mask at the borders).
        size = max(w_pad, h_pad)
        if w_pad < size:
            x_pad = max(0, x_pad - (size - w_pad) // 2)
            w_pad = size
        if h_pad < size:
            y_pad = max(0, y_pad - (size - h_pad) // 2)
            h_pad = size

        # Map back to original image coordinates
        # NOTE(review): assumes crop_info['cropped_size'] is (height, width)
        # and crop_box indexes as [0]=top offset, [2]=left offset — confirm
        # against the data pipeline that produces crop_info.
        resize_ratio_w = resize[0] / crop_info['cropped_size'][1]
        resize_ratio_h = resize[1] / crop_info['cropped_size'][0]
        x_c = x_pad / resize_ratio_w
        y_c = y_pad / resize_ratio_h
        w_c = w_pad / resize_ratio_w
        h_c = h_pad / resize_ratio_h
        crop_box = crop_info['crop_box']
        x_o = int(x_c + crop_box[2])
        y_o = int(y_c + crop_box[0])
        w_o = int(w_c)
        h_o = int(h_c)

        # Extract and save region
        region = original_array[y_o:y_o+h_o, x_o:x_o+w_o]
        region_image = Image.fromarray(region)
        save_path = os.path.join(save_dir, f'region_{i}.png')
        region_image.save(save_path)
        regions['region_record'].append({
            'save_path': save_path,
            'bbox': (x_o, y_o, x_o+w_o, y_o+h_o)
        })
    return regions

def process_predict(model,
                    predict_loader,
                    device,
                    predict_visual_dir,
                    save_visual=True,
                    record_json_save_path='./experiments/region_record.json',
                    crop_region_save_dir='./experiments/heatmap_region',
                    original_data_root='../../Dataset/public_dataset'):
    """
    Run prediction over a dataloader, generate Grad-CAM visualisations for
    positive samples predicted positive, and compute multi-class accuracy
    and AUC over the whole loader.

    Parameters:
        model: classification model (moved to ``device`` internally).
        predict_loader: dataloader yielding ``(imgs, labels_dict)`` where
            ``labels_dict`` provides 'label', 'image_path', 'crop_info' and
            'original_rel_path'.
        device: torch device to run inference on.
        predict_visual_dir: directory for Grad-CAM overlay images.
        save_visual: whether to write the overlay images to disk.
        record_json_save_path: JSON file path for the region records.
        crop_region_save_dir: directory for cropped heat-map regions.
        original_data_root: root directory of the original dataset.

    Returns:
        metrics: dict with 'accuracy', 'auc_ovr' and 'auc_ovo' (the AUC
        entries are None when AUC could not be computed).
    """
    os.makedirs(crop_region_save_dir, exist_ok=True)
    region_record = {}
    model = model.to(device)

    # Accumulators for whole-loader metric computation.
    all_preds = []
    all_labels = []
    all_probs = []  # softmax probabilities, needed for AUC

    for batch in predict_loader:
        imgs, labels_dict = batch
        labels = labels_dict['label']
        image_paths = labels_dict['image_path']
        crop_infos = labels_dict['crop_info']
        original_rel_paths = labels_dict['original_rel_path']
        imgs = imgs.to(device)
        labels = labels.to(device)

        # Forward pass (gradients are not disabled here; the Grad-CAM step
        # below runs its own forward/backward per selected sample anyway).
        logits = model(imgs)
        probs = F.softmax(logits, dim=1)  # class probabilities
        preds = torch.argmax(logits, dim=1)  # predicted class indices

        # Collect predictions and ground-truth labels.
        all_preds.extend(preds.cpu().numpy())
        all_labels.extend(labels.cpu().numpy())
        all_probs.extend(probs.cpu().detach().numpy())  # probabilities for AUC

        # Grad-CAM visualisation, only for samples whose label AND prediction
        # are both a positive (non-zero) class.
        for i, (img, label, pred, img_path, original_rel_path, crop_info) in enumerate(zip(imgs, labels, preds, image_paths, original_rel_paths, crop_infos)):
            if label > 0 and pred > 0:
                # Get feature maps and gradients
                feature_maps, gradients = get_feature_maps_and_grads(
                    model, img.unsqueeze(0), label.item())
                cam = compute_gradcam(feature_maps, gradients)
                img_original = cv2.imread(img_path)

                # Generate visualization and mask
                cam_img, mask = generate_visualization(img_original, cam)

                # Save visualization if enabled 
                if save_visual:
                    save_path = os.path.join(
                        predict_visual_dir, 'gradcam', f"pred_{pred.item()}_label_{label.item()}_{os.path.basename(img_path)}")
                    os.makedirs(os.path.dirname(save_path), exist_ok=True)
                    cv2.imwrite(save_path, cam_img)

                # NOTE: region extraction is deliberately disabled; when off,
                # region_record stays empty and the JSON below is written as {}.
                # # Extract regions from mask and save to JSON 
                # regions = extract_regions_from_mask(
                #     mask, 
                #     os.path.join(original_data_root, original_rel_path),
                #     crop_info,
                #     resize=mask.shape,
                #     threshold=128,
                #     padding=15,
                #     region_save_dir=crop_region_save_dir
                #     )
                # region_record[os.path.basename(img_path)] = regions

    # Persist the (possibly empty) region records to JSON.
    with open(record_json_save_path, 'w') as f:
        json.dump(region_record, f, indent=4)

    # Compute performance metrics.
    all_preds = np.array(all_preds)
    all_labels = np.array(all_labels)
    all_probs = np.array(all_probs)

    # Accuracy over all samples.
    accuracy = accuracy_score(all_labels, all_preds)
    print(f"Accuracy: {accuracy:.4f}")

    # Multi-class AUC; sklearn raises ValueError when e.g. a class is missing
    # from the labels, so fall back to None in that case.
    try:
        # One-vs-Rest macro-averaged AUC.
        auc = roc_auc_score(
            all_labels,
            all_probs,
            multi_class='ovr',  # One-vs-Rest
            average='macro'     # macro average
        )
        print(f"Macro-Average AUC (ovr): {auc:.4f}")

        # One-vs-One macro-averaged AUC for comparison.
        auc_ovo = roc_auc_score(
            all_labels,
            all_probs,
            multi_class='ovo',  # One-vs-One
            average='macro'     # macro average
        )
        print(f"Macro-Average AUC (ovo): {auc_ovo:.4f}")

    except ValueError as e:
        print(f"Error calculating AUC: {e}")
        auc = None
        auc_ovo = None

    # Bundle the metrics for the caller.
    metrics = {
        "accuracy": accuracy,
        "auc_ovr": auc,
        "auc_ovo": auc_ovo
    }

    return metrics
if __name__ == "__main__":
    # Entry point: load config/checkpoint, then run Grad-CAM prediction and
    # metric computation over the train and validation dataloaders.
    args, cfg = parse_args()
    original_data_root = '/mnt/d/public_dataset/APTOS'
    data_module = FundusDataModule(
        data_dir=cfg['data']['data_dir'],
        batch_size=cfg['trainer']['batch_size'],
        input_size=cfg['data'].get('input_size', 224),
        num_workers=cfg['trainer'].get('num_workers', 4),
        dataset_name=cfg['data']['dataset_name'],
        split_name=cfg['data']['split_name'],
        use_val=cfg['data'].get('use_val', False)
    )
    # Restore the trained model from its Lightning checkpoint.
    model = ViLref.load_from_checkpoint(
        checkpoint_path="checkpoints/APTOS_val_1.ckpt",
        backbone=cfg['model']['backbone'],
        embed_dim=cfg['model']['embed_dim'],
        num_class=cfg['data']['num_classes'],
        opti_cfg=cfg['optimizer'],
        scheduler_cfg=cfg['scheduler'],
        batch_size=cfg['trainer']['batch_size'],
        loss_terms=cfg['model'].get('loss_terms', None),
        predict_visual_dir='./experiments/visual'
    )
    data_module.setup()
    predict_dataloader_1 = data_module.train_dataloader()
    predict_dataloader_2 = data_module.val_dataloader()

    # NOTE(review): debug mode presumably makes the dataset return the extra
    # labels_dict fields (image_path, crop_info, ...) that process_predict
    # expects — confirm in FundusDataModule.
    predict_dataloader_1.dataset.debug = True
    predict_dataloader_2.dataset.debug = True
    predict_visual_dir = './experiments/visual'

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"using device {device}")

    # Process the first dataloader (train split) and report metrics.
    metrics_1 = process_predict(
        model,
        predict_dataloader_1,
        device,
        predict_visual_dir='./experiments/visual_val1',
        save_visual=True,
        record_json_save_path='./experiments/region_record_1.json',
        original_data_root=original_data_root,
        crop_region_save_dir='./experiments/crop_region_1'
    )
    print("Metrics for dataloader 1:", metrics_1)

    # Process the second dataloader (validation split) and report metrics.
    metrics_2 = process_predict(
        model,
        predict_dataloader_2,
        device,
        predict_visual_dir='./experiments/visual_val2',
        save_visual=True,
        record_json_save_path='./experiments/region_record_2.json',
        original_data_root=original_data_root,
        crop_region_save_dir='./experiments/crop_region_2'
    )
    print("Metrics for dataloader 2:", metrics_2)