from args import parse_args
from data.datamodule import FundusDataModule
from models.Vilref import ViLref
import torch
import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score, recall_score
import os
import json
import cv2

def compute_metrics(labels, preds, num_classes):
    """Compute overall and per-class classification metrics.

    Args:
        labels: 1-D array-like of ground-truth class ids in [0, num_classes).
        preds: 1-D array-like of predicted class ids, same length as labels.
        num_classes: total number of classes.

    Returns:
        dict with 'accuracy', 'auc', and per-class 'class_{c}_acc' /
        'class_{c}_recall' / 'class_{c}_auc'; NaN where a metric is
        undefined for the given label distribution.
    """
    # Coerce to arrays so `labels == cls` below is an element-wise mask
    # even when callers pass plain lists.
    labels = np.asarray(labels)
    preds = np.asarray(preds)
    metrics = {}
    metrics['accuracy'] = accuracy_score(labels, preds)
    # NOTE: AUC is computed from hard one-hot predictions because no class
    # probabilities are available here — this is a coarse approximation.
    if len(np.unique(labels)) > 1:
        try:
            one_hot_labels = np.eye(num_classes)[labels]
            metrics['auc'] = roc_auc_score(
                one_hot_labels, np.eye(num_classes)[preds], multi_class='ovr'
            )
        except ValueError:
            # Raised when some class has no positive (or no negative)
            # samples in `labels`; the OvR AUC is undefined then.
            metrics['auc'] = float('nan')
    else:
        metrics['auc'] = float('nan')
    for cls in range(num_classes):
        cls_mask = labels == cls
        if cls_mask.sum() > 0:
            # One-vs-rest binarization for per-class metrics.
            cls_labels = np.where(cls_mask, 1, 0)
            cls_preds = np.where(preds == cls, 1, 0)
            metrics[f'class_{cls}_acc'] = accuracy_score(cls_labels, cls_preds)
            metrics[f'class_{cls}_recall'] = recall_score(cls_labels, cls_preds, zero_division=0)
            if len(np.unique(cls_labels)) > 1:
                try:
                    metrics[f'class_{cls}_auc'] = roc_auc_score(cls_labels, cls_preds)
                except ValueError:
                    metrics[f'class_{cls}_auc'] = float('nan')
            else:
                # All samples belong to (or none belong to) this class:
                # binary AUC is undefined.
                metrics[f'class_{cls}_auc'] = float('nan')
        else:
            metrics[f'class_{cls}_acc'] = float('nan')
            metrics[f'class_{cls}_recall'] = float('nan')
            metrics[f'class_{cls}_auc'] = float('nan')
    return metrics

def rollout(attentions, discard_ratio=0.5, head_fusion="max"):
    result = torch.eye(attentions[0].size(-1), device=attentions[0].device)
    with torch.no_grad():
        for attention in attentions:
            if head_fusion == "mean":
                attention_heads_fused = attention.mean(dim=1)  # (32, 197, 197)
            elif head_fusion == "max":
                attention_heads_fused = attention.max(dim=1)[0]
            elif head_fusion == "min":
                attention_heads_fused = attention.min(dim=1)[0]
            else:
                raise ValueError("Unsupported head fusion type")
            flat = attention_heads_fused.view(attention_heads_fused.size(0), -1)  # (32, 38809)
            _, indices = flat.topk(int(flat.size(-1) * discard_ratio), dim=-1, largest=False)  # (32, 34928)
            # 使用 scatter_ 将指定索引置为 0
            flat.scatter_(1, indices, 0)
            attention_heads_fused = flat.view_as(attention_heads_fused)  # 恢复形状 (32, 197, 197)
            I = torch.eye(attention_heads_fused.size(-1), device=attention_heads_fused.device)
            a = (attention_heads_fused + 1.0 * I) / 2
            a = a / a.sum(dim=-1, keepdim=True)
            result = torch.matmul(a, result)
        mask = result[0, 0, 1:]  # 去掉 CLS token
        width = int(mask.size(-1) ** 0.5)
        mask = mask.reshape(width, width).cpu().numpy()
        mask = mask / np.max(mask)
        return mask

if __name__ == "__main__":
    args, cfg = parse_args()

    data_module = FundusDataModule(
        data_dir=cfg['data']['data_dir'],
        batch_size=cfg['trainer']['batch_size'],
        input_size=cfg['data'].get('input_size', 224),
        num_workers=cfg['trainer'].get('num_workers', 4),
        dataset_name=cfg['data']['dataset_name'],
        split_name=cfg['data']['split_name'],
        use_val=cfg['data'].get('use_val', False)
    )
    model = ViLref.load_from_checkpoint(
        # TODO(review): hard-coded checkpoint path; prefer deriving it from
        # cfg['checkpoints_dir'] / cfg['data']['dataset_name'].
        checkpoint_path=f"./logs/training/version_5/checkpoints/epoch=49-step=3850.ckpt",
        backbone=cfg['model']['backbone'],
        embed_dim=cfg['model']['embed_dim'],
        num_class=cfg['data']['num_classes'],
        opti_cfg=cfg['optimizer'],
        scheduler_cfg=cfg['scheduler'],
        batch_size=cfg['trainer']['batch_size'],
        loss_terms=cfg['model'].get('loss_terms', None),
        predict_visual_dir='./experiments/visual'
    )
    data_module.setup()
    predict_dataloader = data_module.val_dataloader()

    # Hoisted out of the batch loop: the original assigned this inside the
    # loop but read it afterwards, which raised NameError on an empty loader.
    predict_visual_dir = './experiments/visual'
    record_json = {}
    all_preds = []
    all_labels = []
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    model.eval()

    for batch_idx, batch in enumerate(predict_dataloader):
        imgs, labels_dict = batch
        labels = labels_dict['label']
        image_paths = labels_dict['image_path']
        imgs = imgs.to(device)
        labels = labels.to(device)

        # Collect attention weights from the last K transformer blocks
        # via forward hooks; attention modules returning a tuple expose
        # the weights as the second element.
        attentions = []
        def get_attention(module, input, output):
            if isinstance(output, tuple):
                attentions.append(output[1])

        K = 1
        handles = []
        for block in model.backbone.blocks[-K:]:
            handles.append(block.attn.register_forward_hook(get_attention))

        # Forward pass (hooks fill `attentions` as a side effect).
        with torch.no_grad():
            logits = model(imgs)
            preds = torch.argmax(logits, dim=1)

        for handle in handles:
            handle.remove()

        all_preds.append(preds.cpu().numpy())
        all_labels.append(labels.cpu().numpy())

        if predict_visual_dir:
            for i, (pred, label, img_path) in enumerate(zip(preds, labels, image_paths)):
                if label == -1:
                    # Unverified sample — no ground truth to judge against.
                    save_path = os.path.join(predict_visual_dir, 'unverify')
                elif label == 0:
                    continue  # stage-0 samples are not visualized
                else:
                    is_correct = pred == label
                    save_path = os.path.join(predict_visual_dir, 'hit' if is_correct else 'miss')

                # BUG FIX: rollout() returns the mask of batch element 0
                # only, so the original code saved an identical heatmap for
                # every image in the batch. Slice out this sample's
                # attention maps (keeping a batch dim of 1) before rollout.
                sample_attentions = [attn[i:i + 1] for attn in attentions]
                mask = rollout(sample_attentions, discard_ratio=0.95, head_fusion="mean")
                mask = cv2.resize(mask, (224, 224), interpolation=cv2.INTER_LINEAR)

                # Read the original image (BGR) from disk rather than using
                # the normalized tensor, so the overlay looks natural.
                img_original = cv2.imread(img_path)
                if img_original is None:
                    print(f"警告：无法读取图像 {img_path}")
                    continue
                img_original = cv2.resize(img_original, (224, 224), interpolation=cv2.INTER_LINEAR)

                # Output layout: <root>/<bucket>/{mask,visual,images}/<name>
                mask_dir = os.path.join(save_path, 'mask')
                visual_dir = os.path.join(save_path, 'visual')
                images_dir = os.path.join(save_path, 'images')
                os.makedirs(mask_dir, exist_ok=True)
                os.makedirs(visual_dir, exist_ok=True)
                os.makedirs(images_dir, exist_ok=True)

                # Heatmap + 50/50 overlay on the original image.
                heatmap = cv2.applyColorMap((mask * 255).astype(np.uint8), cv2.COLORMAP_JET)
                cam_image = cv2.addWeighted(img_original, 0.5, heatmap, 0.5, 0)

                base_name = f"stage_{str(int(label))}_" + os.path.basename(img_path)

                mask_save_path = os.path.join(mask_dir, base_name)
                visual_save_path = os.path.join(visual_dir, base_name)
                image_save_path = os.path.join(images_dir, base_name)

                cv2.imwrite(mask_save_path, (mask * 255).astype(np.uint8))
                cv2.imwrite(visual_save_path, cam_image)
                cv2.imwrite(image_save_path, img_original)

                # Record paths relative to the visualization root.
                rel_root = save_path[len(predict_visual_dir) + 1:]
                record_json[base_name] = {
                    'mask_path': os.path.join(rel_root, 'mask', base_name),
                    'visual_path': os.path.join(rel_root, 'visual', base_name),
                    'image_path': os.path.join(rel_root, 'images', base_name),
                    'prediction': pred.item(),
                    'label': label.item()
                }

    # Guard against an empty dataloader (np.concatenate raises on []).
    if all_preds:
        all_preds = np.concatenate(all_preds, axis=0)
        all_labels = np.concatenate(all_labels, axis=0)
    else:
        all_preds = np.empty(0, dtype=np.int64)
        all_labels = np.empty(0, dtype=np.int64)

    valid_mask = all_labels >= 0
    valid_preds = all_preds[valid_mask]
    valid_labels = all_labels[valid_mask]
    invalid_mask = all_labels == -1
    invalid_preds = all_preds[invalid_mask]
    num_classes = cfg['data']['num_classes']
    if len(valid_labels) > 0:
        metrics = compute_metrics(valid_labels, valid_preds, num_classes)
        print("Valid Metrics:", json.dumps(metrics, indent=4))
    # BUG FIX: `ratios` was never defined (NameError at runtime). Report the
    # per-class distribution of predictions on unverified (-1) samples,
    # which is presumably what was intended — `invalid_preds` was otherwise
    # unused.
    if len(invalid_preds) > 0:
        ratios = (np.bincount(invalid_preds, minlength=num_classes) / len(invalid_preds)).tolist()
    else:
        ratios = []
    print("Class Ratios:", ratios)
    if predict_visual_dir and record_json:
        record_file = os.path.join(predict_visual_dir, 'record.json')
        os.makedirs(predict_visual_dir, exist_ok=True)
        with open(record_file, 'w') as f:
            json.dump(record_json, f, indent=4)
        print(f"Saved prediction records to {record_file}")
