import sys
import math
import numpy as np
import json
import time
from collections import defaultdict

from datetime import datetime
from pathlib import Path
from IPython import embed
import itertools
from io import StringIO
from torchinfo import summary

from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
from tqdm import tqdm
import umap
from mpl_toolkits.mplot3d import Axes3D
from sklearn.manifold import TSNE

import torch
import torch.optim as optim
import torch.nn.functional as F
from torch.optim.lr_scheduler import LambdaLR
from torch.optim.adamw import AdamW
from lion_pytorch import Lion

from torch.utils.tensorboard.writer import SummaryWriter
from torch.utils.data.dataloader import DataLoader
from torchvision.utils import make_grid
import torchvision.utils as vutils
import torchvision.transforms as transforms

from dataset import ZhCharDataset
from utils import init_log, logit_2_prob, add_text_to_img, config2Log
from utils import register_hook_for_max_Path, get_feature_maps
from utils import L_to_RGB
from third.torch_receptive_field import receptive_field, receptive_field_for_unit

from third.pytorch_grad_cam import GradCAM, HiResCAM, ScoreCAM, GradCAMPlusPlus, AblationCAM, XGradCAM, EigenCAM, FullGrad, FinerCAM, ShapleyCAM, LayerCAM
from third.pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from third.pytorch_grad_cam.utils.image import show_cam_on_image



class EvaluaterConfig:
    """Configuration container for :class:`Evaluater`.

    Every keyword argument handed to the constructor becomes an instance
    attribute, overriding the class-level defaults below.
    """

    # checkpoint settings
    ckpt_path = None   # path to the checkpoint being evaluated
    num_workers = 1    # worker count passed to DataLoader

    def __init__(self, **kwargs):
        # Attach arbitrary settings without declaring them up front.
        for key, value in kwargs.items():
            setattr(self, key, value)

class Evaluater:
    # state: 'test', 'free_test', 'max_act_patch', 'gradCam'
    def __init__(self, model, dataset, config, state='test'):
        """Set up the evaluator for one of several evaluation modes.

        Args:
            model: network to evaluate; may be wrapped in
                ``torch.nn.DataParallel`` (``.module`` is unwrapped where
                its ``name`` attribute is needed).
            dataset: dataset yielding ``(x, y, idx)`` items. For
                ``state='gradCam'`` this is expected to be an indexable
                pair of datasets (train, val) — see ``gradCam``.
            config: configuration object; which attributes are read
                (``log_dir``, ``run_dir``, ``img_dir``, ...) depends on
                ``state``.
            state: one of 'test', 'free_test', 'max_act_patch', 'gradCam';
                selects which log/output directories get prepared.
        """
        self.config = config
        self.model = model
        self.dataset = dataset
        self.to_pil = transforms.ToPILImage()
        self.to_tensor = transforms.ToTensor()
        self.layer_info = []  # Store layer information for visualization
        self.state = state
        

        # Unwrap DataParallel (if any) to read the model's name.
        model_name = self.model.module.name if hasattr(self.model, "module") else self.model.name
        if state == 'test':
            run_id = f"{model_name}"
            # Initialize logging and tensorboard
            log_dir = Path(f"{config.log_dir}")
            log_dir.mkdir(parents=True, exist_ok=True)
            self.logger = init_log(f'{run_id}_test_log', log_dir/f"{run_id}_val.log")

            self.logger.info(f"Model: {model.name}")
            model_logInf = config2Log(model.model_conf)
            self.logger.info(f"{model_logInf}")

            runs_dir = Path(f"{config.run_dir}/{run_id}_test")
            runs_dir.mkdir(parents=True, exist_ok=True)
            self.writer = SummaryWriter(runs_dir)
        elif state == 'free_test':
            # Only image output is needed for interactive free testing.
            # run_id = f"{model_name}"
            self.img_dir = Path(f"{config.img_dir}")
            self.img_dir.mkdir(parents=True, exist_ok=True)
        elif state == 'max_act_patch':
            run_id = f"{model_name}"
            self.img_dir = Path(f"{config.img_dir}")
            self.img_dir.mkdir(parents=True, exist_ok=True)
            self.logger = init_log(f'{run_id}_maxAct_log', self.img_dir/f"{run_id}_maxActPatch.log")
        elif state == 'gradCam':
            run_id = f"{model_name}"
            self.img_dir = Path(f"{config.img_dir}")
            self.img_dir.mkdir(parents=True, exist_ok=True)
            self.logger = init_log(f'{run_id}_gradCam_log', self.img_dir/f"{run_id}_gradCam.log")
            

        # take over whatever gpus are on the system
        self.device = 'cpu'
        if torch.cuda.is_available():
            # current_device() returns an int ordinal; PyTorch's .to()
            # accepts it as a CUDA device index.
            self.device = torch.cuda.current_device()
            print(f"Set tester.device to 'cuda:{self.device}'")
            # self.model = torch.nn.DataParallel(self.model).to(self.device)
        self.model.to(self.device)

        # KNN-based models carry reference embeddings/labels that must live
        # on the same device as the model.
        if self.model.name in ['TripletLoss_Knn', 'TripletLoss_Knn_v2']:
            self.model.trained_embeds.to(self.device)
            self.model.trained_labels.to(self.device)

    def free_test(self):
        """Interactively test single samples and visualize their feature maps.

        Repeatedly prompts on stdin for a sample index, runs a forward pass
        with feature-map hooks installed, and renders a grid image for every
        hooked layer via ``_visualize_feature_maps``.

        NOTE(review): the outer ``while True`` never breaks, so the final
        ``return true_label, pred_label`` is unreachable as written; the loop
        only ends when the process is interrupted.
        """
        model = self.model.module if hasattr(self.model, "module") else self.model
        assert model.name in ['ArcFont', 'MDW_Net', 'MobileNetV4_Font'], "该模型尚未支持特征图查看功能"
        register_hook_for_max_Path(model)
        model.eval()
        print(f"Length of test_data:{len(self.dataset)}")
        while(True): 

            # Make sure cached feature maps are cleared before the forward pass
            if hasattr(model, 'feature_maps'):
                if len(model.feature_maps) > 0:
                    model.feature_maps.clear()

            while True:
                sample_idx = input("input sample_id:")
                # layer_id= input(f"input layer_id(1 - {model.layer_num}):")
                try:
                    # Try to parse the input as an integer
                    sample_idx = int(sample_idx)
                    # layer_id = int(layer_id)
                    # Parsed successfully -> leave the prompt loop
                    break
                except ValueError:
                    # Not a number -> tell the user and prompt again
                    print("这不是一个有效的数字，请重新输入。")
                
            # Validate the sample index (raising here aborts the session)
            if sample_idx < 0 or sample_idx >= len(self.dataset):
                raise ValueError(f"Invalid sample index: {sample_idx}. Must be between 0 and {len(self.dataset)-1}")
                
            # Fetch the requested sample [c,h,w]
            x, y, idx = self.dataset[sample_idx]   # x:(1,96,96) y:(1)
            print(f"y.shape:{y.shape}")
            x = x.unsqueeze(0).to(self.device)  # -> (1, 1, 96, 96)
            y = y.unsqueeze(0).to(self.device) # -> (1, 1)
            
            # Forward pass (note: 'MobileNet' appears twice in this list)
            with torch.no_grad():
                if any(n in model.name for n in ['CAE_v3', 'ArcFont', 'MobileNet', 'SwordNet', 'MobileNet', 'MDW_Net']):
                    # Neutralize the ArcFace margin so logits reflect plain
                    # cosine similarity at inference time.
                    if model.classifier.__class__.__name__ == 'SubCenterArcFace':
                        model.classifier.update(margin = 0.0)
                    start_time = time.time()
                    logits, _ = model(x, y)
                    y_pred = logits.argmax(-1)
                    inference_latency = time.time() - start_time
                    model.layer_num = len(model.feature_maps)
                else:
                    raise NotImplementedError(f"Free test not supported for model: {model.name}")
            
            # Remove hooks
            # handle.remove()
            
            # Visualize every hooked layer
            for i in range(model.layer_num):
                # feature_maps:{'layer_1':tensor(B,C,H,W)}
                feature_maps = get_feature_maps(model, i)
                # Render the feature-map grid (saved/ logged per self.state)
                if feature_maps is not None:
                    self._visualize_feature_maps(feature_maps, sample_idx, layer_id=i, state=self.state)
            
            # Report the prediction and timing
            true_label = self.dataset.i2label[int(y[-1].cpu())]
            pred_label = self.dataset.i2label[int(y_pred.cpu())]
            print(f"\nSample Index: {sample_idx}")
            print(f"True Label: {true_label}")
            print(f"Predicted Label: {pred_label}")
            print(f"Inference latency: {inference_latency}s")
            # print(f"Visualized Layer: {layer_id}")
            
            # Record layer info for later inspection
            self.layer_info.append({
                'sample_idx': sample_idx,
                # 'layer_id': layer_id,
                'feature_maps': feature_maps
            })
        
        return true_label, pred_label

    def _visualize_feature_maps(self, feature_maps, sample_idx, layer_id=1, state=''):
        """Render one layer's feature maps as an annotated image grid.

        Args:
            feature_maps (Tensor): activations shaped [1,C,H,W], [C,H,W]
                or [1,D] (flat vectors are reshaped to a square).
            sample_idx (int): index of the source image in the dataset.
            layer_id (int): index of the layer being visualized.
            state (str): 'test' writes the grid to TensorBoard;
                'free_test' saves it as a PNG under ``config.img_dir``;
                any other value only builds the grid.
        """
        # Normalize the input to [C,H,W].
        if len(feature_maps.shape) == 4:  # [1,C,H,W]
            assert feature_maps.size(0) == 1, "Batch dimension should be 1."
            feature_maps = feature_maps.squeeze(0)  # drop batch dim -> [C,H,W]
        elif len(feature_maps.shape) == 2:  # [1,D]
            # Reshape the flat feature vector into a square 2D view.
            size = int(math.sqrt(feature_maps.size(-1)))
            feature_maps = feature_maps.view(1, size, size)  # [1,sqrt(D),sqrt(D)]
        
        # Replicate a single-channel map to 3 channels (pseudo RGB).
        if feature_maps.size(0) == 1:
            feature_maps = torch.cat([feature_maps]*3, dim=0)
        
        # Cap the number of channels shown and build the grid.
        # BUGFIX: the cap was computed but never applied to the tensor.
        max_feature_maps = min(64, feature_maps.size(0))
        feature_maps = feature_maps[:max_feature_maps]
        grid = make_grid(feature_maps.unsqueeze(1), 
                        nrow=int(math.sqrt(max_feature_maps)), 
                        normalize=False, 
                        padding=2,
                        scale_each=True,
                        pad_value=0.0)
        
        # Stamp a caption onto the grid image.
        grid_img = self.to_pil(grid)
        draw = ImageDraw.Draw(grid_img)
        try:
            font = ImageFont.truetype("Arial.ttf", 20)
        except OSError:
            # Font file not available on this system -> fall back.
            font = ImageFont.load_default()
        text = f"Layer {layer_id} - Sample {sample_idx}"
        draw.text((10,10), text, fill=(255,0,0), font=font)
        
        # Emit the result according to the evaluation state.
        if state == 'test':
            self.writer.add_image(f"FeatureMaps/Layer_{layer_id}", 
                                self.to_tensor(grid_img), 
                                global_step=sample_idx)
        elif state == 'free_test':
            grid_img.save(f'{self.config.img_dir}/sample_{sample_idx}-layer_{layer_id}.png')
            print(f"feature maps were saved to {self.config.img_dir}/sample_{sample_idx}-layer_{layer_id}.png")
    def test(self):
        """Run a full evaluation pass over ``self.dataset``.

        Logs a model summary, average inference latency and final accuracy;
        misclassified samples are written to TensorBoard with annotated
        images for the classifier-style models.

        Returns:
            float: mean test loss over all batches.
        """
        self.time_stamp = datetime.now().strftime('%Y%m%d_%H%M%S') # time_stamp is used to save checkpoint
        config = self.config
        model = self.model.module if hasattr(self.model, "module") else self.model

        model.eval()
        data = self.dataset
        loader = DataLoader(data, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)

        self.logger.info(f"test on {Path(self.dataset.root).joinpath(self.dataset.mod)}")
        self.logger.info(f"INFO of dataset.transforms:{self.dataset.transforms.__dict__.__repr__()}")

        sample_x, sample_y, _ = self.dataset[0]
        C,H,W = sample_x.shape

        # Capture torchinfo's stdout summary and forward it to the logger.
        # BUGFIX: try/finally guarantees stdout is restored even if
        # summary() raises; previously a failure left stdout hijacked.
        output_temp = StringIO()
        sys.stdout = output_temp
        try:
            summary_data = torch.randn([self.config.batch_size, C, H, W]).to(self.device)
            summary_label = torch.ones([self.config.batch_size]).long().to(self.device)
            summary(model, input_data=[summary_data, summary_label])
        finally:
            sys.stdout = sys.__stdout__
        self.logger.info(output_temp.getvalue())

        losses = []
        correct = 0
        total_latency = 0
        error_num = 0
        error_imgs_tensor = []
        total = 0
        print('Begin testing')
        pbar = tqdm(enumerate(loader), total=len(loader))
        for it, (x, y, idx) in pbar:
            # place data on the correct device
            x = x.to(self.device)
            y = y.to(self.device)

            # forward the model
            with torch.set_grad_enabled(False):
                if any(n in model.name for n in ['CAE_v3', 'ArcFont', 'MobileNet', 'SwordNet', 'MDW_Net']):
                    start_time = time.time()

                    logits, loss = model(x, y)
                    y_pred = logits.argmax(-1)

                    inference_time = time.time() - start_time
                    total_latency += inference_time

                    loss = loss.mean() # collapse all losses if they are scattered on multiple gpus
                    losses.append(loss.item())

                    correct += (y_pred == y).sum()
                    for i in range(x.shape[0]):
                        true_label = data.i2label[int(y[i].cpu())]
                        pred_label = data.i2label[int(y_pred[i].cpu())]
                        if y[i] != y_pred[i]:
                            error_info = (f"pred_label:{pred_label}", f"true_label:{true_label}", logit_2_prob(logits[i], data.i2label))
                            # details go to the log file only
                            self.logger.debug(f"error_idx: {idx[i]}, {error_info}")
                            # Annotate the mis-classified image and push it
                            # to TensorBoard, keyed true-label/pred-label.
                            error_img_tensor = data.deNorm(x[i])
                            text = f"idx: {idx[i]}, T: {true_label}, F:{pred_label}"
                            error_img_tensor = add_text_to_img(error_img_tensor, text, self.to_pil, self.to_tensor)
                            self.writer.add_image(f'错误分类的图像/{true_label}/{pred_label}', error_img_tensor, global_step=error_num)
                            error_imgs_tensor.append(error_img_tensor)
                            error_num += 1

                elif model.name in ['TripletLoss_Knn', 'TripletLoss_Knn_v2']:
                    # KNN models: embed the batch, then majority-vote among
                    # the k nearest trained embeddings.
                    batch_embeds, loss = model(x, y)
                    losses.append(loss.item())

                    assert model.trained_embeds is not None
                    distances, nearest_neighbors = model.knn_func(batch_embeds, k=self.config.k, reference=model.trained_embeds)
                    nearest_neighbors = nearest_neighbors.cpu()
                    model.trained_labels = model.trained_labels.cpu()
                    neighbor_labels = model.trained_labels[nearest_neighbors]

                    y_pred, _ = torch.mode(neighbor_labels, dim=1)  # y_pred: (num_val, )
                    y = y.cpu()
                    correct += (y_pred == y).sum()
                    for i in range(x.shape[0]):
                        true_label = data.i2label[int(y[i].cpu())]
                        pred_label = data.i2label[int(y_pred[i].cpu())]
                        if y[i] != y_pred[i]:
                            err_image_tensor = data.deNorm(x[i]).cpu().numpy()
                            self.writer.add_image(f'错误分类的图像/{true_label}/{pred_label}', err_image_tensor, global_step=error_num)
                            error_num += 1

                total += y.size(0)
        avg_latency = total_latency / len(data)
        self.logger.info(f"平均推理时延：{avg_latency}s")
        self.logger.info("test loss: %f, total: %d, correct: %d, accuracy: %f", 
                    np.mean(losses), total, correct, correct/total)

        # BUGFIX: torch.stack raises on an empty list (100% accuracy, or the
        # KNN branch which never fills error_imgs_tensor) — guard it.
        if error_imgs_tensor:
            error_imgs_tensor = torch.stack(error_imgs_tensor, dim=0)
            grid = make_grid(error_imgs_tensor, nrow=5)
            self.writer.add_image('Error_imgs', grid, 0)

        return np.mean(losses)

    def max_act_patch(self):
        """Find the top-9 activations per conv channel and save patch grids.

        1. Sweep the dataset, tracking for every hooked conv layer the 9
           highest activations per channel (value, source-image index,
           receptive-field coords, label).
        2. Map each activation back to input coordinates via the receptive
           field and crop the corresponding patch.
        3. Save one grid image per layer (one row of 9 patches per channel),
           plus JSON dumps of the raw data and per-layer label statistics.
        """
        
        config = self.config
        model = self.model.module if hasattr(self.model, "module") else self.model
        model.eval()
        sample_x, sample_y, _ = self.dataset[0]
        C,H,W = sample_x.shape
        
        self.logger.info(f"Model: {model.name}")
        self.logger.info(f"test on {Path(self.dataset.root).joinpath(self.dataset.mod)}")
        self.logger.info(f"INFO of dataset.transforms:{self.dataset.transforms.__dict__.__repr__()}")
        
        # Register hooks that capture feature maps during forward passes.
        register_hook_for_max_Path(model)

        # Capture the torchinfo summary and receptive-field table (both
        # print to stdout); restore stdout even if either call raises.
        output_temp = StringIO()
        sys.stdout = output_temp
        try:
            summary_data = torch.randn([self.config.batch_size, C, H, W]).to(self.device)
            summary_label = torch.ones([self.config.batch_size]).long().to(self.device)
            summary(model, input_data=[summary_data, summary_label])
            # Receptive-field info, used to map activations back to input coords.
            rf_info = receptive_field(model, (sample_x.shape))
        finally:
            sys.stdout = sys.__stdout__
        self.logger.info(output_temp.getvalue())
        
        # Global per-layer/per-channel top-9 bookkeeping:
        # {layer_name: {channel: {'top9_val': [...9 floats...],
        #                         'img_idx': [...], 'rf_coords': [...],
        #                         'img_label': [...]}}}
        layer_data = defaultdict(lambda: defaultdict(lambda: {
            'top9_val': [-float('inf')]*9,
            'img_idx': [None]*9,
            'rf_coords': [None]*9,
            'img_label':[None]*9
        }))
        
        # Output directory for the grid images.
        grid_dir = Path(config.img_dir)
        grid_dir.mkdir(parents=True, exist_ok=True)
        
        # Sweep the whole dataset in deterministic order.
        data = self.dataset
        loader = DataLoader(data, batch_size=config.batch_size,
                          shuffle=False, num_workers=config.num_workers)
        
        pbar = tqdm(loader, total=len(loader))
        for batch_idx, (x, y, img_idx) in enumerate(pbar):
            x = x.to(self.device, non_blocking=True)
            y = y.to(self.device, non_blocking=True)
            img_idx = img_idx.to(self.device, non_blocking=True)
            
            # Clear cached feature maps before each forward pass.
            if hasattr(model, 'feature_maps'):
                if len(model.feature_maps) > 0:
                    model.feature_maps.clear()
            
            with torch.no_grad():
                # Forward pass populates the hooked feature maps.
                model(x, y)
                model.layer_num = len(model.feature_maps)
                
                assert hasattr(model, 'layer_num'), "Hook is not rigistered in model."
                assert hasattr(model, 'feature_maps'), "Hook is not rigistered in model."
                for layer_id in range(model.layer_num):
                    # Current layer's feature maps [B,C,H,W].
                    feature_maps = get_feature_maps(model, layer_id)
                    # receptive_field() indexes layers starting from "1".
                    receptive_layer_id = str(layer_id+1)
                    if feature_maps is None:
                        continue
                    
                    B, C, H, W = feature_maps.shape
                    
                    for c in range(C):
                        channel_feats = feature_maps[:, c, :, :]
                        
                        # Per-sample maximum of this channel (B values)...
                        max_vals, _ = torch.max(channel_feats.view(B, -1), dim=1)
                        # ...then the top-k samples of this batch.
                        num_topk = min(9, B)
                        batch_top9_val, batch_top9_idx = torch.topk(max_vals, num_topk, dim=0)
                        batch_top9_val = batch_top9_val.tolist()

                        # Spatial position of each selected activation.
                        # BUGFIX: iterate num_topk, not a hard-coded 9 — the
                        # old loop raised IndexError on final batches with
                        # fewer than 9 samples.
                        spatial_pos = []
                        for i in range(num_topk):
                            sample_idx = batch_top9_idx[i]
                            pos = torch.argmax(channel_feats[sample_idx]).item()
                            h = pos // W
                            w = pos % W
                            spatial_pos.append((h,w))
                        
                        # Map the activation positions to input-image coords.
                        rf_coords_list = []
                        for (h,w) in spatial_pos:
                            rf_unit = receptive_field_for_unit(rf_info, receptive_layer_id, (h,w))
                            rf_h_start, rf_h_end = rf_unit[0]
                            rf_w_start, rf_w_end = rf_unit[1]
                            rf_coords_list.append((
                                int(rf_h_start), int(rf_h_end), 
                                int(rf_w_start), int(rf_w_end)
                            ))
                        
                        # Dataset indices and labels of the selected samples.
                        batch_img_idx = [img_idx[i].item() for i in batch_top9_idx] 
                        batch_img_label = [self.dataset.i2label[y[i].item()] for i in batch_top9_idx]
                        
                        # Merge this batch's top-k with the running global top-9.
                        global_vals = layer_data[receptive_layer_id][c]['top9_val']
                        global_img_idx = layer_data[receptive_layer_id][c]['img_idx']
                        global_rf_coords = layer_data[receptive_layer_id][c]['rf_coords']
                        global_img_label = layer_data[receptive_layer_id][c]['img_label']
                        
                        all_vals = global_vals + batch_top9_val
                        all_img_idx = global_img_idx + batch_img_idx
                        all_rf_coords = global_rf_coords + rf_coords_list
                        all_img_label = global_img_label + batch_img_label
                        
                        # Sort by activation value and keep the best 9.
                        combined = list(zip(all_vals, all_img_idx, all_rf_coords, all_img_label))
                        combined.sort(key=lambda x:x[0], reverse=True)
                        top9_combined = combined[:9]

                        # Unzip back into the global record.
                        top9_vals, top9_img_idx, top9_rf_coords, top9_img_label = zip(*top9_combined)
                        layer_data[receptive_layer_id][c]['top9_val'] = list(top9_vals)
                        layer_data[receptive_layer_id][c]['img_idx'] = list(top9_img_idx)
                        layer_data[receptive_layer_id][c]['rf_coords'] = list(top9_rf_coords)
                        layer_data[receptive_layer_id][c]['img_label'] = list(top9_img_label)
        
        # Build and save one grid image per layer.
        for layer_name, channels_data in layer_data.items():
            all_patches = []
            base_height, base_width = 0, 0
            
            # First pass: find the largest patch size in this layer.
            for c, channel_data in channels_data.items():
                for i in range(9):
                    if channel_data['img_idx'][i] is not None:
                        img_idx = channel_data['img_idx'][i]
                        x_original, _, _ = data[img_idx]
                        if "Normalize" in self.dataset.transforms.__dict__.__repr__():
                            x_original = self.dataset.deNorm(x_original)
                        rf_coords = channel_data['rf_coords'][i]
                        rf_h_start, rf_h_end, rf_w_start, rf_w_end = rf_coords
                        patch = x_original[:,
                                max(0, rf_h_start):min(x_original.shape[1], rf_h_end),
                                max(0, rf_w_start):min(x_original.shape[2], rf_w_end)]
                        if patch.numel() > 0:
                            h, w = patch.shape[1], patch.shape[2]
                            base_height = max(base_height, h)
                            base_width = max(base_width, w)
            
            # Skip the layer when it yields no valid patch.
            if base_height == 0 or base_width == 0:
                continue
            
            # Second pass: crop/pad every patch to the common size.
            for c, channel_data in channels_data.items():
                for i in range(9):
                    if channel_data['img_idx'][i] is not None:
                        img_idx = channel_data['img_idx'][i]
                        x_original, _, _ = data[img_idx]
                        if "Normalize" in self.dataset.transforms.__dict__.__repr__():
                            x_original = self.dataset.deNorm(x_original)
                        rf_coords = channel_data['rf_coords'][i]
                        rf_h_start, rf_h_end, rf_w_start, rf_w_end = rf_coords
                        patch = x_original[:,
                                max(0, rf_h_start):min(x_original.shape[1], rf_h_end),
                                max(0, rf_w_start):min(x_original.shape[2], rf_w_end)]
                        
                        # Pad up / center-crop down to the common size.
                        if patch.shape[1] < base_height or patch.shape[2] < base_width:
                            pad_h = base_height - patch.shape[1]
                            pad_w = base_width - patch.shape[2]
                            patch = F.pad(patch, (0, pad_w, 0, pad_h), value=0)
                        elif patch.shape[1] > base_height or patch.shape[2] > base_width:
                            patch = transforms.functional.center_crop(patch, (base_height, base_width))
                        
                        all_patches.append(patch)
                    else:
                        # Black placeholder for missing entries.
                        # NOTE(review): real patches keep the input's channel
                        # count, which may not be 3 — confirm shapes match.
                        all_patches.append(torch.zeros(3, base_height, base_width))
            
            # One grid row of 9 patches per channel.
            # BUGFIX: was len(channel_data) — the *last channel's* dict,
            # which always has 4 keys; the row count must be the number of
            # channels in this layer.
            num_channels = len(channels_data)
            grouped_patches = [all_patches[i*9 : (i+1)*9] for i in range(num_channels)]
            
            rows = []
            for patches in grouped_patches:
                row_grid = make_grid(patches, nrow=9, padding=2, normalize=True, scale_each=True, pad_value=0)
                rows.append(row_grid)
            
            # Nothing to render for this layer.
            if len(rows) == 0:
                continue
                
            # All rows share the same dimensions.
            base_height = rows[0].shape[1]
            base_width = rows[0].shape[2]
            red_line_height = 1  # height of the red separator line
            
            total_height = num_channels * base_height + (num_channels - 1) * red_line_height
            total_width = base_width
            
            final_grid = torch.zeros(3, total_height, total_width)
            
            current_height = 0
            for i, row in enumerate(rows):
                # Place the current channel row.
                final_grid[:, current_height:current_height+base_height, :] = row
                current_height += base_height
                # Red separator line between channel rows (not after the last).
                if i < num_channels - 1:
                    final_grid[:, current_height:current_height+red_line_height, :] = torch.tensor([1.0, 0.0, 0.0]).view(3,1,1)
                    current_height += red_line_height
            
            # BUGFIX: this save block used to sit inside the row loop above,
            # rewriting a partially assembled grid once per channel; save
            # once per layer instead.
            grid_img = self.to_pil(final_grid)
            grid_path = grid_dir / f"{layer_name}_grid.png"
            grid_img.save(grid_path)
            self.logger.info(f"Saved activation grid for {layer_name} to {grid_path}")

        json_path = grid_dir / "layer_data.json"
        with open(json_path, "w", encoding="utf-8") as f:
            json.dump({k:dict(v) for k, v in layer_data.items()}, f, ensure_ascii=False, indent=2)
            self.logger.info(f"Saved layer_data to {json_path}")
        
        # Per-layer statistics over the labels of the top-activation images.
        # NOTE(review): 'img_label' is a *list* of up to 9 labels, so each
        # counter key is the stringified list rather than an individual
        # label — confirm that is the intended grouping.
        layer_statistic = defaultdict(lambda: defaultdict(lambda: 0))
        for layer_name, channels_data in layer_data.items():
            for c in channels_data:
                current_font = channels_data[c]['img_label']
                layer_statistic[layer_name][f"{current_font}"] += 1
            # Re-key with sorted keys for stable JSON output.
            sorted_keys = sorted(layer_statistic[layer_name].keys())
            sorted_dict = {key: layer_statistic[layer_name][key] for key in sorted_keys}
            layer_statistic[layer_name] = sorted_dict

        layer_statistic_path = grid_dir / "layer_statistic.json"
        with open(layer_statistic_path, "w", encoding="utf-8") as f:
            json.dump(dict(layer_statistic), f, ensure_ascii=False)  # plain dict for serialization
            self.logger.info(f"Saved layer_data to {layer_statistic_path}")

    def gradCam(self):
        """单样本测试和热力图显示"""
        model = self.model.module if hasattr(self.model, "module") else self.model
        assert model.name in ['ArcFont', 'MDW_Net', 'MobileNetV4_Font'], "该模型尚未支持特征图查看功能"
        train_dataset = self.dataset[0]
        val_dataset = self.dataset[1]
        
        # GradCam会自动执行model.eval()，所以这里不用写
        # model.eval()
        targetLayers = [model.features]

        print(f"Length of test_data:{len(self.dataset)}")
        
        while(True):
            check_mode = input("check_mode: 1 for free_check, 2 for auto_check: ")
            try:
                check_mode = int(check_mode)
                if check_mode == 1 or check_mode == 2:
                    break
            except ValueError:
                print("这不是一个有效的数字，请重新输入。")
                
        if check_mode == 1:
            #自由测试模式
            used_set = None
            # 选择数据集
            while(True):
                dataset_mode = input("dataset_mode: 1 for train_set, 2 for val_set: ")
                try:
                    dataset_mode = int(dataset_mode)
                    if dataset_mode == 1:
                        used_set = train_dataset
                        break
                    elif dataset_mode == 2:
                        used_set = val_dataset
                        break
                except ValueError:
                    print("这不是一个有效的数字，请重新输入。")
            while(True): 

                while True:
                    sample_idx = input("input sample_id:")
                    try:
                        # 尝试将输入转换为整数
                        sample_idx = int(sample_idx)
                        # 如果转换成功，则跳出循环
                        break
                    except ValueError:
                        # 如果转换失败，捕获异常并提示用户重新输入
                        print("这不是一个有效的数字，请重新输入。")
                    
                # 验证样本索引
                if sample_idx < 0 or sample_idx >= len(used_set):
                    raise ValueError(f"Invalid sample index: {sample_idx}. Must be between 0 and {len(used_set)-1}")
                    
                # 获取指定样本[c,h,w]
                x, y, idx = used_set[sample_idx]   # x:(1,96,96) y:(1)
                print(f"y.shape:{y.shape}")
                x = x.unsqueeze(0).to(self.device)  # -> (1, 1, 96, 96)
                y = y.unsqueeze(0).to(self.device) # -> (1, 1)
                
                targets = [ClassifierOutputTarget(y)]
                input_tensor = x
                
                with ShapleyCAM(model=model, target_layers=targetLayers) as cam:
                    grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
                    grayscale_cam = grayscale_cam[0, :]
                    rgb_img = L_to_RGB(used_set.deNorm(input_tensor).cpu())
                    visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
                    model_outputs = cam.outputs
                
                # 显示预测结果
                y_pred = model_outputs[0].argmax(-1)
                true_label = used_set.i2label[int(y[-1].cpu())]
                pred_label = used_set.i2label[int(y_pred.cpu())]
                print(f"\nSample Index: {sample_idx}")
                print(f"True Label: {true_label}")
                print(f"Predicted Label: {pred_label}")
                
                
                plt.imshow(visualization)
                plt.show()
                gradCam_img = self.to_pil(visualization)
                img_dir = Path(self.config.img_dir)
                img_dir.mkdir(parents=True, exist_ok=True)
                img_path = img_dir / f"{idx}_gradCam.png"
                gradCam_img.save(img_path)
                print(f"Saved grad_Cam for idx: {idx} to {img_path}")
        else:
            # 永字在数据集nankai_black中的id
            # train_samples_id = [453, 3418, 4215, 5007, 5791, 6596, 8978, 9777, 10571, 12142]
            # val_samples_id = [313, 514, 1845, 2040, 2835, 3240]
            
            # 武字在数据集nankai_black中的id
            train_samples_id = [130, 1701, 4669, 5453, 7053, 8637, 9448, 10231, 11024, 12616]
            val_samples_id = [223, 777, 964, 1567, 1958, 2947]

            for id in train_samples_id:
                # 获取指定样本[c,h,w]
                x, y, idx = train_dataset[id]   # x:(1,96,96) y:(1)
                print(f"y.shape:{y.shape}")
                x = x.unsqueeze(0).to(self.device)  # -> (1, 1, 96, 96)
                y = y.unsqueeze(0).to(self.device) # -> (1, 1)
                
                targets = [ClassifierOutputTarget(y)]
                input_tensor = x
                
                with ShapleyCAM(model=model, target_layers=targetLayers) as cam:
                    grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
                    grayscale_cam = grayscale_cam[0, :]
                    rgb_img = L_to_RGB(train_dataset.deNorm(input_tensor).cpu())
                    visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
                    model_outputs = cam.outputs
                
                # 显示预测结果
                y_pred = model_outputs[0].argmax(-1)
                true_label = train_dataset.i2label[int(y[-1].cpu())]
                pred_label = train_dataset.i2label[int(y_pred.cpu())]
                print(f"\nSample Index: {idx}")
                print(f"True Label: {true_label}")
                print(f"Predicted Label: {pred_label}")
                
                gradCam_img = self.to_pil(visualization)
                img_dir = Path(self.config.img_dir)
                img_dir.mkdir(parents=True, exist_ok=True)
                img_path = img_dir / f"{idx}_{true_label}_gradCam.png"
                gradCam_img.save(img_path)
                print(f"Saved grad_Cam for idx: {idx} to {img_path}")
            
            for id in val_samples_id:
                # 获取指定样本[c,h,w]
                x, y, idx = val_dataset[id]   # x:(1,96,96) y:(1)
                print(f"y.shape:{y.shape}")
                x = x.unsqueeze(0).to(self.device)  # -> (1, 1, 96, 96)
                y = y.unsqueeze(0).to(self.device) # -> (1, 1)
                
                targets = [ClassifierOutputTarget(y)]
                input_tensor = x
                
                with ShapleyCAM(model=model, target_layers=targetLayers) as cam:
                    grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
                    grayscale_cam = grayscale_cam[0, :]
                    rgb_img = L_to_RGB(val_dataset.deNorm(input_tensor).cpu())
                    visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True)
                    model_outputs = cam.outputs
                
                # 显示预测结果
                y_pred = model_outputs[0].argmax(-1)
                true_label = val_dataset.i2label[int(y[-1].cpu())]
                pred_label = val_dataset.i2label[int(y_pred.cpu())]
                print(f"\nSample Index: {idx}")
                print(f"True Label: {true_label}")
                print(f"Predicted Label: {pred_label}")
                
                gradCam_img = self.to_pil(visualization)
                img_dir = Path(self.config.img_dir)
                img_dir.mkdir(parents=True, exist_ok=True)
                img_path = img_dir / f"{idx}_{true_label}_gradCam.png"
                gradCam_img.save(img_path)
                print(f"Saved grad_Cam for idx: {idx} to {img_path}")

        return true_label, pred_label


    def umap_features(self):
        """Visualize the model's penultimate-layer feature space with UMAP.

        Extracts the input to ``model.classifier`` (i.e. the feature vector
        feeding the classification head) for every sample of the validation
        split, projects it to 2D or 3D with UMAP depending on ``self.state``,
        saves a per-class scatter plot plus the raw arrays under
        ``visualizations/``, and returns the projection.

        Returns:
            tuple: ``(embedding, labels)`` — the UMAP projection of shape
            (N, 2 or 3) and the integer label array of shape (N,).
        """
        config = self.config
        # Unwrap DataParallel/DistributedDataParallel if present.
        model = self.model.module if hasattr(self.model, "module") else self.model
        state = self.state

        assert model.name in ['ArcFont', 'MDW_Net', 'MobileNetV4_Font'], "该模型尚未支持"
        model.eval()
        print(f"Length of test_data:{len(self.dataset)}")

        # dataset[1] is the validation split.
        data = self.dataset[1]
        loader = DataLoader(data, batch_size=config.batch_size, shuffle=True, num_workers=config.num_workers)

        # Separate accumulator name so it cannot clash with model.features.
        collected_features = []
        labels = []
        filenames = []

        def hook_fn(module, input, output):
            # Capture the *input* to the classifier head — the penultimate
            # feature vector for each sample in the batch.
            collected_features.append(input[0].detach().cpu().numpy())

        hook_handle = model.classifier.register_forward_hook(hook_fn)

        try:
            with torch.no_grad():
                for x, y, idx in tqdm(loader, desc="提取特征"):
                    x = x.to(self.device, non_blocking=True)
                    y = y.to(self.device, non_blocking=True)
                    labels.extend(y.cpu().numpy())
                    filenames.extend(idx)
                    # Forward pass triggers hook_fn above; outputs are unused.
                    _, _ = model(x, y)
        finally:
            # Always detach the hook, even if the forward pass raises,
            # so the model is left in a clean state.
            hook_handle.remove()

        features = np.vstack(collected_features)
        labels = np.array(labels)
        print(f"收集到 {features.shape[0]} 个样本的特征向量")
        print(f"特征维度: {features.shape[1]}")

        out_dir = Path('visualizations')
        out_dir.mkdir(parents=True, exist_ok=True)

        # 2 or 3 output dimensions depending on the requested state;
        # a single reducer construction replaces the duplicated branches.
        n_components = 3 if state == '3d' else 2
        print("运行UMAP降维...")
        reducer = umap.UMAP(
            n_components=n_components,
            n_neighbors=15,
            min_dist=0.1,
            metric='euclidean',
            random_state=42
        )
        embedding = reducer.fit_transform(features)

        print(f"创建{state}可视化...")
        fig = plt.figure(figsize=(12, 10))
        if state == '3d':
            ax = fig.add_subplot(111, projection='3d')
        else:
            ax = fig.add_subplot(111)

        unique_labels = np.unique(labels)
        cmap = plt.get_cmap('tab20', len(unique_labels))

        # One scatter call per class so each label gets its own legend entry.
        for i, label in enumerate(unique_labels):
            mask = labels == label
            coords = [embedding[mask, d] for d in range(n_components)]
            ax.scatter(
                *coords,
                color=cmap(i),
                label=f'Class {label}',
                alpha=0.7,
                s=20
            )

        ax.legend(loc='upper right', bbox_to_anchor=(1.15, 1))
        ax.set_xlabel('UMAP Dimension 1')
        ax.set_ylabel('UMAP Dimension 2')
        if state == '3d':
            ax.set_zlabel('UMAP Dimension 3')
            ax.set_title('3D Feature Space Visualization (UMAP)')
            ax.view_init(elev=30, azim=45)
        else:
            ax.set_title('2D Feature Space Visualization (UMAP)')

        plt.tight_layout()
        # Fix: the 2D plot was previously saved under a hardcoded "3d_" name;
        # the filename now reflects the actual dimensionality.
        plot_path = out_dir / f'{n_components}d_feature_space_umap.png'
        plt.savefig(plot_path, dpi=300)
        print(f"可视化结果已保存至 {plot_path}")

        # Persist raw features and the projection for offline analysis.
        np.savez_compressed(
            out_dir / 'feature_data.npz',
            features=features,
            embedding=embedding,
            labels=labels,
            filenames=filenames if filenames else None
        )

        return embedding, labels
# 