import torch
import torch.nn as nn
import torch.nn.functional as F
import pandas as pd
import csv
import random
import numpy as np
import shutil
import itertools
from collections import defaultdict
from io import BytesIO
import os
import re
import glob


from IPython import embed

from PIL import Image, ImageDraw, ImageFont
import matplotlib.pyplot as plt
from torchvision import transforms
# from torch_receptive_field import receptive_field, receptive_field_for_unit


import sys
from pathlib import Path
from tqdm import tqdm
import logging
import json


def set_seed(seed):
    """Seed every RNG source used by the project for reproducibility.

    Covers Python's ``random``, NumPy, PyTorch (CPU and all GPUs), the
    interpreter hash seed, and forces deterministic cuDNN/cuBLAS behavior.

    :param seed: integer seed applied to all generators
    """
    random.seed(seed)                   # python stdlib RNG
    np.random.seed(seed)                # numpy
    torch.manual_seed(seed)             # torch CPU
    # BUG FIX: the key was misspelled 'PYTHONASHEDD'; the real variable is
    # PYTHONHASHSEED (note it only affects newly spawned interpreters).
    os.environ['PYTHONHASHSEED'] = str(seed)
    torch.cuda.manual_seed_all(seed)    # all GPUs (no-op without CUDA)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.enabled = True
    # required by CUDA >= 10.2 for deterministic cuBLAS kernels
    os.environ['CUBLAS_WORKSPACE_CONFIG'] = ':16:8'
    torch.use_deterministic_algorithms(True)

def get_gb2312_chars(level=0):
    """
    Extract the Chinese characters of the GB2312 charset, cache them as a csv
    under ./data/charlist/, and return them as a list.

    :param level: 0 - all characters (6763), 1 - level-1 characters (3755); default 0
    :return: list of characters for the requested level
    """
    suffix = "_level1" if level == 1 else "_all"
    gb2312_chars_path = Path(f'./data/charlist/gb2312_chars{suffix}.csv')
    gb2312_chars = []
    if not gb2312_chars_path.exists():
        gb2312_chars_path.parent.mkdir(parents=True, exist_ok=True)
        print(f"Create directory '{gb2312_chars_path.parent}'")

        # the first-byte range depends on the requested level
        first_byte_range = {
            0: range(0xB0, 0xF8),    # full range: 0xB0-0xF7
            1: range(0xB0, 0xD8)     # level-1 range: 0xB0-0xD7
        }[level]

        # walk the two-byte GB2312 code space, keeping every decodable pair
        for hi, lo in itertools.product(first_byte_range, range(0xA1, 0x100)):
            try:
                gb2312_chars.append(bytes([hi, lo]).decode('GB2312'))
            except UnicodeDecodeError:
                # not a valid GB2312 code point — skip it
                continue

        # sanity-check the character count against the published charset sizes
        expected_counts = {0: 6763, 1: 3755}[level]
        if len(gb2312_chars) != expected_counts:
            print(f"Warning: Expected {expected_counts} characters, got {len(gb2312_chars)}")

        try:
            pd.Series(gb2312_chars).to_csv(str(gb2312_chars_path), header=['char'], index_label='index')
            print(f"Save GB2312 characters to {gb2312_chars_path}")
        except Exception:
            print(f"Exception raised in saving gb2312 characters to {gb2312_chars_path}")
        print(f"Total GB2312 characters were extracted: {len(gb2312_chars)}")
    gb2312_chars = get_charlist(gb2312_chars_path)
    return gb2312_chars

def get_Nankai_chars():
    """
    Collect the character names (file stems) from the train/val splits of the
    Nankai-Chinese-Font-Style-Dataset (黑体 font directories).

    :return: (train_chars, val_chars) lists of character strings
    """
    train_dir = Path('./data/Nankai-Chinese-Font-Style-Dataset-master/FontData/train/黑体')
    val_dir = Path('./data/Nankai-Chinese-Font-Style-Dataset-master/FontData/val/黑体')
    assert train_dir.exists()
    assert val_dir.exists()
    train_chars = [entry.stem for entry in train_dir.iterdir()]
    val_chars = [entry.stem for entry in val_dir.iterdir()]
    print(f"Extracted {len(train_chars)} train chars and {len(val_chars)} val chars.")
    return train_chars, val_chars


def get_ecdict_words(ecdict_csv, word_min_len, word_max_len, word_sample_size, seed):
    """
    Sample English words from the ECDICT vocabulary csv.

    1. Read every phrase from the first column of `ecdict_csv`.
    2. Split phrases into unique words and keep those whose length lies in
       [word_min_len, word_max_len].
    3. Randomly sample `word_sample_size` of them (seeded for repeatability).
    """
    random.seed(seed)
    ecdict_csv = Path(ecdict_csv)
    try:
        assert ecdict_csv.exists()
    except Exception:
        print(f"{ecdict_csv} not found.")

    frame = pd.read_csv(str(ecdict_csv))
    phrase_list = list(frame.iloc[:, 0])
    print(f"extracted {len(phrase_list)} phrases.")
    word_list = []
    print("Starting add word from phrase_list to word_list:")
    for phrase in tqdm(phrase_list):
        # phrases may contain non-string values (e.g. NaN); str() handles both
        word_list.extend(str(phrase).split())

    print("Transfer list to set...")
    unique_words = set(word_list)
    print(f"reduced to {len(unique_words)} words")

    words_filtered = [w for w in unique_words if word_min_len <= len(w) <= word_max_len]
    print(f"filtered to {len(words_filtered)} words with length between {word_min_len} and {word_max_len}")

    words_sample = random.sample(words_filtered, word_sample_size)
    print(f"sampled {word_sample_size} word")
    return words_sample

# read a character list from the second column of a csv file
def get_charlist(char_csv_dir):
    """Read a character list from a csv produced by this module.

    The csv is expected to carry the characters in its second column
    (columns: index, char), as written by `get_gb2312_chars`.

    :param char_csv_dir: path (str or Path) of the csv file
    :return: list of the second-column values
    """
    df = pd.read_csv(char_csv_dir)
    char_list = df.iloc[:, 1].tolist()
    # FIX: 'charactors' -> 'characters' in the log message
    print(f"Get {len(char_list)} characters from {char_csv_dir}.")
    return char_list

def get_test_chars_for_nankai(char_sample_size, seed):
    """Sample test characters: GB2312 characters absent from the Nankai set.

    :param char_sample_size: number of characters to sample
    :param seed: RNG seed for a reproducible sample
    :return: list of sampled characters
    """
    random.seed(seed)
    try:
        gb2312_chars = get_charlist('./data/charlist/gb2312_chars.csv')
        nankai_chars = get_charlist('./data/charlist/train_chars.csv') + get_charlist('./data/charlist/val_chars.csv')
    except Exception:
        print("failed to read words list")
        sys.exit(1)

    # candidates = GB2312 minus everything already covered by Nankai
    candidates = list(set(gb2312_chars).difference(nankai_chars))
    test_chars = random.sample(candidates, char_sample_size)
    print(f"sampled {len(test_chars)} chars from gb2312, used for test dataset")
    return test_chars

def generator(words_csv, mod='train', output_folder='./dataset/',
              font_folder='./data/fonts', data_aug_size=0, image_size=(200, 200), font_size=96, seed=42):
    '''
    Synthetic VFR data generator. Renders every word with every TrueType font
    and saves the images under ``<output_folder>/<mod>/<font_name>/``.

    Parameters:
    - words_csv (str | Path | list): csv file whose second column holds the words,
      or an in-memory list of words.
    - mod (str): dataset split to generate ('train', 'val', 'test').
    - output_folder (str): root folder for the generated images.
    - font_folder (str): folder containing TrueType font files (.ttf, .ttc).
    - data_aug_size (int): number of extra rotated copies per image (0 = none).
    - image_size ((int, int)): width/height of each generated image.
    - font_size (int): point size used to render the words.
    - seed (int): seed for the rotation-angle RNG.

    Output:
    One sub-folder per font under ``<output_folder>/<mod>``; one grayscale JPEG
    per (word, augmentation) pair, named ``<font>_<word>_<i>_<j>.jpg``.

    Example Usage:

    words = ['hello', 'world', 'python', 'anaconda']
    generator(words, mod='train', output_folder='/path/output',
              font_folder='/path/fonts', image_size=(400, 200), font_size=100)

    Note:
    - Requires the Pillow library for image processing.
    - The output folder is created if it does not exist.
    '''
    random.seed(seed)

    # fail fast on an invalid split name, before touching the filesystem
    assert mod in {'train', 'val', 'test'}

    font_folder = Path(font_folder)

    # Create the output directory if needed
    output_folder = Path(output_folder)
    if not output_folder.exists():
        output_folder.mkdir(parents=True)
        print(f"Create directory {output_folder} for saveing the font_image")

    # All TrueType fonts of the respective language
    font_files = [f for f in font_folder.iterdir() if f.suffix.lower() in ('.ttf', '.ttc')]

    if isinstance(words_csv, (str, Path)):
        assert Path(words_csv).exists()
        words = get_charlist(str(words_csv))
    elif isinstance(words_csv, list):
        words = words_csv
    else:
        # BUG FIX: previously an unsupported type left `words` undefined and
        # the function crashed later with a confusing NameError.
        raise TypeError(f"words_csv must be a csv path or a list, got {type(words_csv)}")

    print(f"Starting generate {mod} image from words and fonts")
    count = 0       # total number of generated images
    for font_file in font_files:
        # Load the TrueType font file and create a font object
        font_name = font_file.stem
        font = ImageFont.truetype(str(font_file), font_size)
        print(f"Font Name - {font_name}")

        # One output sub-folder per split/font
        # (replaces the three identical train/val/test branches)
        font_output_folder = output_folder / mod / font_name
        font_output_folder.mkdir(parents=True, exist_ok=True)

        for i, word in enumerate(tqdm(words), start=0):
            for j in range(data_aug_size + 1):
                # Grayscale+alpha canvas: white luminance, fully transparent
                word_img = Image.new('LA', image_size, (255, 0))
                draw = ImageDraw.Draw(word_img)

                # Image center; the 'mm' anchor centers the text on this point
                x = image_size[0] // 2
                y = image_size[1] // 2
                draw.text((x, y), word, font=font, fill=(0, 255), anchor='mm')

                # Data augmentation: random rotation in (-30, 30) degrees
                if j > 0:
                    angle = random.uniform(-30, 30)
                    word_img = word_img.rotate(angle)

                count += 1

                # Composite through the alpha channel onto a white background,
                # which avoids the black edges introduced by rotation
                img = Image.new('L', image_size, 255)
                img.paste(word_img, mask=word_img.split()[1])

                # Save the word image
                image_filename = f"{font_name}_{word}_{i}_{j}.jpg"
                image_path = font_output_folder.joinpath(image_filename)
                img.save(str(image_path))
    print(f"Generated {count} image from {words_csv}\n")

def data_aug(img_folder, num=1):
    """
    Create `num` randomly rotated variants of every image under `img_folder`,
    saved next to the originals with a ``_dag_<k>`` suffix.
    """
    root = Path(img_folder)
    for _, path in enumerate(tqdm(root.rglob('*')), start=0):
        if not (path.is_file() and path.suffix in ('.jpg', '.jpeg', '.png')):
            continue
        src = Image.open(str(path)).convert('L')    # single-channel image
        for k in range(num):
            canvas = Image.new('L', src.size, 255)
            angle = random.uniform(-30, 30)
            rotated = src.rotate(angle, fillcolor=255)
            # NOTE: the paste covers the whole canvas since both are the same size
            canvas.paste(rotated)
            out_path = path.parent / f"{path.stem}_dag_{k+1}{path.suffix}"
            canvas.save(str(out_path))



def cal_mean_std(img_folder: str):
    """Compute grayscale mean/std over all images in a folder tree.

    Images are loaded in 'L' mode (one channel). The result is the average of
    the per-image means and stds, not the exact pooled statistics over all
    pixels.

    :param img_folder: root folder, scanned recursively for jpg/jpeg/png files
    :return: (mean, std) as 1-element float64 tensors
    :raises ValueError: if no image file is found under `img_folder`
    """
    mean = 0    # 'L' image has only 1 channel
    std = 0     # 'L' image has only 1 channel
    nb_samples = 0
    for file in tqdm(Path(img_folder).rglob('*')):
        if file.is_file() and file.suffix in ['.jpg', '.jpeg', '.png']:
            img = Image.open(str(file)).convert('L')    # H, W, 1
            img_tensor = transforms.ToTensor()(img).to(torch.float64)  # 1, H, W
            C, H, W = img_tensor.shape[:3]
            img_tensor = img_tensor.view(C, -1)
            mean += img_tensor.mean(1)
            std += img_tensor.std(1)
            nb_samples += 1
    if nb_samples == 0:
        # previously this fell through to an opaque ZeroDivisionError
        raise ValueError(f"No image files found under {img_folder}")
    mean /= nb_samples
    std /= nb_samples
    print(f"mean:{mean}, std:{std}")
    # nankai_raw  mean:0.8401  std:0.3287
    # nankai_black  mean:0.1599  std:0.3295
    # BUG FIX: the statistics were computed but never returned
    return mean, std

def convert_image_color(src_dir, dst_dir, inverse=True, resize=None):
    """
    Recursively convert the colors of every image under `src_dir`, mirroring
    the directory structure into `dst_dir`. Non-image files are copied as-is.

    params:
        src_dir: source directory path
        dst_dir: destination directory path
        inverse: whether to invert colors (black-on-white -> white-on-black)
        resize: optional (width, height) to resize each image to; None keeps size
    """
    src_path = Path(src_dir)
    dst_path = Path(dst_dir)
    
    for item in tqdm(src_path.rglob('*'), desc='Processing files'):
        relative_path = item.relative_to(src_path)
        target_path = dst_path / relative_path
        
        # directories: just recreate them on the destination side
        if item.is_dir():
            target_path.mkdir(parents=True, exist_ok=True)
            continue
            
        # files
        try:
            # non-image files are copied verbatim (only if not already present)
            if item.suffix.lower() not in ['.jpg', '.jpeg', '.png']:
                target_path.parent.mkdir(parents=True, exist_ok=True)  # ensure the target directory exists
                if not target_path.exists():
                    shutil.copy2(item, target_path)  # copy2 keeps file metadata
                continue
                
            # image files
            with Image.open(item) as img:
                # normalize to standard 'L' mode (8-bit grayscale)
                if img.mode == 'RGBA':
                    # flatten the transparent background to black first
                    background = Image.new('RGBA', img.size, (0, 0, 0, 255))
                    background.paste(img, mask=img.split()[-1])
                    img = background.convert('L')
                elif img.mode != 'L':
                    img = img.convert('L')
                
                # invert colors (black text on white -> white text on black)
                if inverse:
                    img = Image.eval(img, lambda x: 255 - x)
                
                if resize:
                    img = img.resize(resize)
                
                # snap near-black/near-white pixels to pure 0/255
                img = img.point(lambda x: 0 if x < 15 else 255 if x > 240 else x)
                
                # save as grayscale under the original file name
                # NOTE(review): after convert()/point(), img.format is None, so
                # the output format is inferred from the extension — confirm
                # that this is intended.
                target_path.parent.mkdir(parents=True, exist_ok=True)
                img.save(target_path, format=img.format, quality=100)
                
        except Exception as e:
            print(f"Error processing {item}: {str(e)}")

def init_log(name, log_dir):
    """
    Build a logger that writes every level to the file at `log_dir`, while
    echoing only INFO-level records to stdout.

    logger.info(...)  -> console AND file
    logger.debug/warning/error(...) -> file only
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    logger.handlers.clear()
    logger.addHandler(logging.FileHandler(log_dir))

    class _OnlyInfo(logging.Filter):
        """Pass exactly the INFO level, nothing above or below."""
        def filter(self, record):
            return record.levelno == logging.INFO

    stream_handler = logging.StreamHandler(sys.stdout)
    stream_handler.setLevel(logging.INFO)   # coarse level filter
    stream_handler.addFilter(_OnlyInfo())   # exact INFO match
    logger.addHandler(stream_handler)

    return logger

def convert_to_L(input_folder):
    """Convert every jpg/jpeg/png under `input_folder` to 8-bit grayscale, in place."""
    for path in Path(input_folder).rglob('*'):
        if path.is_file() and path.suffix in ('.jpg', '.jpeg', '.png'):
            Image.open(path).convert('L').save(path)
            
def L_to_RGB(input: torch.Tensor):
    """Turn a single grayscale tensor image into an RGB numpy array.

    :param input: tensor of shape [1, 1, H, W]
    :return: np.ndarray of shape (H, W, 3), the channel replicated three times
    """
    assert input.shape[0] == 1 and input.shape[1] == 1
    gray = input.squeeze().numpy()
    # replicate the single channel along a new trailing axis
    return np.repeat(gray[..., np.newaxis], 3, axis=-1)
    

def detect_content_box(img, threshold=250, margin=1):
    """Locate the bounding box of dark content in a (near-)white image.

    A pixel counts as content when its value is below `threshold`.

    :param img: PIL image or ndarray-convertible object
    :param threshold: pixel values below this are treated as content
    :param margin: padding (pixels) added around the detected box
    :return: (left, top, right, bottom); falls back to the full frame on error
    """
    try:
        np_img = np.array(img)
        # project onto the x axis: columns containing any content pixel
        x_proj = np.any(np_img < threshold, axis=0)
        x_min = np.argmax(x_proj)
        x_max = len(x_proj) - np.argmax(x_proj[::-1])
        # project onto the y axis: rows containing any content pixel
        y_proj = np.any(np_img < threshold, axis=1)
        y_min = np.argmax(y_proj)
        y_max = len(y_proj) - np.argmax(y_proj[::-1])

        # left, top, right, bottom (clamped to the image bounds)
        return (max(0, x_min - margin), max(0, y_min - margin),
                min(np_img.shape[1], x_max + margin), min(np_img.shape[0], y_max + margin))
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    except Exception:
        # fall back to the full frame; only PIL-like inputs have width/height
        return (0, 0, img.width, img.height)

def count_parameters(model):
    """
    Count the trainable parameters of a model, printing a per-tensor breakdown.

    arg:
        model(nn.Module): pytorch model
    return:
        (counts, total):
            counts (dict): parameter count per named tensor
            total (int): total number of trainable parameters
    """
    counts = {}
    total = 0
    for name, p in model.named_parameters():
        if p.requires_grad:
            count = p.numel()
            counts[name] = count
            total += count
            print(f"{name}:{count}")
    print(f"total:{total}")
    # BUG FIX: the docstring promised a dict and a total, but nothing was returned
    return counts, total

def logit_2_prob(logit, i2label):
    """
    Map a logit vector to a {label: probability} dict via softmax.

    arg:
        logit: 1-D tensor of shape (num_classes,)
        i2label: dict converting class index -> label string
    return:
        probs_dict: dict{label: probability rounded to 4 places}, one entry per class
    """
    assert logit.dim() == 1
    probs = F.softmax(logit, dim=-1)
    return {i2label[idx]: round(p.item(), 4) for idx, p in enumerate(probs)}

def add_text_to_img(tensor_img, text, fn_toPIL, fn_toTensor):
    """Stamp `text` near the top-left corner of a tensor image.

    Converts to PIL with `fn_toPIL`, draws the text with a fixed Noto CJK font,
    and converts back with `fn_toTensor`.
    """
    pil_img = fn_toPIL(tensor_img)
    font = ImageFont.truetype('/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc', 10, encoding='utf-8')
    ImageDraw.Draw(pil_img).text((1,1),text, fill=(0), font=font)
    return fn_toTensor(pil_img)

def cycle_triangular(x, T, a, start=0.0):
    """
    Periodic triangular wave.

    Rises linearly from `start` to `start + a` during the first `a` units of
    each period, then falls linearly back to `start` over the remaining T - a.

    input:
        x (float): Input value
        T (float): Period
        a (float): Duration of the rising phase
        start (float): Initial value (default is 0.0)
    output:
        y (float): value in [start, start + a]
    raises:
        ValueError: if T <= 0, a <= 0, or a >= T
    """
    if T <= 0:
        raise ValueError("Period T must be a positive number")
    if a <= 0 or (T - a) <= 0:
        raise ValueError("Parameter a must be a positive number, and b = T - a must also be positive")

    b = T - a  # duration of the falling phase

    # position of x within the current period; always in [0, T)
    x_in_period = x % T

    if x_in_period < a:
        # rising phase: y increases linearly
        return start + x_in_period
    # falling phase: y decreases linearly from start + a back to start
    # (the old trailing `else` was unreachable since x % T < T always holds)
    max_value = start + a
    slope_down = (start - max_value) / b
    return max_value + slope_down * (x_in_period - a)


def register_hook_for_max_Path(model):
    # Cache feature maps on the model (model.feature_maps, keyed by a flat
    # layer index) by registering forward hooks on the model's leaf layers.
    model.feature_maps = {}
    def _make_hook_fn(layer_idx):
        # closure binds layer_idx so each hook writes into its own slot
        def hook(module, input, output):
            model.feature_maps[layer_idx] = output.detach()
        return hook
    # register a forward hook for every (interesting) layer
    submodules_list = []
    # first pass: drop the global-pooling and classifier heads
    for name, net in model.named_children():
        # no hooks for gap / fc / classifier; the remaining stem and features
        # children are nn.Sequential containers
        if name not in ['gap', 'fc', 'classifier']:
            for layer in net.children():
                # MDW_Block is special: it holds several kinds of submodules
                # and must be expanded one level further
                # NOTE(review): the inner loop below shadows the outer `name`
                if layer.__class__.__name__ == "MDW_Block":
                    for name, block in layer.named_children():
                        # depthwise / proj_layer are already sequences - append as-is
                        if name == 'depthwise' or name == 'proj_layer':
                            submodules_list.append(block)
                        # everything else gets wrapped in a one-element list
                        else: 
                            submodules_list.append([block])
                # NOTE(review): plain nn.Sequential has no `.name` attribute;
                # this condition raises AttributeError unless the project
                # assigns `.name = "conv_2d"` somewhere — confirm.
                elif layer.__class__.__name__ == "Sequential" and layer.name == "conv_2d":
                    submodules_list.append(layer)

                # other Sequential / ModuleList children are added unwrapped;
                # note that nested Sequentials are NOT supported here
                else:
                    submodules_list.append([layer])
    # flatten into one linear list of leaf layers
    _modules = itertools.chain(*submodules_list)
    for idx, layer in enumerate(_modules):
        # purely structural layers produce no interesting feature map
        if layer.__class__.__name__ in ['ShuffleLayer', 'to_channels_last', 'to_channels_first']:
            continue
        layer.register_forward_hook(_make_hook_fn(idx))
        
def get_feature_maps(model, layer_idx='all'):
    """Fetch cached feature maps from a model hooked by register_hook_for_max_Path.

    Args:
        model (module): model carrying a `feature_maps` dict
        layer_idx (int | str): layer index, or 'all' for every cached map
    Returns:
        Tensor: feature map of shape (batch, channels, height, width), or
        Dict: the whole feature-map dict when layer_idx == 'all'
    Raises:
        ValueError: when layer_idx is not 'all' and not a valid cached index
    """
    if layer_idx == 'all':
        return model.feature_maps
    try:
        key = int(layer_idx)
        return model.feature_maps[key]
    # FIX: was a bare `except`; catch only the failures int()/lookup can raise
    # and chain the cause so debugging information is preserved
    except (KeyError, TypeError, ValueError) as e:
        raise ValueError(f"Invalid layer index: {layer_idx}. Valid indices are 1-{model.layer_num}") from e

def find_common_char(root_dir):
    """Find the characters present in EVERY font sub-folder of `root_dir`.

    Each direct sub-directory of `root_dir` is treated as a font folder whose
    file stems are character names. The intersection is written to
    ``<root_dir>/common.csv`` and returned.

    :param root_dir: dataset split folder (str or Path), e.g. './dataset/val'
    :return: dict {character: [absolute file paths, one per font folder]}
    """
    root = Path(root_dir)
    subdirs = [d for d in root.iterdir() if d.is_dir()]

    # per sub-folder: (set of characters, {character: [file paths]})
    subdir_data = []
    for subdir in subdirs:
        char_paths = defaultdict(list)
        for file in subdir.glob('*'):
            if file.is_file():
                char = file.stem  # file name without extension
                char_paths[char].append(str(file.resolve()))
        subdir_data.append((set(char_paths.keys()), char_paths))

    # characters shared by every sub-folder
    common_chars = set.intersection(*[data[0] for data in subdir_data]) if subdir_data else set()

    # collect every path of each common character
    result = defaultdict(list)
    for char in common_chars:
        for data in subdir_data:
            result[char].extend(data[1][char])

    data = dict(result)
    # FIX: build the csv path with pathlib so root_dir may be a str or a Path
    # (the old `root_dir + '/common.csv'` raised TypeError for Path inputs)
    csv_path = root / 'common.csv'
    with open(csv_path, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(['汉字', '图片路径'])
        for char, paths in data.items():
            writer.writerow([char, ';'.join(paths)])

    return data

def act_func_view():
# Plot a side-by-side comparison of activation functions and save it to a PNG.
    def swish(x,beta=1.):
        # swish(x) = x * sigmoid(beta * x)
        # NOTE(review): F.sigmoid is deprecated in favor of torch.sigmoid
        return x*F.sigmoid(x*beta)

    x_tensor = torch.linspace(-5, 7, 100)
    # NOTE(review): activations1 is computed but never plotted — only
    # activations2 is used below; confirm whether this is intentional.
    activations1 = {
        "ReLU": F.relu(x_tensor),
        "LeakyReLU": F.leaky_relu(x_tensor),
        "ELU": F.elu(x_tensor),
        "ReLU6": F.relu6(x_tensor),
    }
    activations2 = {
        "GeLU": F.gelu(x_tensor),
        "Mish": F.mish(x_tensor),
        "Swish": swish(x_tensor),
        "H-Swish": F.hardswish(x_tensor),
    }
    x = x_tensor.numpy()
    activations = {k: v.numpy() for k, v in activations2.items()}

    # build the Matplotlib subplots
    plt.figure(figsize=(20, 5))
    colors = ['blue', 'orange', 'green', 'purple']
    
    # compute a shared y-range across all plotted activations
    all_y = np.concatenate(list(activations.values()))
    global_ymin = np.floor(np.min(all_y) - 0.5)  # -2.0
    global_ymax = np.ceil(np.max(all_y) + 0.5)   # 7.0

    for i, (name, y) in enumerate(activations.items(), 1):
        plt.subplot(1, 4, i)  # 1 row x 4 columns
        plt.plot(x, y, color=colors[i-1], linewidth=3)
        plt.title(name, fontsize=14)
        plt.grid(True)

        plt.xlim(-5, 7)
        # shared y-axis range so the curves are visually comparable
        plt.ylim(global_ymin, global_ymax)
        
        # uniform tick settings across subplots
        plt.xticks(np.arange(-5, 7, 1))
        plt.yticks(np.arange(int(global_ymin), int(global_ymax)+1, 1))

    plt.tight_layout(pad=3.0)

    # convert the Matplotlib figure into a PIL image via an in-memory buffer
    buffer = BytesIO()
    plt.savefig(buffer, format='png', dpi=300, bbox_inches='tight')
    buffer.seek(0)
    plot_img = Image.open(buffer)

    # compose the final output image (extra annotation is optional)
    final_img = Image.new('RGB', (plot_img.width, plot_img.height), 'white')
    final_img.paste(plot_img, (0, 0))
    # add a centered title
    draw = ImageDraw.Draw(final_img)
    try:
        font = ImageFont.truetype("arial.ttf", 40)
    except:
        font = ImageFont.load_default()
    # compute the x position that centers the title text
    text_width = font.getbbox("Activation Functions Comparison (PyTorch)")[2]
    x_position = (final_img.width - text_width) // 2
    draw.text((x_position, 20), "Activation Functions Comparison (PyTorch)", fill='black', font=font)
    # save and display the result
    final_img.save('combined_activations.png')
    final_img.show()



def shuffle_image_patches(img_tensor: torch.Tensor, 
                          patch_size: int, 
                          output_path: str = "output.png",
                          down_sample:bool = False) -> None:
    """
    Cut the input image into patch_size x patch_size blocks, shuffle them
    randomly, reassemble them with 1-pixel red separator lines, and save
    the result as an image file.

    Args:
        img_tensor : torch.Tensor - input image of shape (1, H, W);
            values presumably in [0, 1] since they are scaled by 255 on
            save — TODO confirm with callers
        patch_size : int - patch edge length (e.g. 3/5/7/9)
        output_path : str - output image path
        down_sample : bool - if True, 2x average-pool each patch first

    Returns:
        None
    """
    # input validation
    assert len(img_tensor.shape) == 3 and img_tensor.shape[0] == 1, "输入形状应为 (1, H, W)"
    assert isinstance(patch_size, int) and patch_size > 0, "块尺寸应为正整数"
    
    # original dimensions
    c, h, w = img_tensor.shape
    
    # crop so that both dimensions are divisible by patch_size
    new_h = (h // patch_size) * patch_size
    new_w = (w // patch_size) * patch_size
    cropped = img_tensor[:, :new_h, :new_w]
    
    # cut into patches
    blocks = cropped.unfold(1, patch_size, patch_size)  # unfold along height
    blocks = blocks.unfold(2, patch_size, patch_size)   # unfold along width
    
    # reshape (C, num_h, num_w, ph, pw) -> (num_total, C, ph, pw)
    # NOTE(review): this view relies on C == 1 (guaranteed by the assert
    # above); for C > 1 the element ordering would be wrong.
    num_h, num_w = new_h//patch_size, new_w//patch_size
    blocks = blocks.contiguous().view(-1, c, patch_size, patch_size)
    
    # random shuffle (uses the global torch RNG)
    perm_idx = torch.randperm(blocks.size(0))
    shuffled_blocks = blocks[perm_idx]

    # optional 2x down-sampling of every patch
    if down_sample:
        avg_pool = nn.AvgPool2d(2, stride=2)
        processed_blocks = avg_pool(shuffled_blocks)
        display_size = patch_size // 2
    else:
        processed_blocks = shuffled_blocks
        display_size = patch_size

    # reassemble the image (with 1-pixel red separator lines)
    red_line = torch.tensor([1.0, 0, 0], device=img_tensor.device)
    rows = []
    
    # horizontal concatenation, one output row per row of patches
    for row_idx in range(num_h):
        row_start = row_idx * num_w
        row_blocks = processed_blocks[row_start : row_start + num_w]
        
        h_pieces = []
        for i, blk in enumerate(row_blocks):
            # grayscale -> RGB by broadcasting the single channel
            rgb_blk = blk.expand(3, display_size, display_size)
            h_pieces.append(rgb_blk)
            
            # vertical separator line between patches
            if i < num_w - 1:
                v_line = red_line.view(3, 1, 1).expand(3, display_size, 1)
                h_pieces.append(v_line)
        
        # join the whole row
        row_img = torch.cat(h_pieces, dim=2)
        rows.append(row_img)
        
        # horizontal separator line between rows
        if row_idx < num_h - 1:
            h_line = red_line.view(3, 1, 1).expand(3, 1, row_img.shape[2])
            rows.append(h_line)

    # final vertical concatenation
    final_img = torch.cat(rows, dim=1)
    
    # save the result (values scaled to 8-bit)
    img_array = final_img.mul(255).permute(1, 2, 0).byte().cpu().numpy()
    Image.fromarray(img_array, 'RGB').save(output_path)
    

def draw_scatter(tensor_a, tensor_b):
    """
    Draw two (N, 2) point sets as a scatter plot whose axes cross at the origin.

    Args:
        tensor_a (torch.Tensor): shape (N, 2), drawn in blue
        tensor_b (torch.Tensor): shape (N, 2), drawn in red
    """
    # input validation
    assert tensor_a.dim() == 2 and tensor_a.size(1) == 2, "Tensor A 必须是形状 (N, 2)"
    assert tensor_b.dim() == 2 and tensor_b.size(1) == 2, "Tensor B 必须是形状 (N, 2)"
    
    # convert to numpy arrays
    a_np = tensor_a.detach().cpu().numpy()
    b_np = tensor_b.detach().cpu().numpy()
    
    # create the figure
    plt.figure(figsize=(8, 6))
    ax = plt.gca()  # current axes
    
    # scatter plots (styling parameters kept as-is)
    plt.scatter(a_np[:, 0], a_np[:, 1], 
                c='blue', 
                label='Group A', 
                alpha=0.6, 
                edgecolors='w',
                s=100)
    
    plt.scatter(b_np[:, 0], b_np[:, 1], 
                c='red', 
                label='Group B', 
                alpha=0.6, 
                edgecolors='w',
                s=100)
    
    # === axis configuration ===
    # move the left/bottom spines to the origin
    ax.spines['left'].set_position('center')    # y-axis at x=0
    ax.spines['bottom'].set_position('center')  # x-axis at y=0
    
    # hide the right and top spines
    ax.spines['right'].set_visible(False)
    ax.spines['top'].set_visible(False)
    
    # tick label placement
    ax.xaxis.set_ticks_position('bottom')  # x ticks below the axis
    ax.yaxis.set_ticks_position('left')    # y ticks left of the axis
    
    # tick padding so labels do not overlap the spines
    ax.xaxis.set_tick_params(pad=5)  # x tick label distance from the axis
    ax.yaxis.set_tick_params(pad=10) # y tick label distance from the axis
    
    # remaining figure settings
    plt.xlim(-2.1, 2.1)
    plt.ylim(-2.1, 2.1)
    plt.xlabel('', fontsize=12, x=1.05, labelpad=-25)  # label placement tweak
    plt.ylabel('', fontsize=12, y=1.05, labelpad=-30)
    plt.title('Scatter Plot with Centered Axes', fontsize=14, pad=20)
    plt.legend(frameon=True, edgecolor='black', loc='upper right')
    plt.grid(True, linestyle='--', alpha=0.5)
    ax.set_aspect('equal')
    
    # annotate the origin
    plt.text(0.1, 0.1, '(0,0)', fontsize=10, 
             transform=ax.transAxes, color='gray')
    
    plt.show()


def generate_circular_points(N):
    """
    Generate two interleaved groups of points, evenly spaced on the unit circle.

    Args:
        N (int): number of points per group
    Returns:
        tensor_a (torch.Tensor): blue point coordinates (N, 2)
        tensor_b (torch.Tensor): red point coordinates (N, 2)
    """
    # BUG FIX: 2N angles over the HALF-OPEN interval [0, 2π). The old
    # linspace(0, 2π, 2N) included both endpoints, so the first and last
    # points coincided (2π ≡ 0) and spacing was uneven.
    angles = torch.arange(2 * N, dtype=torch.float32) * (2 * torch.pi / (2 * N))

    # split even/odd indices to interleave the two groups
    blue_angles = angles[::2]    # even indices
    red_angles = angles[1::2]    # odd indices

    # convert polar angles to Cartesian coordinates
    tensor_a = torch.stack([
        torch.cos(blue_angles),
        torch.sin(blue_angles)
    ], dim=1)

    tensor_b = torch.stack([
        torch.cos(red_angles),
        torch.sin(red_angles)
    ], dim=1)

    return tensor_a, tensor_b

def dict2json(dict_data):
    """Serialize a (possibly nested) dict to a pretty-printed JSON string.

    :param dict_data: dict of JSON-serializable values
    :return: JSON string, 4-space indented, with CJK characters left readable
    :raises ValueError: when serialization fails (original error chained)
    """
    try:
        return json.dumps(
            dict_data,
            indent=4,
            ensure_ascii=False  # keep CJK characters readable
        )
    except Exception as e:
        # FIX: chain the original exception so the real cause is not lost
        # (the dead, commented-out custom serializer was removed)
        raise ValueError("dict2json 运行时出现错误") from e
        
def config2Log(m_conf):
    """Flatten a model-config dict into a pretty JSON string for logging.

    Each layer's positional `block_specs` are zipped with the schema of its
    `block_name` so the log shows named arguments instead of bare lists.

    :param m_conf: model config: {'config_name': str, <net_name>: [layer, ...]}
    :return: JSON string (via dict2json)
    :raises NotImplementedError: for an unknown block_name
    """
    # blocks whose 'block_specs' is a LIST of per-block spec lists
    multi_block_schemas = {
        "GroupConvBN": ['inp', 'oup', 'ks', 'stride', 'dilation', 'groups', 'bn', 'act'],
        "MDW_Block": ['inp', 'oup', 'hidden_sz', 'ks', 'dw_num', 'stride', 'dilation', 'e_bn', 'e_act', 'proj', 'proj_bn', 'res', 'dw_act'],
        "AvgPool2d": ['kernel_size', 'stride'],
        "convbn": ['inp', 'oup', 'kernel_size', 'stride'],
        "uib": ['inp', 'oup', 'start_dw_kernel_size', 'middle_dw_kernel_size', 'middle_dw_downsample', 'stride', 'expand_ratio', 'mhsa'],
        "fused_ib": ['inp', 'oup', 'stride', 'expand_ratio', 'act'],
    }
    # blocks whose 'block_specs' is a SINGLE flat spec list
    single_block_schemas = {
        "FC": ['in_features', 'out_features', 'bias'],
        "SubCenterArcFace": ['embedding_size', 'num_classes', 'k', 'margin', 'scale', 'easy_margin'],
        "FC_classifier": ['in_features', 'out_features', 'bias'],
    }

    dict_data = {}
    for net_name in m_conf:
        if net_name == 'config_name':
            dict_data[net_name] = m_conf[net_name]
            continue
        dict_data[net_name] = {}
        for i, layer in enumerate(m_conf[net_name]):
            block_names = layer['block_name']
            if block_names in multi_block_schemas:
                schema_ = multi_block_schemas[block_names]
                for j in range(layer['num_blocks']):
                    args = dict(zip(schema_, layer['block_specs'][j]))
                    dict_data[net_name][f"layer_{i}_{block_names}_{j}"] = str(args)
            elif block_names in single_block_schemas:
                schema_ = single_block_schemas[block_names]
                args = dict(zip(schema_, layer['block_specs']))
                dict_data[net_name][f"layer_{i}_{block_names}"] = str(args)
            else:
                raise NotImplementedError
    return dict2json(dict_data)


def set_clsfier(m_conf, classes_num):
    """Wire the classifier head of a model config to its feature extractor.

    Scans ``m_conf['features']`` and takes the output width of the last
    non-pooling layer as the classifier's input (embedding) dimension, then
    sets the classifier's output dimension to ``classes_num``.

    Args:
        m_conf: model config dict with 'features' and 'classifier' entries
            (mutated in place).
        classes_num: number of target classes.

    Returns:
        The same (mutated) config dict.
    """
    assert len(m_conf['classifier']) == 1
    embedding_size = 0
    # Walk the feature layers; AvgPool2d entries are skipped when locating
    # the embedding width, so the last conv-style layer wins.
    for feat_layer in m_conf['features']:
        if feat_layer['block_name'] == 'AvgPool2d':
            continue
        embedding_size = feat_layer['block_specs'][-1][1]
    head_specs = m_conf['classifier'][0]["block_specs"]
    head_specs[0] = embedding_size  # classifier input vector dimension
    head_specs[1] = classes_num     # classifier output class count
    return m_conf


def _sum_times_in_file(file_path, pattern):
    """Accumulate (total_seconds, match_count) over one log file.

    Lines whose captured value fails to parse as float are skipped; an
    unreadable file prints a warning (with its basename) and contributes
    (0.0, 0).
    """
    total = 0.0
    count = 0
    try:
        with open(file_path, 'r', encoding='utf-8', errors='ignore') as file:
            for line in file:
                match = re.search(pattern, line)
                if not match:
                    continue
                try:
                    total += float(match.group(1))
                    count += 1
                except ValueError:
                    continue
    except Exception as e:
        # Report but keep going: a single bad file must not abort the scan.
        print(f"处理文件 {os.path.basename(file_path)} 时出错: {str(e)}")
    return total, count


def calculate_avg_time_elapsed(directory_path):
    """Average the per-epoch ``time_elapsed`` values found in training logs.

    Accepts either a single log file or a directory of log files. Matching
    lines must contain "Avg train loss" and end with "time_elapsed: X.XXs".

    Args:
        directory_path (str): path to a log file or a directory of log files.

    Returns:
        float: mean of all matched time_elapsed values, or 0.0 when none
        were found (or the path is neither a file nor a directory).
    """
    # Requires "Avg train loss" on the line and a trailing "time_elapsed: <num>s".
    pattern = r"Avg train loss.*time_elapsed:\s*(\d+\.\d+|\d+)s\b"

    total_time = 0.0
    count = 0

    if os.path.isfile(directory_path):
        total_time, count = _sum_times_in_file(directory_path, pattern)
    elif os.path.isdir(directory_path):
        for filename in os.listdir(directory_path):
            file_path = os.path.join(directory_path, filename)
            if not os.path.isfile(file_path):
                continue  # skip sub-directories and other non-files
            file_total, file_count = _sum_times_in_file(file_path, pattern)
            total_time += file_total
            count += file_count

    return total_time / count if count else 0.0

def generate_experiment_report(logs_dir):
    """Generate a CSV experiment report from per-seed training logs.

    Walks every sub-directory of ``logs_dir`` (one folder per experiment),
    extracts the average seconds-per-epoch (SPE) from the seed-42 log and the
    best validation accuracy from each seed's log, then writes one row per
    experiment to ``experiment_report.csv`` in the current working directory.

    Args:
        logs_dir (str): root directory containing one folder per experiment.
    """
    # Seeds expected to have one log file each per experiment.
    SEEDS = [42, 7, 13, 19, 21]
    CSV_OUTPUT = "experiment_report.csv"  # written to the current working directory

    # Regex patterns
    TIME_PATTERN = r'Avg train loss.*time_elapsed:\s*(\d+\.\d+)\s*s'  # per-epoch elapsed time
    ACC_PATTERN = r'best val_acc:\s*(\d+\.\d+)'                      # best validation accuracy

    def find_log_file(exp_path, seed):
        """Return the first log file in *exp_path* whose name contains the seed, or None."""
        pattern = os.path.join(exp_path, f"*seed{seed}*.log")
        files = glob.glob(pattern)
        return files[0] if files else None

    def extract_epoch_times(log_path):
        """Extract all per-epoch elapsed times (seconds) from the log file."""
        times = []
        try:
            with open(log_path, 'r') as f:
                for line in f:
                    match = re.search(TIME_PATTERN, line)
                    if match:
                        times.append(float(match.group(1)))
            return times
        except Exception as e:
            print(f"Error processing {log_path}: {str(e)}")
            return []

    def extract_final_accuracy(log_path):
        """Extract the final best-val-acc value from the tail of the log file."""
        try:
            with open(log_path, 'r') as f:
                # Read the last 20 lines; the accuracy summary is written near the end.
                lines = f.readlines()[-20:]
                # Scan backwards so the most recent match wins.
                for line in reversed(lines):
                    match = re.search(ACC_PATTERN, line)
                    if match:
                        return float(match.group(1))
                print(f"Accuracy not found in {log_path}")
        except Exception as e:
            print(f"Error reading {log_path}: {str(e)}")
        return None

    results = []
    
    # Bail out early if the logs root does not exist.
    if not os.path.exists(logs_dir):
        print(f"Error: Logs directory '{logs_dir}' does not exist")
        return
    
    # Iterate over experiment folders (non-directories are skipped).
    for exp_dir in os.listdir(logs_dir):
        exp_path = os.path.join(logs_dir, exp_dir)
        if not os.path.isdir(exp_path):
            continue
            
        print(f"Processing experiment: {exp_dir}")
        exp_data = {
            "name": exp_dir,
            "spe": 0,
            "accuracies": {seed: None for seed in SEEDS},
            "mean_acc": 0
        }
        
        # SPE is computed from the seed-42 log only.
        seed42_log = find_log_file(exp_path, 42)
        if seed42_log:
            epoch_times = extract_epoch_times(seed42_log)
            if epoch_times:
                exp_data["spe"] = sum(epoch_times) / len(epoch_times)
                print(f"  Found {len(epoch_times)} epochs in seed42 log")
            else:
                print(f"  No epoch times found in seed42 log")
        else:
            print(f"  Seed42 log not found")
        
        # Collect best accuracy for every configured seed.
        valid_accs = []
        for seed in SEEDS:
            log_file = find_log_file(exp_path, seed)
            if log_file:
                acc = extract_final_accuracy(log_file)
                if acc is not None:
                    exp_data["accuracies"][seed] = acc
                    valid_accs.append(acc)
                    print(f"  Seed{seed} accuracy: {acc:.6f}")
                else:
                    print(f"  Accuracy not found for seed{seed}")
            else:
                print(f"  Log missing for seed{seed}")
        
        # Mean accuracy uses only the seeds whose accuracy was found.
        if valid_accs:
            exp_data["mean_acc"] = sum(valid_accs) / len(valid_accs)
            print(f"  Mean accuracy: {exp_data['mean_acc']:.6f}")
        else:
            print(f"  No valid accuracies found")
        
        results.append(exp_data)
    
    # Write the CSV report.
    with open(CSV_OUTPUT, 'w', newline='') as csvfile:
        fieldnames = [
            'Experiment',
            'SPE',
            'seed42_acc',
            'seed7_acc',
            'seed13_acc',
            'seed19_acc',
            'seed21_acc',
            'Mean_Accuracy'
        ]
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        
        # NOTE(review): a missing SPE is rendered as "0.0000" while a missing
        # mean accuracy is rendered as "N/A" — consider aligning the two.
        for exp in results:
            row = {
                'Experiment': exp["name"],
                'SPE': f"{exp['spe']:.4f}",
                'seed42_acc': f"{exp['accuracies'][42]:.6f}" if exp['accuracies'][42] is not None else "N/A",
                'seed7_acc': f"{exp['accuracies'][7]:.6f}" if exp['accuracies'][7] is not None else "N/A",
                'seed13_acc': f"{exp['accuracies'][13]:.6f}" if exp['accuracies'][13] is not None else "N/A",
                'seed19_acc': f"{exp['accuracies'][19]:.6f}" if exp['accuracies'][19] is not None else "N/A",
                'seed21_acc': f"{exp['accuracies'][21]:.6f}" if exp['accuracies'][21] is not None else "N/A",
                'Mean_Accuracy': f"{exp['mean_acc']:.6f}" if exp['mean_acc'] != 0 else "N/A"
            }
            writer.writerow(row)
    
    print(f"\nReport generated: {CSV_OUTPUT}")
    print(f"Processed {len(results)} experiments")

if __name__ == '__main__':
    # Scratchpad entry point: the commented-out calls below are one-off
    # utilities (dataset generation, stats, augmentation, visualization) kept
    # for reference; the script ends by dropping into an IPython shell.

    # gb2312_chars = get_gb2312_chars(level=1)

    # # Print all the characters
    # for char in gb2312_chars:
    #     print(char, end='')

    # get_ecdict_words(9, 10, 1000)
    
    # Generate the XIKE-CFS dataset
    # train_chars = random.sample(gb2312_chars, 800)
    # _val_chars = list(set(gb2312_chars) - set(train_chars))
    # val_chars = random.sample(_val_chars, 200)
    # generator(train_chars, mod='train', output_folder='./dataset/XIKE-CFS-1/', 
    #           font_folder='./data/fonts/XIKE-CFS-1', data_aug_size=0, 
    #           image_size=(100, 100), font_size=96, seed=42)
    # generator(val_chars, mod='val', output_folder='./dataset/XIKE-CFS-1/', 
    #           font_folder='./data/fonts/XIKE-CFS-1', data_aug_size=0, 
    #           image_size=(100, 100), font_size=96, seed=42)

    # cal_mean_std('./dataset/nankai_black/')

    # model = CAE_V3()
    # count_parameters(model)

    # data_aug('./dataset/nankai_without_handwrite_DAG/', 5)

    # convert_image_color('./dataset/XIKE-CFS-1', './dataset/XIKE-CFS-1_black', resize=(96, 96))
    # convert_to_L('./dataset/XIKE-CFS-1_black')
    # convert_to_L('./dataset/CSC')
    # find_common_char('./dataset/nankai_black/val')
    # act_func_view()
    
    #shuffle_image_patches
        # Build a test image (1x96x96 random-noise image)
    # from dataset_96 import ZhCharDataset_96
    # dataset = ZhCharDataset_96('./dataset/nankai_raw/', mod='val', transform=True)
    # test_img, _, _ = dataset[2500]
    # test_img = dataset.deNorm(test_img)
    # shuffle_image_patches(test_img, 3, "down_shuffled_3x3.png", down_sample=True)
    # shuffle_image_patches(test_img, 5, "down_shuffled_5x5.png", down_sample=True)
    # shuffle_image_patches(test_img, 7, "down_shuffled_7x7.png", down_sample=True)
    # shuffle_image_patches(test_img, 9, "down_shuffled_9x9.png", down_sample=True)
    # shuffle_image_patches(test_img, 11, "down_shuffled_11x11.png", down_sample=True)
    # shuffle_image_patches(test_img, 13, "down_shuffled_13x13.png", down_sample=True)
    # shuffle_image_patches(test_img, 15, "down_shuffled_15x15.png", down_sample=True)
    # shuffle_image_patches(test_img, 17, "down_shuffled_17x17.png", down_sample=True)
    # shuffle_image_patches(test_img, 19, "down_shuffled_19x19.png", down_sample=True)
 

    # N = 20
    # blue_points, red_points = generate_circular_points(N)
    # draw_scatter(blue_points, red_points)
    embed()  # drop into an interactive IPython session for ad-hoc use