"""
工具函数模块
包含项目中常用的辅助函数
"""

import os
import json
import time
import random
import numpy as np
import torch
import logging
from datetime import datetime
import nibabel as nib
from pathlib import Path
import shutil
import hashlib


def set_random_seed(seed=42):
    """Seed every RNG (Python, NumPy, Torch CPU and CUDA) for reproducibility.

    Also pins cuDNN to deterministic kernels and disables its autotuner,
    which trades some speed for repeatable results.

    Args:
        seed: integer seed applied to all generators.
    """
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed, torch.cuda.manual_seed_all):
        seed_fn(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False


def create_logger(name, log_file=None, level=logging.INFO):
    """Create (or fetch) a logger configured with console and optional file output.

    Args:
        name: logger name passed to logging.getLogger.
        log_file: optional path; when given, messages are also written there.
        level: logging level applied to the logger and its handlers.

    Returns:
        logger: the configured logging.Logger instance.
    """
    logger = logging.getLogger(name)
    logger.setLevel(level)

    # logging.getLogger returns the same object for the same name, so a
    # repeated call used to stack duplicate handlers and every message was
    # emitted once per call. Configure handlers only on first use; later
    # calls (including ones with a different log_file) reuse the existing
    # configuration.
    if logger.handlers:
        return logger

    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )

    # Console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(level)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

    # Optional file handler
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(level)
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

    return logger


class Timer:
    """Simple stopwatch, usable directly or as a context manager."""

    def __init__(self):
        self.start_time = None
        self.elapsed_time = 0

    def start(self):
        """Record the current time as the starting point."""
        self.start_time = time.time()

    def stop(self):
        """Halt timing and return the elapsed seconds (0 if never started)."""
        if self.start_time is not None:
            self.elapsed_time = time.time() - self.start_time
            self.start_time = None
            return self.elapsed_time
        return 0

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, *exc_info):
        self.stop()

    def __str__(self):
        return format_time(self.elapsed_time)


def format_time(seconds):
    """Render a duration in seconds as a compact human-readable string.

    Args:
        seconds: duration in seconds.

    Returns:
        formatted_time: "12.34s", "3m 4s", or "1h 2m 3s" depending on size.
    """
    if seconds < 60:
        return f"{seconds:.2f}s"
    minutes, secs = divmod(seconds, 60)
    if seconds < 3600:
        return f"{int(minutes)}m {int(secs)}s"
    hours, minutes = divmod(minutes, 60)
    return f"{int(hours)}h {int(minutes)}m {int(secs)}s"


def get_memory_usage():
    """Report current GPU memory usage in GiB, or None when CUDA is unavailable."""
    if not torch.cuda.is_available():
        return None
    gib = 1024 ** 3
    return {
        'allocated': torch.cuda.memory_allocated() / gib,
        'reserved': torch.cuda.memory_reserved() / gib,
        'free': torch.cuda.mem_get_info()[0] / gib,
    }


def save_checkpoint(state, is_best, checkpoint_dir, filename='checkpoint.pth'):
    """Persist a training checkpoint, duplicating it as the best model when flagged.

    Args:
        state: state dict to serialize with torch.save.
        is_best: when True, also copy the file to 'model_best.pth'.
        checkpoint_dir: directory that receives the checkpoint files.
        filename: name of the regular checkpoint file.
    """
    target = os.path.join(checkpoint_dir, filename)
    torch.save(state, target)

    if not is_best:
        return
    # Keep a stable alias for the best checkpoint so callers can always
    # load 'model_best.pth' regardless of the rolling filename.
    shutil.copyfile(target, os.path.join(checkpoint_dir, 'model_best.pth'))


def load_checkpoint(checkpoint_path, model, optimizer=None, device='cpu'):
    """Restore model (and optionally optimizer) weights from a checkpoint file.

    Args:
        checkpoint_path: path to the serialized checkpoint.
        model: model instance whose state_dict is restored in place.
        optimizer: optional optimizer whose state is restored if present.
        device: map_location passed to torch.load.

    Returns:
        checkpoint: the full checkpoint dict, for callers needing extras
        (epoch, metrics, ...).
    """
    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model_state_dict'])

    # Optimizer state is restored only when both an optimizer was supplied
    # and the checkpoint actually carries one.
    if optimizer and 'optimizer_state_dict' in checkpoint:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    return checkpoint


def count_parameters(model):
    """Count a model's parameters.

    Args:
        model: a PyTorch model.

    Returns:
        total_params: number of all parameters.
        trainable_params: number of parameters with requires_grad=True.
    """
    total = trainable = 0
    for param in model.parameters():
        n = param.numel()
        total += n
        if param.requires_grad:
            trainable += n
    return total, trainable


def print_model_summary(model, input_size=(1, 1, 256, 256)):
    """Print a parameter-count summary for a model.

    Args:
        model: the PyTorch model to summarize.
        input_size: accepted for API compatibility; currently unused by
            this implementation.
    """
    total, trainable = count_parameters(model)

    separator = "=" * 60
    print(separator)
    print("Model Summary")
    print(separator)
    print(f"Total parameters: {total:,}")
    print(f"Trainable parameters: {trainable:,}")
    print(f"Non-trainable parameters: {total - trainable:,}")
    print(separator)

def get_file_hash(filepath):
    """Compute the MD5 digest of a file, reading it in chunks.

    Args:
        filepath: path to the file to hash.

    Returns:
        hash_value: hex-encoded MD5 digest string.
    """
    digest = hashlib.md5()
    with open(filepath, "rb") as fh:
        # 4 KiB chunks keep memory flat for arbitrarily large files.
        while True:
            chunk = fh.read(4096)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()


def ensure_dir(directory):
    """Create a directory (including parents) if it does not already exist.

    Args:
        directory: path of the directory to guarantee.
    """
    os.makedirs(directory, exist_ok=True)


def clean_directory(directory, keep_recent=5):
    """Delete the oldest files in a directory, keeping the newest N.

    Subdirectories are never removed and no longer count toward the quota:
    previously they were included in the recency sort, so each recent
    subdirectory silently displaced a file that should have been kept.

    Args:
        directory: directory to clean; missing directories are ignored.
        keep_recent: number of most-recently-modified files to keep.
    """
    if not os.path.exists(directory):
        return

    # Consider only regular files, newest first by modification time.
    files = sorted(
        (entry for entry in Path(directory).glob('*') if entry.is_file()),
        key=lambda entry: entry.stat().st_mtime,
        reverse=True,
    )

    for stale in files[keep_recent:]:
        stale.unlink()


def normalize_path(path):
    """Expand a leading '~' and return the absolute form of a path.

    Args:
        path: original path string.

    Returns:
        normalized_path: absolute, user-expanded path.
    """
    expanded = os.path.expanduser(path)
    return os.path.abspath(expanded)


def get_nifti_info(nifti_path):
    """Collect shape, intensity statistics, and metadata for a NIfTI file.

    Args:
        nifti_path: path to the NIfTI file.

    Returns:
        info: dict with shape, dtype, affine, header, min/max/mean/std
        intensity values, and file size in MB.
    """
    image = nib.load(nifti_path)
    volume = image.get_fdata()

    return {
        'shape': volume.shape,
        'dtype': volume.dtype,
        'affine': image.affine.tolist(),
        # NOTE(review): relies on the nibabel header exposing a mapping
        # interface; the resulting values may be NumPy arrays and not
        # JSON-serializable — confirm downstream consumers.
        'header': dict(image.header),
        'min_value': float(np.min(volume)),
        'max_value': float(np.max(volume)),
        'mean_value': float(np.mean(volume)),
        'std_value': float(np.std(volume)),
        'file_size_mb': os.path.getsize(nifti_path) / (1024 * 1024),
    }


def merge_dicts(*dicts):
    """Merge several dicts into one; later dicts win on key conflicts.

    Args:
        *dicts: dictionaries to merge, in increasing precedence order.

    Returns:
        merged: a new dict containing all entries.
    """
    return {key: value for mapping in dicts for key, value in mapping.items()}


class AverageMeter:
    """Tracks the running average of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record a value observed n times and refresh the running average."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def save_results(results, save_path):
    """Serialize a results dict to a pretty-printed JSON file.

    NumPy containers are converted to native Python types first so that
    json.dump does not raise TypeError.

    Args:
        results: results dict (may nest NumPy arrays/scalars, lists, tuples).
        save_path: destination JSON file path.
    """
    def _to_serializable(obj):
        # Arrays become nested lists.
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # np.generic covers every NumPy scalar (all int/float widths, bool_),
        # not just int32/int64/float32/float64 as before.
        if isinstance(obj, np.generic):
            return obj.item()
        if isinstance(obj, dict):
            return {k: _to_serializable(v) for k, v in obj.items()}
        # Tuples were previously passed through unconverted, so a tuple
        # containing NumPy scalars crashed json.dump; handle both.
        if isinstance(obj, (list, tuple)):
            return [_to_serializable(item) for item in obj]
        return obj

    serializable_results = _to_serializable(results)

    with open(save_path, 'w') as f:
        json.dump(serializable_results, f, indent=4)


def load_results(load_path):
    """Read previously saved results back from a JSON file.

    Args:
        load_path: path of the JSON file to read.

    Returns:
        results: the deserialized results dict.
    """
    with open(load_path, 'r') as handle:
        return json.load(handle)


if __name__ == "__main__":
    # Smoke-test the utility helpers in this module.
    print("测试工具函数模块...")

    # Timer as a context manager.
    print("\n测试计时器:")
    with Timer() as timer:
        time.sleep(1.5)
    print(f"经过时间: {timer}")

    # Parameter summary on the project model.
    print("\n测试模型参数统计:")
    from nestedunet import NestedUNet
    model = NestedUNet()
    print_model_summary(model)

    # GPU memory report (None when CUDA is unavailable).
    print("\n测试GPU内存使用:")
    memory = get_memory_usage()
    if memory is None:
        print("GPU不可用")
    else:
        print(f"已分配: {memory['allocated']:.2f} GB")
        print(f"已保留: {memory['reserved']:.2f} GB")
        print(f"可用: {memory['free']:.2f} GB")