import csv
import matplotlib.pyplot as plt
from sklearn.metrics import calinski_harabasz_score
import warnings
import time
import psutil
from datetime import datetime

# Try to import pynvml for GPU monitoring; degrade gracefully when the
# library is missing or NVML cannot initialise (e.g. no NVIDIA driver).
try:
    import pynvml

    has_gpu = True
    pynvml.nvmlInit()
except ImportError:
    has_gpu = False
    print("警告: 未找到 pynvml 库，GPU 监控功能将不可用。请安装 pynvml: pip install pynvml")
except Exception as e:
    has_gpu = False
    print(f"警告: 初始化 GPU 监控失败: {e}，GPU 监控功能将不可用。")

warnings.filterwarnings('ignore')
from SpaGRA.utils import *
from SpaGRA.process import *
from SpaGRA import train_model as original_train  # import the original training module directly

# Directory that receives all monitoring CSV files.
# NOTE(review): `os` is not imported explicitly in this file — presumably it
# is re-exported by the star imports above; confirm against SpaGRA.utils.
if not os.path.exists('monitoring_data'):
    os.makedirs('monitoring_data')

# Monitoring helper
def get_system_metrics():
    """Sample current CPU, memory and (optionally) GPU utilisation.

    Returns:
        dict with keys:
            cpu_percent, cpu_count,
            memory_total (GB), memory_used (GB), memory_percent,
            gpu_percent, gpu_memory_total (MB), gpu_memory_used (MB),
            gpu_memory_percent.
        The GPU fields are 0 when GPU monitoring is unavailable or a
        query fails, so callers always receive a uniform dict.
    """
    metrics = {}

    # CPU metrics; a short blocking interval yields a meaningful reading
    # instead of the 0.0 a first non-blocking call would return.
    metrics['cpu_percent'] = psutil.cpu_percent(interval=0.1)
    metrics['cpu_count'] = psutil.cpu_count(logical=True)

    # Memory metrics (bytes converted to GB).
    memory = psutil.virtual_memory()
    metrics['memory_total'] = memory.total / (1024 ** 3)  # GB
    metrics['memory_used'] = memory.used / (1024 ** 3)  # GB
    metrics['memory_percent'] = memory.percent

    # GPU metrics (device 0 only). Pre-fill zeros once so the no-GPU and
    # query-failure paths share a single fallback instead of duplicating it.
    metrics['gpu_percent'] = 0
    metrics['gpu_memory_total'] = 0
    metrics['gpu_memory_used'] = 0
    metrics['gpu_memory_percent'] = 0
    if has_gpu:
        try:
            handle = pynvml.nvmlDeviceGetHandleByIndex(0)
            info = pynvml.nvmlDeviceGetUtilizationRates(handle)
            mem_info = pynvml.nvmlDeviceGetMemoryInfo(handle)

            metrics['gpu_percent'] = info.gpu
            metrics['gpu_memory_total'] = mem_info.total / (1024 ** 2)  # MB
            metrics['gpu_memory_used'] = mem_info.used / (1024 ** 2)  # MB
            metrics['gpu_memory_percent'] = mem_info.used / mem_info.total * 100
        except Exception as e:
            print(f"获取 GPU 指标时出错: {e}")
            # Reset everything so a partial update cannot leak through.
            metrics['gpu_percent'] = 0
            metrics['gpu_memory_total'] = 0
            metrics['gpu_memory_used'] = 0
            metrics['gpu_memory_percent'] = 0

    return metrics


# Append monitoring data to a CSV file
def write_monitoring_data(file_path, data, header=False):
    """Append `data` as one CSV row, optionally writing a header first.

    Column order is taken from `data`'s keys. Callers create a new file by
    passing a placeholder dict whose values equal its keys together with
    header=True; such a placeholder is detected and NOT written as a data
    row, so the header no longer appears twice in the file (previous
    behaviour wrote both the header and the identical placeholder row).
    """
    with open(file_path, 'a', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=list(data.keys()))
        if header:
            writer.writeheader()
        # Skip rows that would merely repeat the header line.
        if any(str(value) != key for key, value in data.items()):
            writer.writerow(data)


# Custom training entry point that adds resource monitoring
def train_with_monitoring(adata, hidden_dims, n_epochs, num_hidden, lr, key_added, a, b, c, radius,
                          weight_decay, random_seed, feat_drop, attn_drop, negative_slope, heads, method, reso):
    """Train the model while monitoring resource usage after each epoch.

    Uses a per-epoch callback when the installed SpaGRA `train` accepts an
    `epoch_callback` keyword, otherwise falls back to a background polling
    thread. Returns (adata, max_cpu, max_memory, max_gpu, max_gpu_memory).
    """
    import inspect

    # Probe the original training function for the callback hook and pick
    # the matching monitoring strategy.
    train_params = inspect.signature(original_train.train).parameters
    runner = _train_with_callback if 'epoch_callback' in train_params else _train_with_polling
    return runner(adata, hidden_dims, n_epochs, num_hidden, lr, key_added, a, b, c, radius,
                  weight_decay, random_seed, feat_drop, attn_drop, negative_slope, heads, method, reso)


# Monitor training through SpaGRA's per-epoch callback hook
def _train_with_callback(adata, hidden_dims, n_epochs, num_hidden, lr, key_added, a, b, c, radius,
                         weight_decay, random_seed, feat_drop, attn_drop, negative_slope, heads, method, reso):
    """Train via the original `train` and log resource usage every epoch.

    Returns (adata, max_cpu, max_memory, max_gpu, max_gpu_memory).
    """
    # Peak resource usage observed so far, keyed by metric name.
    peaks = {'cpu': 0, 'memory': 0, 'gpu': 0, 'gpu_memory': 0}

    # One CSV per batch, named after the first/last observation index.
    epoch_monitoring_file = f"monitoring_data/epoch_metrics_batch_{adata.obs.index[0]}_{adata.obs.index[-1]}.csv"

    # Placeholder dict (values == keys) used to create the header row.
    epoch_header = {
        'epoch': 'epoch', 'cpu_percent': 'cpu_percent', 'memory_percent': 'memory_percent',
        'gpu_percent': 'gpu_percent', 'gpu_memory_percent': 'gpu_memory_percent'
    }
    write_monitoring_data(epoch_monitoring_file, epoch_header, header=True)

    def epoch_callback(epoch, model, adata):
        # Invoked by the training loop after every epoch: sample the
        # system, update peaks, append a CSV row and print progress.
        metrics = get_system_metrics()

        peaks['cpu'] = max(peaks['cpu'], metrics['cpu_percent'])
        peaks['memory'] = max(peaks['memory'], metrics['memory_percent'])
        if has_gpu:
            peaks['gpu'] = max(peaks['gpu'], metrics['gpu_percent'])
            peaks['gpu_memory'] = max(peaks['gpu_memory'], metrics['gpu_memory_percent'])

        # Persist this epoch's resource snapshot.
        write_monitoring_data(epoch_monitoring_file, {
            'epoch': epoch,
            'cpu_percent': metrics['cpu_percent'],
            'memory_percent': metrics['memory_percent'],
            'gpu_percent': metrics['gpu_percent'],
            'gpu_memory_percent': metrics['gpu_memory_percent']
        })

        # Progress line; the GPU part is appended only when available.
        print(f"Epoch {epoch}/{n_epochs} - CPU: {metrics['cpu_percent']:.1f}%, "
              f"Memory: {metrics['memory_percent']:.1f}%", end='')
        if has_gpu:
            print(f", GPU: {metrics['gpu_percent']:.1f}%, GPU Memory: {metrics['gpu_memory_percent']:.1f}%")
        else:
            print()

    # Delegate to the original training routine, passing our hook.
    adata = original_train.train(
        adata,
        hidden_dims=hidden_dims,
        n_epochs=n_epochs,
        num_hidden=num_hidden,
        lr=lr,
        key_added=key_added,
        a=a,
        b=b,
        c=c,
        radius=radius,
        weight_decay=weight_decay,
        random_seed=random_seed,
        feat_drop=feat_drop,
        attn_drop=attn_drop,
        negative_slope=negative_slope,
        heads=heads,
        method=method,
        reso=reso,
        epoch_callback=epoch_callback
    )

    # Trained adata plus the observed resource-usage peaks.
    return adata, peaks['cpu'], peaks['memory'], peaks['gpu'], peaks['gpu_memory']


# Monitor training by polling from a background thread
def _train_with_polling(adata, hidden_dims, n_epochs, num_hidden, lr, key_added, a, b, c, radius,
                        weight_decay, random_seed, feat_drop, attn_drop, negative_slope, heads, method, reso):
    """Monitor the training process by polling when no epoch callback exists.

    A daemon thread samples system metrics roughly once per second while the
    original `train` runs in the foreground; peaks are tracked and every
    sample is appended to a per-batch CSV.
    Returns (adata, max_cpu, max_memory, max_gpu, max_gpu_memory).
    """
    import threading
    import time

    # Peak resource usage observed so far.
    max_cpu = 0
    max_memory = 0
    max_gpu = 0
    max_gpu_memory = 0
    training_active = True

    # One CSV per batch, named after the first/last observation index.
    epoch_monitoring_file = f"monitoring_data/epoch_metrics_batch_{adata.obs.index[0]}_{adata.obs.index[-1]}.csv"

    # Placeholder dict (values == keys) used to create the header row.
    epoch_header = {
        'timestamp': 'timestamp', 'cpu_percent': 'cpu_percent', 'memory_percent': 'memory_percent',
        'gpu_percent': 'gpu_percent', 'gpu_memory_percent': 'gpu_memory_percent'
    }
    write_monitoring_data(epoch_monitoring_file, epoch_header, header=True)

    # Monitoring thread body.
    def monitor_training():
        nonlocal max_cpu, max_memory, max_gpu, max_gpu_memory

        print("开始监控训练过程...")
        iteration = 0

        while training_active:
            # Sample current resource usage.
            metrics = get_system_metrics()

            # Track peaks.
            if metrics['cpu_percent'] > max_cpu:
                max_cpu = metrics['cpu_percent']
            if metrics['memory_percent'] > max_memory:
                max_memory = metrics['memory_percent']
            if has_gpu and metrics['gpu_percent'] > max_gpu:
                max_gpu = metrics['gpu_percent']
            if has_gpu and metrics['gpu_memory_percent'] > max_gpu_memory:
                max_gpu_memory = metrics['gpu_memory_percent']

            # BUGFIX: write only the columns declared in the header. The
            # full metrics dict carries extra keys (cpu_count, memory_total,
            # ...), so rows previously did not line up with the CSV header.
            write_monitoring_data(epoch_monitoring_file, {
                'timestamp': time.time(),
                'cpu_percent': metrics['cpu_percent'],
                'memory_percent': metrics['memory_percent'],
                'gpu_percent': metrics['gpu_percent'],
                'gpu_memory_percent': metrics['gpu_memory_percent']
            })

            iteration += 1
            if iteration % 10 == 0:  # print a status line every 10 samples
                print(f"监控中 ({iteration}) - CPU: {metrics['cpu_percent']:.1f}%, "
                      f"Memory: {metrics['memory_percent']:.1f}%", end='')
                if has_gpu:
                    print(f", GPU: {metrics['gpu_percent']:.1f}%, GPU Memory: {metrics['gpu_memory_percent']:.1f}%")
                else:
                    print()

            time.sleep(1)  # sample roughly once per second

    # Start the monitoring thread (daemon so it can never block interpreter exit).
    monitor_thread = threading.Thread(target=monitor_training)
    monitor_thread.daemon = True
    monitor_thread.start()

    try:
        # Run the original training routine in the foreground.
        print(f"开始训练模型 (n_epochs={n_epochs})...")
        start_train_time = time.time()

        adata = original_train.train(
            adata,
            hidden_dims=hidden_dims,
            n_epochs=n_epochs,
            num_hidden=num_hidden,
            lr=lr,
            key_added=key_added,
            a=a,
            b=b,
            c=c,
            radius=radius,
            weight_decay=weight_decay,
            random_seed=random_seed,
            feat_drop=feat_drop,
            attn_drop=attn_drop,
            negative_slope=negative_slope,
            heads=heads,
            method=method,
            reso=reso
        )

        train_time = time.time() - start_train_time
        print(f"训练完成，耗时: {train_time:.2f} 秒")

    finally:
        # Signal the monitor to stop and wait briefly for it to finish.
        training_active = False
        monitor_thread.join(timeout=2.0)

    # Report the peaks observed during training (time-based samples only —
    # without a callback the actual epoch boundaries are unknown).
    print(f"训练期间资源使用峰值 - CPU: {max_cpu:.2f}%, 内存: {max_memory:.2f}%", end='')
    if has_gpu:
        print(f", GPU: {max_gpu:.2f}%, GPU内存: {max_gpu_memory:.2f}%")
    else:
        print()

    # Trained adata plus the observed resource-usage peaks.
    return adata, max_cpu, max_memory, max_gpu, max_gpu_memory


# ====================== Main script ======================
# NOTE(review): TORCH_CUDA_ARCH_LIST is set after the star imports above may
# already have imported torch — confirm the setting still takes effect here.
os.environ["TORCH_CUDA_ARCH_LIST"] = "8.9"
print("TORCH_CUDA_ARCH_LIST:", os.environ.get("TORCH_CUDA_ARCH_LIST", "未设置"))
print("CUDA可用:", torch.cuda.is_available())

# Record the start time; the timestamp is embedded in the output file names.
start_time = time.time()
experiment_start_time = datetime.now().strftime("%Y%m%d_%H%M%S")

# Monitoring output files for this run.
monitoring_file = f"monitoring_data/system_metrics_{experiment_start_time}.csv"
batch_monitoring_file = f"monitoring_data/batch_metrics_{experiment_start_time}.csv"

# Write the system-metrics CSV header.
# NOTE(review): this row mixes real metric values with placeholder strings
# ('timestamp', 'elapsed_time', 'batch'), so the first data line of the CSV
# is partly bogus — consider writing a pure placeholder header instead.
header_metrics = get_system_metrics()
header_metrics['timestamp'] = 'timestamp'
header_metrics['elapsed_time'] = 'elapsed_time'
header_metrics['batch'] = 'batch'
write_monitoring_data(monitoring_file, header_metrics, header=True)

# Load the dataset.
adata = sc.read("../visium_hd_dense.h5ad")
print(f"原始数据集大小: {adata.n_obs}个spots, {adata.n_vars}个基因")

# ================== Random subsampling ==================
sampling_ratio = 1  # fraction of spots to keep (1 = use the full dataset)
np.random.seed(42)  # fixed seed for reproducibility
sampled_indices = np.random.choice(adata.n_obs, size=int(adata.n_obs * sampling_ratio), replace=False)
sampled_adata = adata[sampled_indices, :].copy()
print(f"采样后数据集大小: {sampled_adata.n_obs}个spots, {sampled_adata.n_vars}个基因")

# ================== Batch processing ==================
# Batch size; tune to the sampled data volume and available memory.
batch_size = 5000
num_batches = (sampled_adata.n_obs + batch_size - 1) // batch_size  # ceiling division

# Per-batch results, wall-clock times and peak resource usage.
batch_results = []
batch_times = []
batch_metrics = []

# Preprocessing that is cheap enough to run once on the whole sampled dataset.
sampled_adata.var_names_make_unique()
prefilter_genes(sampled_adata, min_cells=3)
sc.pp.highly_variable_genes(sampled_adata, flavor="seurat_v3", n_top_genes=1000)

# Process the data batch by batch.
for i in range(num_batches):
    batch_start_time = time.time()
    print(f"\n处理批次 {i + 1}/{num_batches}")

    # Slice out the current batch.
    start_idx = i * batch_size
    end_idx = min((i + 1) * batch_size, sampled_adata.n_obs)
    batch_adata = sampled_adata[start_idx:end_idx, :].copy()

    # Remaining per-batch preprocessing.
    sc.pp.normalize_per_cell(batch_adata)
    sc.pp.log1p(batch_adata)

    # Build the spatial neighbour network.
    Cal_Spatial_Net(batch_adata, rad_cutoff=150)

    # Train the model with resource monitoring.
    print(f"开始训练模型 (批次 {i + 1}/{num_batches})...")
    batch_adata, batch_max_cpu, batch_max_memory, batch_max_gpu, batch_max_gpu_memory = train_with_monitoring(
        batch_adata,
        hidden_dims=1000,
        n_epochs=100,
        num_hidden=600,
        lr=0.00008,
        key_added='SpaGRA',
        a=2, b=1, c=1,
        radius=0,
        weight_decay=0.00001,
        random_seed=0,
        feat_drop=0.02,
        attn_drop=0.01,
        negative_slope=0.02,
        heads=4,
        method="louvain",
        reso=0.8
    )

    # Keep the processed batch for later concatenation.
    batch_results.append(batch_adata)

    # Release the per-batch reference and force a collection to cap memory.
    del batch_adata
    import gc

    gc.collect()

    batch_end_time = time.time()
    batch_time = batch_end_time - batch_start_time
    batch_times.append(batch_time)

    # Record this batch's peak resource usage.
    batch_metric = {
        'batch': i + 1,
        'batch_time': batch_time,
        'max_cpu_percent': batch_max_cpu,
        'max_memory_percent': batch_max_memory,
        'max_gpu_percent': batch_max_gpu,
        'max_gpu_memory_percent': batch_max_gpu_memory
    }
    batch_metrics.append(batch_metric)

    print(f"批次 {i + 1}/{num_batches} 处理完成，耗时: {batch_time:.2f} 秒")
    print(f"资源使用峰值 - CPU: {batch_max_cpu:.2f}%, 内存: {batch_max_memory:.2f}%", end='')
    if has_gpu:
        print(f", GPU: {batch_max_gpu:.2f}%, GPU内存: {batch_max_gpu_memory:.2f}%")
    else:
        print()

# Total and mean per-batch processing time.
total_time = sum(batch_times)
average_time = np.mean(batch_times)
print(f"\n所有批次处理完成！")
print(f"总处理时间: {total_time:.2f} 秒")
print(f"平均批次处理时间: {average_time:.2f} 秒")

# Persist per-batch timing and peak resource usage to CSV.
with open(batch_monitoring_file, 'w', newline='') as f:
    fieldnames = ['batch', 'batch_time', 'max_cpu_percent', 'max_memory_percent', 'max_gpu_percent',
                  'max_gpu_memory_percent']
    writer = csv.DictWriter(f, fieldnames=fieldnames)
    writer.writeheader()
    for metric in batch_metrics:
        writer.writerow(metric)

# Merge the per-batch results into a single object.
# NOTE(review): AnnData.concatenate appends batch suffixes to obs names —
# confirm downstream consumers are fine with the renamed indices.
processed_adata = batch_results[0].concatenate(batch_results[1:])
print(f"处理完成! 合并后的数据集大小: {processed_adata.n_obs}个spots, {processed_adata.n_vars}个基因")

# ================== Clustering evaluation ==================
print("正在计算聚类评估指标...")

# Embedding (obsm) and cluster labels (obs) are both stored under 'SpaGRA'.
X = processed_adata.obsm['SpaGRA']
labels = processed_adata.obs['SpaGRA'].astype(str)

# Calinski-Harabasz index (higher means better-separated clusters).
ch_score = calinski_harabasz_score(X, labels)
print(f"Calinski-Harabasz 指数: {ch_score:.4f}")

# Save the evaluation summary to a text file.
with open("clustering_metrics.txt", "w") as f:
    f.write(f"采样比例: {sampling_ratio * 100}%\n")
    f.write(f"Calinski-Harabasz 指数: {ch_score:.4f}\n")
    f.write(f"总批次处理时间: {total_time:.2f} 秒\n")
    f.write(f"平均批次处理时间: {average_time:.2f} 秒\n")

# ================== Result visualisation ==================
coor = pd.DataFrame(processed_adata.obsm['spatial'])
coor.index = processed_adata.obs.index
coor.columns = ['imagerow', 'imagecol']
processed_adata.obs["x_pixel"] = coor['imagerow']
processed_adata.obs["y_pixel"] = coor['imagecol']

# Scale the marker size inversely with the number of spots.
base_size = 120
point_size = base_size * (40000 / processed_adata.n_obs)  # calibrated on a ~40k-spot reference

domains = 'SpaGRA'
title = f'SpaGRA (Sampled {sampling_ratio * 100:.0f}% Data)\nCH: {ch_score:.4f}'
ax = sc.pl.scatter(processed_adata,
                   alpha=1,
                   x="x_pixel",
                   y="y_pixel",
                   color=domains,
                   legend_fontsize=14,
                   show=False,
                   size=point_size,  # dynamically scaled point size
                   title=title)

ax.set_aspect('equal', 'box')
ax.set_xticks([])
ax.set_yticks([])
ax.axes.invert_yaxis()
plt.savefig(f"HD_sampled_{sampling_ratio}.pdf", bbox_inches='tight', dpi=300)
plt.close()

print(f"分析完成! 结果保存为 HD_sampled_{sampling_ratio}.pdf")
print(f"聚类评估指标已保存到 clustering_metrics.txt")
print(f"批次处理时间已保存到 {batch_monitoring_file}")
print(f"系统资源监控数据已保存到 {monitoring_file}")
print(f"每个批次的资源使用数据已保存到 monitoring_data/epoch_metrics_batch_*.csv")

# Release the NVML handle acquired at import time.
if has_gpu:
    pynvml.nvmlShutdown()