import anndata
import time
import scanpy as sc
import numpy as np
import pandas as pd
import scipy.io
import scipy.sparse as sp
import json
import os
import gc
import logging
import matplotlib.pyplot as plt
import psutil
from pathlib import Path
from typing import List, Union
from sklearn.neighbors import NearestNeighbors
import matplotlib.image as mpimg
from sklearn.decomposition import TruncatedSVD, PCA, IncrementalPCA
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
import umap
import warnings
import h5py
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
from scipy.stats import entropy
import joblib
import seaborn as sns
from matplotlib.colors import ListedColormap, LinearSegmentedColormap

# Import the helper functions available from main.py
from main import (
    generate_spatial_weights_fixed_nbrs,
    concatenate_all,
    LeidenPartition
)

# Configure logging: timestamped INFO-level messages via the root handler.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
warnings.filterwarnings('ignore')  # suppress noisy third-party warnings globally

# Global plotting style: white seaborn background; fonttype 42 embeds TrueType
# fonts so saved PDF/PS figures stay editable; high DPI for publication output.
sns.set_style("white")
plt.rcParams['font.family'] = 'DejaVu Sans'
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 150

# Shared categorical palette (20 colours from tab20) used by all cluster plots.
professional_cmap = ListedColormap(plt.cm.tab20.colors)


# Memory monitoring helper.
def log_memory_usage(msg):
    """Log used/total/available system memory (in GB), prefixed with *msg*."""
    mem = psutil.virtual_memory()
    logger.info(f"{msg} | 内存: {mem.used / 1e9:.1f}GB/{mem.total / 1e9:.1f}GB (可用: {mem.available / 1e9:.1f}GB)")


# -----------------------------------------------------------
# 1. Data loading
# -----------------------------------------------------------
def load_visium_hd_data(resolution='016um'):
    """Load a Visium HD binned output directory into an AnnData object.

    Parameters
    ----------
    resolution : str
        Bin-size directory suffix, e.g. '016um' selects 'square_016um'.

    Returns
    -------
    anndata.AnnData
        Cells x genes float32 matrix with spatial coordinates in
        ``obsm['spatial']`` (scaled to low-res image space) and the low-res
        tissue image path in ``uns['tissue_image']``.
    """
    base_dir = Path('/root/autodl-tmp/shengwujishu/data/binned_outputs') / f'square_{resolution}'
    log_memory_usage("开始加载数据")

    # Expression matrix components (Matrix Market triplet + barcode/feature TSVs).
    mtx_path = base_dir / 'filtered_feature_bc_matrix' / 'matrix.mtx'
    barcodes_path = base_dir / 'filtered_feature_bc_matrix' / 'barcodes.tsv'
    features_path = base_dir / 'filtered_feature_bc_matrix' / 'features.tsv'

    # Some exports lack the Matrix Market banner; prepend one so readers accept it.
    with open(mtx_path, 'r') as f:
        first_line = f.readline().strip()
        if not first_line.startswith('%%MatrixMarket'):
            logger.warning(f"检测到非标准Matrix Market文件头: {first_line[:50]}...")
            logger.warning("尝试修复文件头...")
            # Write a repaired copy with a standard header prepended.
            fixed_mtx_path = base_dir / 'filtered_feature_bc_matrix' / 'matrix_fixed.mtx'
            with open(mtx_path, 'r') as original, open(fixed_mtx_path, 'w') as fixed:
                fixed.write("%%MatrixMarket matrix coordinate real general\n")
                for line in original:
                    fixed.write(line)
            mtx_path = fixed_mtx_path

    logger.info("使用anndata专用加载器读取MTX文件...")
    adata_temp = sc.read_mtx(mtx_path)
    X = adata_temp.X.T.tocsr()  # file stores genes x cells; transpose to cells x genes
    logger.info(f"矩阵加载完成: {X.shape[0]}细胞, {X.shape[1]}特征")

    # Cell barcodes. BUGFIX: only strip a *trailing* '-1' gem-group suffix;
    # the previous unanchored replace removed '-1' anywhere in the barcode.
    barcodes = pd.read_csv(barcodes_path, header=None, sep='\t')[0].str.replace(r'-1$', '', regex=True).values

    # Gene names (column 1 of features.tsv), de-duplicated with _N suffixes.
    features_df = pd.read_csv(features_path, header=None, sep='\t')
    features = features_df[1].values

    unique_features = []
    seen = set()
    for f in features:
        base_name = f
        suffix = 1
        while f in seen:
            f = f"{base_name}_{suffix}"
            suffix += 1
        seen.add(f)
        unique_features.append(f)

    # Spatial coordinates, scaled into the low-resolution image space.
    spatial_dir = base_dir / 'spatial'
    with open(spatial_dir / 'scalefactors_json.json') as f:
        scalefactors = json.load(f)
        scale = scalefactors.get('tissue_lowres_scalef', 1.0)

    positions = pd.read_parquet(spatial_dir / 'tissue_positions.parquet')

    # Normalize the barcode column name across Space Ranger format versions.
    barcode_col = next((col for col in positions.columns if 'barcode' in col.lower()), 'barcode')
    positions = positions.rename(columns={barcode_col: 'barcode'})

    # Same anchored suffix strip as for the matrix barcodes, keeping both aligned.
    positions['barcode'] = positions['barcode'].astype(str).str.replace(r'-1$', '', regex=True)

    # Reorder positions to exactly match the matrix barcode order.
    matched_positions = positions[positions['barcode'].isin(barcodes)].set_index('barcode').loc[barcodes].reset_index()

    # Coordinate column names differ between format versions; guess, with fallbacks.
    x_col = next((col for col in matched_positions.columns if ('x' in col.lower() or 'col' in col.lower())),
                 'pxl_col_in_fullres')
    y_col = next((col for col in matched_positions.columns if ('y' in col.lower() or 'row' in col.lower())),
                 'pxl_row_in_fullres')

    coords = matched_positions[[x_col, y_col]].values.astype(np.float32) * scale

    # Assemble the AnnData object.
    adata = anndata.AnnData(
        X=X.astype(np.float32),
        obs=pd.DataFrame(index=barcodes),
        var=pd.DataFrame(index=unique_features),  # de-duplicated gene names
        obsm={'spatial': coords}
    )

    adata.uns['tissue_image'] = str(spatial_dir / 'tissue_lowres_image.png')

    # Belt-and-braces: enforce unique var names at the AnnData level too.
    adata.var_names_make_unique()

    log_memory_usage("数据加载完成")
    return adata


# -----------------------------------------------------------
# 2. Configurable data preprocessing (downsampling toggle)
# -----------------------------------------------------------
def preprocess_data(adata, enable_downsampling=True):
    """Basic preprocessing: optional log1p transform plus top-gene selection.

    Parameters
    ----------
    adata : anndata.AnnData
        Input expression matrix (cells x genes). Never modified in place.
    enable_downsampling : bool
        When False, the expression matrix is log1p-transformed first. Gene
        selection to the top 30000 genes by total counts runs in both modes.

    Returns
    -------
    anndata.AnnData
        A new preprocessed object, or an untouched copy of the input if
        preprocessing fails.
    """
    logger.info(f"开始数据预处理 (降采样: {'开启' if enable_downsampling else '关闭'})...")

    # BUGFIX: copy *before* transforming. The original called
    # sc.pp.log1p(adata) on the caller's object, mutating it in place, and the
    # error-path fallback then returned already-transformed data.
    adata_out = adata.copy()

    if not enable_downsampling:
        logger.info("对数化表达数据...")
        sc.pp.log1p(adata_out)
        logger.info("对数化完成")

    try:
        logger.info(f"基因降采样前: {adata_out.n_vars}")

        # Total expression per gene as a dense 1-D vector, sparse-safe.
        if sp.issparse(adata_out.X):
            gene_counts = np.array(adata_out.X.sum(axis=0)).flatten()
        else:
            gene_counts = adata_out.X.sum(axis=0)

        # Keep the 30000 most-expressed genes (or all genes if fewer).
        gene_idx_sorted = np.argsort(gene_counts)[::-1]
        top_genes = min(30000, adata_out.n_vars)
        gene_idx_keep = gene_idx_sorted[:top_genes]
        adata_out = adata_out[:, gene_idx_keep].copy()

        # Record mean expression of the retained genes for later inspection.
        adata_out.var['mean_expression'] = gene_counts[gene_idx_keep] / adata_out.n_obs

        logger.info(f"基因降采样后: {adata_out.n_vars}基因")

        # Mark the object as preprocessed.
        adata_out.uns['preprocessed'] = True

    except Exception as e:
        logger.error(f"预处理失败: {str(e)}")
        # Fall back to an untouched copy of the original input on any failure.
        return adata.copy()

    finally:
        # Explicit collection: the intermediate copies can be large.
        gc.collect()
        log_memory_usage("预处理完成")

    return adata_out


# -----------------------------------------------------------
# 3. Memory-efficient BANKSY feature generation (disk-backed)
# -----------------------------------------------------------
def generate_banksy_features(adata, lambda_val=0.3, nbrs=30):
    """
    Memory-efficient BANKSY feature generation.

    Builds the concatenated [(1-lambda)*own expression | lambda*neighbour
    expression] matrix in cell chunks, streaming each block into an HDF5 file
    so the full (n_cells x 2*n_genes) float32 matrix never resides in RAM.

    Parameters
    ----------
    adata : anndata.AnnData
        Input with spatial coordinates in ``obsm['spatial']``.
    lambda_val : float
        BANKSY mixing weight between own and neighbourhood expression.
    nbrs : int
        Number of spatial nearest neighbours for the weight graph.

    Returns
    -------
    anndata.AnnData
        HDF5-backed object of shape (n_cells, 2*n_genes). NOTE: the open
        read-only ``h5py.File`` handle is stashed in ``uns['_h5f']`` so the
        caller can (and must) close it when finished.
    """
    log_memory_usage("开始生成BANKSY特征")
    locations = adata.obsm['spatial'].astype(np.float32)
    n_cells = adata.n_obs
    n_features = adata.n_vars * 2  # BANKSY doubles the feature space

    # 1. Spatial weight graph (fixed k neighbours, reciprocal-distance decay).
    logger.info("生成空间权重矩阵...")
    graph_out, distance_graph, _ = generate_spatial_weights_fixed_nbrs(
        locations,
        m=0,
        num_neighbours=nbrs,
        decay_type="reciprocal",
        verbose=False
    )

    # kNN graphs are generally asymmetric; symmetrize so downstream steps can
    # treat the weights as an undirected graph.
    logger.info("确保空间权重矩阵对称...")
    if not (graph_out != graph_out.T).nnz == 0:  # any mismatching entries?
        logger.warning("空间权重矩阵不对称，正在对称化处理...")
        # Option 1: average with the transpose (more stable).
        graph_out = (graph_out + graph_out.T) / 2.0

        # Option 2 (unused): element-wise maximum, which keeps every edge.
        # graph_out = graph_out.maximum(graph_out.T)

        logger.info("空间权重矩阵对称化完成")

    # CSR gives fast row slicing for the chunked products below.
    graph_out = graph_out.tocsr()

    # 2. On-disk HDF5 store for the BANKSY feature matrix.
    banksy_file = "banksy_features.h5"
    if os.path.exists(banksy_file):
        os.remove(banksy_file)

    logger.info(f"创建磁盘缓存文件: {banksy_file}")

    # Create the HDF5 file and stream blocks into it.
    with h5py.File(banksy_file, 'w') as hf:
        # Chunked + gzip-compressed dataset; small chunks limit write memory.
        dset = hf.create_dataset("banksy",
                                 shape=(n_cells, n_features),
                                 dtype='float32',
                                 chunks=(500, n_features),  # small chunks to reduce memory pressure
                                 compression="gzip")

        # Stream over cells in blocks.
        chunk_size = 1000
        progress = 0

        for start in range(0, n_cells, chunk_size):
            end = min(start + chunk_size, n_cells)
            chunk_indices = slice(start, end)
            chunk_length = end - start

            # Dense copy of this block's own expression.
            if sp.issparse(adata.X):
                self_expr = adata.X[chunk_indices].toarray()
            else:
                self_expr = adata.X[chunk_indices].copy()

            # Weighted neighbour expression: block rows of the graph times X.
            graph_chunk = graph_out[chunk_indices]

            nbr_expr_chunk = graph_chunk.dot(adata.X)

            if sp.issparse(nbr_expr_chunk):
                nbr_expr_chunk = nbr_expr_chunk.toarray()

            # BANKSY block: [(1-lambda)*own | lambda*neighbour].
            banksy_block = np.hstack([
                (1 - lambda_val) * self_expr,
                lambda_val * nbr_expr_chunk
            ]).astype('float32')

            # Flush the block to disk.
            dset[start:end] = banksy_block

            # Periodic progress/memory logging.
            progress += chunk_length
            if progress % 5000 == 0 or end == n_cells:
                logger.info(f"BANKSY特征计算进度: {progress}/{n_cells} ({progress * 100 / n_cells:.1f}%)")
                log_memory_usage(f"进度{progress}")

            # Free block buffers eagerly to cap peak memory.
            del self_expr, graph_chunk, nbr_expr_chunk, banksy_block
            gc.collect()

    # Reopen read-only and wrap the dataset in an AnnData without loading it.
    logger.info("创建基于HDF5的AnnData对象...")
    h5f = h5py.File(banksy_file, 'r')
    banksy_dataset = h5f['banksy']  # array-like HDF5 dataset

    banksy_adata = anndata.AnnData(
        banksy_dataset,  # backed directly by the HDF5 dataset
        obs=adata.obs.copy(),
        var=pd.DataFrame(index=list(adata.var_names) + [f"BANKSY_{gene}" for gene in adata.var_names]),
    )

    banksy_adata.uns = adata.uns.copy()
    banksy_adata.obsm = adata.obsm.copy()
    banksy_adata.uns['_h5f'] = h5f  # keep the handle so the caller can close it later

    logger.info(f"BANKSY特征生成完成: {banksy_adata.shape}")
    log_memory_usage("BANKSY特征生成完成")
    return banksy_adata


# -----------------------------------------------------------
# 4.1 Clustering evaluation metrics
# -----------------------------------------------------------
def compute_clustering_metrics(adata, features=None):
    """Compute clustering quality metrics for 'BANKSY_clusters' (memory-light).

    Parameters
    ----------
    adata : anndata.AnnData
        Must carry cluster labels in ``obs['BANKSY_clusters']``.
    features : array-like, optional
        Feature matrix used for the silhouette score. If it has one row per
        cell it is subsampled together with the labels; when None, a
        subsample of ``adata.X`` is used instead.

    Returns
    -------
    dict
        Keys: cluster_count, size_variation, entropy and (when computable)
        silhouette_overall.
    """
    logger.info("计算聚类评估指标...")
    metrics = {}

    if 'BANKSY_clusters' not in adata.obs.columns:
        logger.warning("没有找到聚类标签，跳过评估")
        return metrics

    # Integer-coded labels only; the full feature matrix is never loaded here.
    labels = adata.obs['BANKSY_clusters'].astype('category').cat.codes.values
    unique_labels = np.unique(labels)

    # Cluster count and relative-size dispersion (coefficient of variation).
    cluster_counts = adata.obs['BANKSY_clusters'].value_counts(normalize=True)
    metrics['cluster_count'] = len(unique_labels)
    metrics['size_variation'] = cluster_counts.std() / cluster_counts.mean() if len(cluster_counts) > 1 else 0

    # Shannon entropy of the cluster-size distribution.
    metrics['entropy'] = entropy(cluster_counts.values)

    # A single cluster makes every pairwise metric undefined.
    if len(unique_labels) == 1:
        logger.warning("只有一个聚类，跳过所有需要多个聚类的指标")
        return metrics

    # Silhouette on at most 10000 cells to bound memory and time.
    try:
        n_sub = min(10000, adata.n_obs)
        if n_sub == adata.n_obs:
            subsample_idx = slice(None)
        else:
            rng = np.random.default_rng(42)
            subsample_idx = rng.choice(adata.n_obs, size=n_sub, replace=False)

        if features is None:
            if sp.issparse(adata.X):
                features = adata.X[subsample_idx].toarray()
            else:
                features = adata.X[subsample_idx]
        elif features.shape[0] == adata.n_obs:
            # BUGFIX: a caller-supplied full-size matrix must be subsampled the
            # same way as the labels; previously the full matrix was paired
            # with subsampled labels, so silhouette_score always raised on a
            # length mismatch.
            features = features[subsample_idx]

        labels_sub = labels[subsample_idx]
        metrics['silhouette_overall'] = silhouette_score(features, labels_sub)

        # Per-cluster silhouettes intentionally skipped (memory-heavy).

    except Exception as e:
        logger.warning(f"轮廓系数计算失败: {str(e)}")

    logger.info(f"聚类评估指标计算完成: {metrics.keys()}")
    return metrics


# -----------------------------------------------------------
# 4.2 Comparison with other clustering methods (memory-optimised)
# -----------------------------------------------------------
def compare_with_other_methods(adata, n_clusters=8):
    """Baseline comparison: MiniBatch K-means on a PCA embedding.

    Uses the top 2000 genes by mean expression and at most 50000 randomly
    sampled cells to bound memory. Labels are written to
    ``adata.obs['kmeans_clusters']``; when subsampling was applied, cells
    outside the sample keep an empty-string label.

    Parameters
    ----------
    adata : anndata.AnnData
        Expression matrix to cluster.
    n_clusters : int
        Number of K-means clusters.

    Returns
    -------
    (anndata.AnnData, dict)
        The annotated object and a dict holding the raw 'kmeans' labels.
    """
    logger.info("运行内存优化的K-means对比...")
    results = {}

    # 1. Restrict to a high-expression gene subset (sparse-safe means).
    if sp.issparse(adata.X):
        gene_means = np.array(adata.X.mean(axis=0)).flatten()
    else:
        gene_means = adata.X.mean(axis=0)

    top_genes = np.argsort(gene_means)[::-1][:2000]  # top 2000 genes only

    # 2. Subsample cells when the dataset is too large. An explicit flag
    #    replaces the fragile "'sample_idx' in locals()" check used before.
    max_cells = 50000
    sampled = adata.n_obs > max_cells
    sample_idx = None
    if sampled:
        logger.warning(f"数据规模太大({adata.n_obs}细胞)，使用随机子样本({max_cells})")
        rng = np.random.default_rng(42)
        sample_idx = rng.choice(adata.n_obs, size=max_cells, replace=False)
        adata_top = adata[sample_idx, top_genes].copy()
    else:
        adata_top = adata[:, top_genes].copy()

    # 3. Standard PCA to 50 components for the baseline method.
    logger.info("执行PCA降维用于对比方法...")
    pca = PCA(n_components=50)

    if sp.issparse(adata_top.X):
        X = adata_top.X.toarray()
    else:
        X = adata_top.X

    reduced_data = pca.fit_transform(X.astype('float32'))
    log_memory_usage("降维完成")

    # 4. MiniBatch K-means (much lower memory than full K-means).
    from sklearn.cluster import MiniBatchKMeans
    logger.info("运行MiniBatch K-means聚类...")
    kmeans = MiniBatchKMeans(n_clusters=n_clusters, random_state=42, batch_size=10000)
    kmeans_labels = kmeans.fit_predict(reduced_data)
    results['kmeans'] = kmeans_labels

    # 5. Write labels back. Vectorized assignment replaces the previous
    #    per-cell .iloc loop (one DataFrame write per sampled cell).
    if sampled:
        labels_col = np.full(adata.n_obs, "", dtype=object)
        labels_col[sample_idx] = kmeans_labels.astype(str)
        adata.obs['kmeans_clusters'] = labels_col
    else:
        adata.obs['kmeans_clusters'] = pd.Categorical(kmeans_labels.astype(str))

    logger.info("K-means对比聚类完成")
    return adata, results


# -----------------------------------------------------------
# 4.3 Optimised BANKSY clustering (memory-optimised)
# -----------------------------------------------------------
def banksy_clustering(adata, banksy_adata, n_clusters=8):
    """
    Memory-optimised BANKSY spatial clustering.

    Pipeline: select a 5000-gene subset of the BANKSY feature matrix ->
    chunked std-only scaling -> IncrementalPCA to <=50 components -> Leiden
    clustering with a small resolution sweep towards ``n_clusters`` ->
    quality metrics and a spatial-kNN cluster connectivity matrix.

    Side effects on ``adata``: writes ``obsm['X_pca']``,
    ``obs['BANKSY_clusters']``, ``uns['banksy_metrics']`` and
    ``uns['connectivity_matrix']``.

    Parameters
    ----------
    adata : anndata.AnnData
        Original expression object (receives all results).
    banksy_adata : anndata.AnnData
        BANKSY feature matrix (possibly HDF5-backed), columns ordered as
        [original genes | BANKSY copies].
    n_clusters : int
        Target number of clusters for the resolution sweep.

    Returns
    -------
    anndata.AnnData
        The annotated ``adata``.
    """
    logger.info("开始内存优化的Banksy空间聚类...")
    log_memory_usage("开始聚类")

    # 1. Cluster on a feature subset (top 5000 genes by mean expression)
    #    to reduce memory pressure.
    if sp.issparse(adata.X):
        gene_means = np.array(adata.X.mean(axis=0)).flatten()
    else:
        gene_means = adata.X.mean(axis=0)

    top_genes = np.argsort(gene_means)[::-1][:5000]

    # 2. Assemble the clustering input from the BANKSY matrix, keeping each
    #    selected gene plus its BANKSY counterpart.
    logger.info("准备聚类输入矩阵(特征子集)...")

    # May be an in-memory array or an HDF5 dataset.
    banksy_data = banksy_adata.X

    # Column indices: originals plus the shifted BANKSY copies. HDF5 fancy
    # indexing requires ascending indices, hence the sort.
    feature_indices = np.concatenate([
        top_genes,  # original features
        top_genes + adata.n_vars  # corresponding BANKSY features
    ])
    feature_indices.sort()  # ascending order required by h5py

    # Chunked extraction into a dense float32 matrix.
    chunk_size = 10000
    X = np.zeros((banksy_data.shape[0], len(feature_indices)), dtype='float32')

    for start in range(0, banksy_data.shape[0], chunk_size):
        end = min(start + chunk_size, banksy_data.shape[0])
        # Extract the feature subset for this block (indices already sorted).
        try:
            data_chunk = banksy_data[start:end, :]
            if hasattr(data_chunk, 'shape'):
                X[start:end, :] = data_chunk[:, feature_indices]
            else:
                # Row-by-row fallback for HDF5-style datasets.
                for i in range(start, end):
                    X[i, :] = banksy_data[i, feature_indices]
        except TypeError as e:
            # Backends that reject 2-D fancy indexing raise TypeError.
            logger.error(f"提取数据失败: {e}")
            # Fallback: extract row by row.
            for i in range(start, end):
                X[i, :] = banksy_data[i, feature_indices]

    # 3. Std-only standardisation, fitted incrementally over chunks.
    logger.info("增量特征标准化...")
    scaler = StandardScaler(with_mean=False, with_std=True)  # scale by std only

    # Accumulate per-feature variance in chunks.
    chunk_size = 10000
    for i in range(0, X.shape[0], chunk_size):
        end = min(i + chunk_size, X.shape[0])
        scaler.partial_fit(X[i:end])

    # Apply in place: divide each block by the learnt per-feature scale.
    for i in range(0, X.shape[0], chunk_size):
        end = min(i + chunk_size, X.shape[0])
        X[i:end] = X[i:end] / scaler.scale_

    log_memory_usage("特征标准化后")

    # 4. IncrementalPCA: two chunked passes (partial_fit, then transform).
    logger.info("执行增量PCA降维...")
    n_components = min(50, X.shape[1])
    pca = IncrementalPCA(n_components=n_components, batch_size=5000)

    # Pass 1: fit.
    chunk_size_fit = 10000
    total_cells = X.shape[0]
    logger.info(f"开始PCA partial_fit, 共{total_cells}个细胞...")
    for i, start in enumerate(range(0, total_cells, chunk_size_fit)):
        end = min(start + chunk_size_fit, total_cells)
        pca.partial_fit(X[start:end])
        progress = end if end < total_cells else total_cells
        logger.info(f"PCA partial_fit进度: {progress}/{total_cells} ({progress * 100 / total_cells:.1f}%)")

    # Pass 2: transform.
    logger.info(f"开始PCA transform, 共{total_cells}个细胞...")
    reduced = np.zeros((total_cells, pca.n_components), dtype='float32')
    chunk_size_transform = 10000
    for i, start in enumerate(range(0, total_cells, chunk_size_transform)):
        end = min(start + chunk_size_transform, total_cells)
        reduced[start:end] = pca.transform(X[start:end])
        progress = end if end < total_cells else total_cells
        logger.info(f"PCA transform进度: {progress}/{total_cells} ({progress * 100 / total_cells:.1f}%)")

    log_memory_usage("PCA降维后")
    adata.obsm['X_pca'] = reduced  # kept for UMAP/visualisation later

    # 5. Leiden clustering with a coarse resolution sweep.
    logger.info(f"执行Leiden聚类(目标聚类数={n_clusters})...")

    # Shared-nearest-neighbour Leiden partitioner built on the PCA embedding.
    partitioner = LeidenPartition(
        input_space=reduced,
        num_nn=20,
        nns_have_weights=True,
        compute_shared_nn=True,
        filter_shared_nn=True,
        shared_nn_max_rank=3,
        shared_nn_min_shared_nbrs=5,
        verbose=False
    )

    # Sweep resolutions upward, keeping the one whose cluster count lands
    # closest to the requested n_clusters.
    base_resolution = 0.1
    max_resolution = 1.5
    resolution_step = 0.05
    max_iterations = 5  # cap on sweep attempts

    best_resolution = 0.3  # fallback default
    best_n_clusters = 0
    closest_diff = float('inf')

    for i in range(max_iterations):
        res = base_resolution + i * resolution_step
        if res > max_resolution:
            break

        # Cheap pass: fewer Leiden iterations during the sweep.
        label, _ = partitioner.partition(
            resolution=res,
            n_iterations=15,  # reduced iteration count for the sweep
            seed=42
        )
        clusters = np.array(label.dense)
        n_clusters_found = len(np.unique(clusters))

        # Track the resolution closest to the target cluster count.
        diff = abs(n_clusters_found - n_clusters)
        if diff < closest_diff:
            closest_diff = diff
            best_resolution = res
            best_n_clusters = n_clusters_found

        # Close enough to the target: stop the sweep early.
        if diff <= 2:
            break

    # Final clustering at the best resolution with more iterations.
    label, _ = partitioner.partition(
        resolution=best_resolution,
        n_iterations=30,
        seed=42
    )
    clusters = np.array(label.dense)
    n_clusters_found = len(np.unique(clusters))
    logger.info(f"最佳分辨率={best_resolution:.2f} -> 聚类数={n_clusters_found}")

    # Store labels as string-valued categoricals.
    adata.obs['BANKSY_clusters'] = pd.Categorical(clusters.astype(str))

    # Defensive re-cast; pd.Categorical above already yields category dtype.
    adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype('category')

    # Cluster-size summary.
    cluster_counts = adata.obs['BANKSY_clusters'].value_counts()
    logger.info("聚类结果统计:\n%s" % cluster_counts)

    # Quality metrics: count, size CV, sampled silhouette, entropy.
    logger.info("计算BANKSY聚类评估指标...")
    metrics = {}

    # Number of clusters found.
    n_clusters_found = len(adata.obs['BANKSY_clusters'].cat.categories)
    metrics['cluster_count'] = n_clusters_found

    # Coefficient of variation of cluster sizes.
    cluster_sizes = cluster_counts.values
    size_variation = np.std(cluster_sizes) / np.mean(cluster_sizes)
    metrics['size_variation'] = size_variation

    # Silhouette on at most 10000 sampled cells (full computation is O(n^2)).
    try:
        n_samples = reduced.shape[0]
        if n_samples > 10000:
            # Sample 10000 cells for the silhouette estimate.
            sample_idx = np.random.choice(n_samples, 10000, replace=False)
            sample_emb = reduced[sample_idx]
            sample_labels = adata.obs['BANKSY_clusters'].values[sample_idx]
            silhouette_avg = silhouette_score(sample_emb, sample_labels)
        else:
            silhouette_avg = silhouette_score(reduced, adata.obs['BANKSY_clusters'])
        metrics['silhouette'] = silhouette_avg
    except Exception as e:
        logger.warning(f"轮廓系数计算失败: {str(e)}")
        metrics['silhouette'] = -1

    # Shannon entropy of cluster proportions.
    # NOTE(review): this local name shadows scipy.stats.entropy imported at
    # module level — harmless within this function, but worth renaming.
    cluster_props = cluster_sizes / cluster_sizes.sum()
    entropy = -np.sum(cluster_props * np.log(cluster_props))
    metrics['entropy'] = entropy

    adata.uns['banksy_metrics'] = metrics
    logger.info("聚类评估指标计算完成: %s" % list(metrics.keys()))

    # Cluster-to-cluster spatial connectivity from a 25-NN graph.
    logger.info("计算聚类连通性矩阵...")
    try:
        # Integer cluster codes for matrix indexing.
        clusters = adata.obs['BANKSY_clusters'].cat.codes.values
        unique_clusters = np.unique(clusters)
        n_clusters_found = len(unique_clusters)

        # Spatial coordinates.
        coords = adata.obsm['spatial']

        # Pairwise cluster-connection counts.
        connectivity = np.zeros((n_clusters_found, n_clusters_found))

        # Spatial k-nearest-neighbour graph; rank 0 is the cell itself.
        nbrs = NearestNeighbors(n_neighbors=25).fit(coords)
        distances, indices = nbrs.kneighbors(coords)

        # Count cross-cluster neighbour pairs.
        for i in range(len(clusters)):
            source_cluster = clusters[i]
            neighbors = indices[i]
            neighbor_clusters = clusters[neighbors]

            for j in range(1, len(neighbors)):  # skip self (rank 0)
                target_cluster = neighbor_clusters[j]
                connectivity[source_cluster, target_cluster] += 1

        # Row-normalise to outgoing-edge fractions.
        row_sums = connectivity.sum(axis=1)
        connectivity = connectivity / row_sums[:, np.newaxis]

        # Symmetrise by averaging with the transpose.
        connectivity = (connectivity + connectivity.T) / 2

        # Store as a labelled DataFrame for plotting.
        cluster_names = adata.obs['BANKSY_clusters'].cat.categories
        connectivity_df = pd.DataFrame(
            connectivity,
            index=cluster_names,
            columns=cluster_names
        )

        adata.uns['connectivity_matrix'] = connectivity_df
        logger.info("聚类连通性矩阵计算完成")
    except Exception as e:
        logger.error(f"计算聚类连通性矩阵失败: {str(e)}")

    log_memory_usage("聚类完成")
    return adata


# -----------------------------------------------------------
# 5. Publication-quality visualisation utilities
# -----------------------------------------------------------

def plot_cluster_size_distribution(adata, output_dir):
    """Bar chart of per-cluster cell counts, annotated with percentages."""
    try:
        logger.info("生成聚类大小分布图...")

        # Per-cluster counts, ordered by cluster id, plus percentage shares.
        counts = adata.obs['BANKSY_clusters'].value_counts().sort_index()
        percentages = (counts / counts.sum() * 100).round(1)

        labels = counts.index
        values = counts.values
        peak = max(values)

        fig, ax = plt.subplots(figsize=(12, 6))
        ax.bar(labels, values, color=professional_cmap.colors, alpha=0.8)

        # Annotate each bar with its absolute count and percentage share.
        for pos, (count, perc) in enumerate(zip(values, percentages.values)):
            ax.text(pos, count + peak * 0.01,
                    f"{count}\n({perc}%)",
                    ha='center',
                    fontsize=9)

        # Titles, axis labels and a light horizontal grid.
        ax.set_title('Cluster Size Distribution', fontsize=14, fontweight='bold')
        ax.set_xlabel('Cluster ID', fontsize=12)
        ax.set_ylabel('Number of Cells', fontsize=12)
        ax.set_ylim(0, peak * 1.15)
        ax.grid(axis='y', linestyle='--', alpha=0.7)

        plt.xticks(rotation=45)
        plt.tight_layout()

        # Persist and release the figure.
        save_path = os.path.join(output_dir, 'cluster_distribution.png')
        fig.savefig(save_path, bbox_inches='tight')
        plt.close(fig)
        logger.info(f"聚类大小分布图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成聚类大小分布图失败: {str(e)}")


def plot_spatial_scatter_by_cluster(adata, output_dir):
    """One spatial panel per cluster: all cells in grey, cluster cells coloured."""
    try:
        logger.info("生成按聚类的空间散点图...")

        # Work with string labels (also updates adata.obs in place, as before).
        adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype(str)
        unique_clusters = adata.obs['BANKSY_clusters'].unique()

        # Grid layout: up to four panels per row.
        n_clusters = len(unique_clusters)
        n_rows = (n_clusters + 3) // 4
        fig, axes = plt.subplots(n_rows, 4, figsize=(16, n_rows * 4), squeeze=False)

        # Shared coordinate extent so every panel is comparable.
        coords = adata.obsm['spatial']
        x_min, x_max = coords[:, 0].min(), coords[:, 0].max()
        y_min, y_max = coords[:, 1].min(), coords[:, 1].max()

        for i, cluster_id in enumerate(sorted(unique_clusters)):
            ax = axes[i // 4, i % 4]
            cluster_mask = (adata.obs['BANKSY_clusters'] == cluster_id)

            # Faint backdrop of every cell for spatial context.
            ax.scatter(coords[:, 0], coords[:, 1], s=1, c='lightgray', alpha=0.1)

            # Highlight the current cluster's cells.
            ax.scatter(coords[cluster_mask, 0], coords[cluster_mask, 1],
                       s=8,
                       color=professional_cmap(i),
                       alpha=0.8,
                       label=f'Cluster {cluster_id}')

            ax.set_title(f'Cluster {cluster_id}', fontsize=10)
            ax.set_xlim(x_min, x_max)
            ax.set_ylim(y_min, y_max)
            ax.axis('off')

        # Remove any unused trailing axes in the grid.
        for j in range(n_clusters, n_rows * 4):
            fig.delaxes(axes[j // 4, j % 4])

        plt.suptitle('Spatial Distribution by Cluster', fontsize=16, fontweight='bold')
        plt.tight_layout()
        save_path = os.path.join(output_dir, 'spatial_scatter_by_cluster.png')
        fig.savefig(save_path, bbox_inches='tight')
        plt.close(fig)
        logger.info(f"按聚类的空间散点图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成按聚类的空间散点图失败: {str(e)}")


def plot_umap_visualization(adata, output_dir):
    """Two-panel UMAP figure: feature-space UMAP (via scanpy) and a UMAP of
    normalized spatial coordinates, both coloured by BANKSY cluster.

    Side effects: scanpy adds neighbours/UMAP results to ``adata``, and the
    spatial UMAP is stored in ``obsm['X_spatial_umap']``.
    """
    try:
        logger.info("生成UMAP可视化...")

        # The feature-space UMAP builds on the PCA embedding saved by
        # banksy_clustering; bail out if it is missing.
        if 'X_pca' not in adata.obsm:
            logger.warning("缺少PCA结果，无法生成UMAP")
            return

        fig, axes = plt.subplots(1, 2, figsize=(18, 7))

        # Panel 1: UMAP of the PCA embedding, clusters coloured by scanpy.
        logger.info("计算特征空间UMAP...")
        sc.pp.neighbors(adata, n_neighbors=15, use_rep='X_pca')
        sc.tl.umap(adata)

        sc.pl.umap(
            adata,
            color='BANKSY_clusters',
            palette=professional_cmap.colors,
            ax=axes[0],
            show=False,
            legend_loc=None,
            title='UMAP: Feature Space'
        )

        # Shared legend above both panels.
        # NOTE(review): with legend_loc=None the axis may expose no handles,
        # in which case this block is skipped silently — confirm intended.
        handles, labels = axes[0].get_legend_handles_labels()
        if len(handles) > 0:
            fig.legend(
                handles,
                labels,
                loc='upper center',
                ncol=min(10, len(labels)),
                bbox_to_anchor=(0.5, 0.95),
                frameon=False
            )

        # Panel 2: UMAP of min-max normalized spatial coordinates.
        logger.info("计算空间坐标UMAP...")
        spatial_coords = adata.obsm['spatial'].copy()
        spatial_coords -= np.min(spatial_coords, axis=0)
        spatial_coords /= np.max(spatial_coords, axis=0)

        reducer = umap.UMAP(random_state=42)
        spatial_umap = reducer.fit_transform(spatial_coords)
        adata.obsm['X_spatial_umap'] = spatial_umap

        # Scatter of the spatial UMAP, coloured by integer cluster code.
        scatter = axes[1].scatter(
            spatial_umap[:, 0],
            spatial_umap[:, 1],
            c=adata.obs['BANKSY_clusters'].astype('category').cat.codes,
            cmap=professional_cmap,
            s=8,
            alpha=0.7
        )
        axes[1].set_title('UMAP: Spatial Coordinates', fontsize=12)
        axes[1].set_xlabel('UMAP1')
        axes[1].set_ylabel('UMAP2')

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'umap_visualization.png')
        fig.savefig(save_path, bbox_inches='tight')
        plt.close(fig)
        logger.info(f"UMAP可视化已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成UMAP可视化失败: {str(e)}")


def plot_spatial_clusters(adata, output_dir, background_alpha=0.2, point_size=3, min_cluster_size=0.01):
    """
    Spatial scatter of all clusters on one axis, optionally over the tissue image.

    Parameters
    ----------
    adata : anndata.AnnData
        Needs ``obs['BANKSY_clusters']`` and ``obsm['spatial']``.
    output_dir : str
        Directory for 'spatial_clusters.png'.
    background_alpha : float
        Opacity of the tissue background image.
    point_size : float
        Scatter marker size.
    min_cluster_size : float
        Minimum cluster size as a fraction of all cells; smaller clusters
        are omitted from the plot.

    Returns
    -------
    matplotlib.figure.Figure
        The figure object (already closed when returned).

    Raises
    ------
    ValueError
        If no clustering result is present.
    """
    if 'BANKSY_clusters' not in adata.obs.columns:
        raise ValueError("聚类结果未找到")

    # Work with string cluster ids (mutates adata.obs in place).
    adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype(str)

    # Drop clusters below the size threshold.
    cluster_counts = adata.obs['BANKSY_clusters'].value_counts()
    min_count = int(adata.n_obs * min_cluster_size)
    valid_clusters = cluster_counts[cluster_counts >= min_count].index

    # Temporary AnnData restricted to the surviving clusters.
    adata_valid = adata[adata.obs['BANKSY_clusters'].isin(valid_clusters)].copy()

    # Remap cluster ids to consecutive integers (as strings) for colouring.
    cluster_id_map = {cid: str(i) for i, cid in enumerate(valid_clusters)}
    adata_valid.obs['cluster_id'] = adata_valid.obs['BANKSY_clusters'].map(cluster_id_map)

    # Numeric sort of the remapped ids.
    clusters = adata_valid.obs['cluster_id'].unique()
    clusters = sorted(clusters, key=lambda x: int(x))

    fig, ax = plt.subplots(figsize=(12, 10))

    # Optional tissue-image backdrop, stretched to the coordinate extent.
    img_path = adata.uns.get('tissue_image', '')
    if img_path and os.path.exists(img_path):
        try:
            img = mpimg.imread(img_path)
            x_min, x_max = adata.obsm['spatial'][:, 0].min(), adata.obsm['spatial'][:, 0].max()
            y_min, y_max = adata.obsm['spatial'][:, 1].min(), adata.obsm['spatial'][:, 1].max()

            ax.imshow(img,
                      extent=[x_min, x_max, y_min, y_max],
                      alpha=background_alpha,
                      origin='lower',
                      cmap='gray')
            logger.info(f"组织背景图像已加载: {img_path}")
        except Exception as e:
            logger.warning(f"加载背景图像失败: {e}")

    # Shared categorical palette (cycled if more clusters than colours).
    colors = professional_cmap.colors

    all_coords = adata_valid.obsm['spatial']
    cluster_ids = adata_valid.obs['cluster_id'].values

    # One scatter call per cluster so each gets its own legend entry.
    for cluster in clusters:
        mask = (cluster_ids == cluster)
        points = all_coords[mask]
        ax.scatter(
            points[:, 0],
            points[:, 1],
            s=point_size,
            color=colors[int(cluster) % len(colors)],
            alpha=0.7,
            label=f'Cluster {cluster}'
        )

    # Legend only while it stays readable.
    if len(clusters) <= 20:
        ax.legend(
            title='Clusters',
            loc='center left',
            bbox_to_anchor=(1.05, 0.5),
            ncol=1 if len(clusters) > 10 else 2,
            markerscale=2.0,
            fontsize=9,
            frameon=False
        )

    # Title and axis labels.
    ax.set_title('BANKSY Spatial Clustering', fontsize=14, fontweight='bold')
    ax.set_xlabel('X Coordinate', fontsize=12)
    ax.set_ylabel('Y Coordinate', fontsize=12)

    plt.tight_layout()

    # Persist and release the figure.
    save_path = os.path.join(output_dir, 'spatial_clusters.png')
    fig.savefig(save_path, bbox_inches='tight')
    plt.close(fig)
    logger.info(f"空间聚类图已保存: {save_path}")

    return fig


def plot_cluster_correlation(adata, output_dir):
    """Heatmap of pairwise correlations between cluster mean expression profiles.

    For every BANKSY cluster the mean expression vector over all its cells is
    computed (sparse-safe), then the cluster-by-cluster Pearson correlation
    matrix is rendered with seaborn and saved to
    ``<output_dir>/cluster_correlation.png``. Errors are logged, not raised.
    """
    try:
        logger.info("Generating cluster correlation heatmap...")

        # Cluster labels in categorical order.
        labels = adata.obs['BANKSY_clusters'].astype('category').cat.categories.tolist()

        def _mean_profile(label):
            # Average expression of one cluster; handles sparse and dense X.
            subset = adata[adata.obs['BANKSY_clusters'] == label]
            if sp.issparse(adata.X):
                return np.array(subset.X.mean(axis=0)).flatten()
            return subset.X.mean(axis=0)

        profiles = np.array([_mean_profile(lbl) for lbl in labels])

        # Pairwise Pearson correlation between cluster profiles.
        corr = np.corrcoef(profiles)

        plt.figure(figsize=(10, 8))
        sns.heatmap(corr, annot=True, fmt=".2f", cmap='coolwarm',
                    vmin=-1, vmax=1, xticklabels=labels, yticklabels=labels)
        plt.title('Inter-Cluster Expression Correlation', fontsize=16)
        plt.tight_layout()

        out_file = os.path.join(output_dir, 'cluster_correlation.png')
        plt.savefig(out_file, dpi=300)
        plt.close()
        logger.info(f"Cluster correlation heatmap saved: {out_file}")

    except Exception as e:
        logger.error(f"Failed to generate cluster correlation heatmap: {str(e)}")


def plot_connectivity_matrix(adata, output_dir, figsize=(12, 10)):
    """Plot the cluster connectivity matrix as an annotated heatmap.

    Reads ``adata.uns['connectivity_matrix']`` (a square pandas DataFrame
    indexed by cluster label), forces the diagonal to 1.0, symmetrizes by
    averaging if needed, and saves ``<output_dir>/connectivity_matrix.pdf``.

    Fix vs. previous version: operates on a copy, so the in-place diagonal /
    symmetry adjustments no longer mutate the matrix stored on ``adata.uns``;
    the symmetry test uses a float tolerance instead of exact equality.

    Parameters
    ----------
    adata : object with a ``uns`` mapping holding the connectivity DataFrame.
    output_dir : directory the PDF is written into.
    figsize : matplotlib figure size.
    """
    try:
        # Nothing to draw without the precomputed matrix.
        if 'connectivity_matrix' not in adata.uns:
            logger.warning("未找到连通性矩阵数据，跳过绘图")
            return

        # Copy so the adjustments below do not silently mutate adata.uns.
        connectivity = adata.uns['connectivity_matrix'].copy()

        # Self-connectivity is 1.0 by definition.
        np.fill_diagonal(connectivity.values, 1.0)

        # Tolerance-based symmetry check; exact DataFrame equality is
        # brittle for floating-point values.
        if not np.allclose(connectivity.values, connectivity.values.T):
            logger.warning("连通性矩阵不对称，正在取平均值使其对称")
            connectivity = (connectivity + connectivity.T) / 2
            np.fill_diagonal(connectivity.values, 1.0)

        plt.figure(figsize=figsize)
        sns.heatmap(connectivity, annot=True, fmt=".2f", cmap="YlGnBu",
                    linewidths=.5, square=True, vmin=0, vmax=1)

        plt.title('Cluster Connectivity Matrix', fontsize=16)
        plt.xlabel('Target Cluster', fontsize=14)
        plt.ylabel('Source Cluster', fontsize=14)

        plt.tight_layout()
        plt.savefig(f"{output_dir}/connectivity_matrix.pdf", bbox_inches='tight')
        plt.close()
        logger.info(f"Connectivity matrix saved: {output_dir}/connectivity_matrix.pdf")

    except Exception as e:
        logger.error(f"Failed to plot connectivity matrix: {str(e)}")


def plot_differential_expression(adata, output_dir, n_genes=5):
    """Heatmap of top differentially expressed genes across BANKSY clusters.

    Runs a t-test ranking, collects the top ``n_genes`` genes of each cluster,
    log-normalizes a copy restricted to those genes, and plots a row-wise
    z-scored mean-expression heatmap (clusters x genes) saved as a PNG.
    """
    try:
        logger.info("生成差异基因表达热图...")

        # Differential-expression ranking on the BANKSY clusters.
        logger.info("进行差异基因表达分析...")
        adata.obs['clusters'] = adata.obs['BANKSY_clusters'].astype('category')
        sc.tl.rank_genes_groups(adata, groupby='clusters', method='t-test')

        # Top-ranked genes of every cluster, deduplicated via a set.
        selected = set()
        for grp in adata.obs['clusters'].cat.categories:
            ranked = sc.get.rank_genes_groups_df(adata, group=grp).head(n_genes)['names']
            selected.update(ranked)

        # Log-normalize a copy restricted to the selected genes.
        sub = adata[:, list(selected)].copy()
        sc.pp.log1p(sub)

        groups = sub.obs['clusters'].cat.categories
        is_sparse = sp.issparse(sub.X)

        rows = []
        for grp in groups:
            # Boolean numpy mask (indexing .X with a pandas Series is unreliable).
            members = (sub.obs['clusters'] == grp).values
            block = sub.X[members].mean(axis=0)
            rows.append(np.array(block).flatten() if is_sparse else block)

        expr = pd.DataFrame(rows, index=groups, columns=sub.var_names)

        # Row-wise z-score so each cluster profile is directly comparable.
        zscored = expr.apply(lambda row: (row - row.mean()) / row.std(), axis=1)

        plt.figure(figsize=(max(8, len(selected)), max(6, len(groups))))
        sns.heatmap(zscored, cmap='vlag', center=0, annot=True, fmt=".1f")
        plt.title('Differential Gene Expression', fontsize=16)
        plt.ylabel('Clusters')
        plt.xlabel('Genes')
        plt.tight_layout()

        save_path = os.path.join(output_dir, 'differential_expression_heatmap.png')
        plt.savefig(save_path, dpi=300)
        plt.close()
        logger.info(f"差异基因表达热图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成差异基因表达热图失败: {str(e)}")
        # Full traceback for easier debugging of scanpy/pandas failures.
        import traceback
        logger.error(traceback.format_exc())


def plot_cluster_projection(adata, output_dir):
    """Side-by-side PCA and UMAP projections colored by BANKSY cluster.

    The left panel always shows PCA; the right panel shows UMAP when present
    in ``adata.obsm``, otherwise falls back to additional principal components
    (PC3/PC4, or PC1/PC2 if fewer than four components exist). Saved to
    ``<output_dir>/cluster_projection.png``.
    """
    try:
        logger.info("生成聚类降维投影图...")

        # PCA is a hard requirement for both panels.
        if 'X_pca' not in adata.obsm:
            logger.warning("没有PCA结果，无法生成投影图")
            return

        fig, (ax_left, ax_right) = plt.subplots(1, 2, figsize=(16, 6))

        # Categorical labels; integer codes are needed for raw scatter coloring.
        adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype('category')
        codes = adata.obs['BANKSY_clusters'].cat.codes

        # Left panel: PCA via scanpy for consistent styling.
        sc.pl.pca(adata, color='BANKSY_clusters', show=False, ax=ax_left)
        ax_left.set_title('PCA Projection', fontsize=14)
        ax_left.set_xlabel('PC1', fontsize=12)
        ax_left.set_ylabel('PC2', fontsize=12)

        # Right panel: UMAP if available, otherwise extra principal components.
        if 'X_umap' in adata.obsm:
            sc.pl.umap(adata, color='BANKSY_clusters', show=False, ax=ax_right)
            ax_right.set_title('UMAP Projection', fontsize=14)
        else:
            pcs = adata.obsm['X_pca']
            if pcs.shape[1] > 3:
                # Use PC3/PC4 as a stand-in second projection.
                ax_right.scatter(pcs[:, 2], pcs[:, 3],
                                 c=codes, cmap=professional_cmap, s=10, alpha=0.7)
                ax_right.set_title('PCA Projection (PC3 vs PC4)', fontsize=14)
                ax_right.set_xlabel('PC3')
                ax_right.set_ylabel('PC4')
            else:
                # Too few components: repeat PC1/PC2 as a raw scatter.
                ax_right.scatter(pcs[:, 0], pcs[:, 1],
                                 c=codes, cmap=professional_cmap, s=10, alpha=0.7)
                ax_right.set_title('PCA Projection', fontsize=14)
                ax_right.set_xlabel('PC1')
                ax_right.set_ylabel('PC2')

        plt.suptitle('Cluster Projections', fontsize=18)
        plt.tight_layout()

        save_path = os.path.join(output_dir, 'cluster_projection.png')
        fig.savefig(save_path, dpi=300)
        plt.close(fig)
        logger.info(f"聚类降维投影图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成聚类投影图失败: {str(e)}")


def plot_neural_cell_analysis(adata, output_dir):
    """Composite neural-cell analysis figure based on the actual clustering.

    Produces a 2x2 panel saved as ``<output_dir>/neural_cell_analysis.png``:
      (0,0) spatial distribution with cluster '0' highlighted as neurons,
      (0,1) stacked violin plots of neuron marker genes per cluster,
      (1,0) spatial pattern of PC1 used as a meta-gene,
      (1,1) z-scored heatmap of the top marker gene of each cluster.

    Parameters
    ----------
    adata : AnnData with 'BANKSY_clusters' in .obs, 'spatial' in .obsm, and
        optionally 'tissue_image' (an image file path) in .uns.
    output_dir : directory the figure is written into.
    """
    try:
        logger.info("生成神经细胞分析图...")

        # Require clustering results; nothing to plot otherwise.
        if 'BANKSY_clusters' not in adata.obs:
            logger.warning("没有聚类结果，跳过神经细胞分析")
            return

        # Ensure the cluster labels are categorical (needed for .cat below).
        adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype('category')

        # 2x2 grid for the four panels.
        fig = plt.figure(figsize=(18, 12))
        gs = fig.add_gridspec(2, 2)

        # 1. Spatial distribution (top-left).
        ax1 = fig.add_subplot(gs[0, 0])

        # Draw the tissue image as a faint background, if a path was stored.
        img_path = adata.uns.get('tissue_image', '')
        if img_path and os.path.exists(img_path):
            img = mpimg.imread(img_path)
            x_min, x_max = adata.obsm['spatial'][:, 0].min(), adata.obsm['spatial'][:, 0].max()
            y_min, y_max = adata.obsm['spatial'][:, 1].min(), adata.obsm['spatial'][:, 1].max()
            ax1.imshow(img, extent=[x_min, x_max, y_min, y_max], alpha=0.2, origin='lower')
            logger.info(f"组织背景图像已加载: {img_path}")

        # NOTE(review): cluster '0' is assumed to be the neuron cluster — this
        # is not derived from the data; confirm against marker expression.
        neuron_cluster = '0'  # assume cluster 0 is the neuron cluster
        neuron_mask = adata.obs['BANKSY_clusters'] == neuron_cluster

        # All cells as small gray context points.
        ax1.scatter(
            adata.obsm['spatial'][:, 0],
            adata.obsm['spatial'][:, 1],
            c='lightgray', s=1, alpha=0.3
        )

        # Highlight the neuron cluster in red.
        ax1.scatter(
            adata.obsm['spatial'][neuron_mask, 0],
            adata.obsm['spatial'][neuron_mask, 1],
            c='red', s=5, alpha=0.7, label=f'Neuron Cluster ({neuron_cluster})'
        )
        ax1.set_title('Spatial Distribution of Neurons', fontsize=14)
        ax1.axis('off')
        ax1.legend()

        # 2. Neuron marker gene expression (top-right).
        ax2 = fig.add_subplot(gs[0, 1])

        # Candidate neuron marker genes; keep only those present in the data.
        neuron_markers = ['B2M', 'GFAP', 'VIM', 'SNAP25', 'SYT1', 'MAP2']
        available_markers = [gene for gene in neuron_markers if gene in adata.var_names]

        if available_markers:
            # One stacked violin sub-axis per available marker gene.
            inner_gs = gs[0, 1].subgridspec(len(available_markers), 1, hspace=0.2)

            for i, gene in enumerate(available_markers):
                ax = fig.add_subplot(inner_gs[i])

                # Violin of this gene's expression per cluster.
                sc.pl.violin(adata, gene, groupby='BANKSY_clusters',
                             rotation=0, stripplot=False, ax=ax, show=False)

                ax.set_ylabel('Expression', fontsize=10)
                ax.set_xlabel('')
                ax.set_title(f'{gene}', fontsize=12)

                # Hide the x-axis and top/right spines for a compact stack.
                ax.get_xaxis().set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.spines['top'].set_visible(False)

            # Overlay an invisible axes to carry the shared panel title.
            # NOTE(review): this re-adds an axes over the violin stack (and
            # shadows the ax2 created above) — verify it renders as intended.
            ax2 = fig.add_subplot(gs[0, 1])
            ax2.set_axis_off()
            ax2.set_title('Neuron Marker Expression', fontsize=14, pad=20)

        else:
            # No marker genes found in the dataset: show a placeholder panel.
            ax2.text(0.5, 0.5, 'Neuron marker genes not found',
                     ha='center', va='center', fontsize=12)
            ax2.set_title('Neuron Marker Expression', fontsize=14)
            ax2.set_axis_off()

        # 3. Meta-gene analysis (bottom-left).
        ax3 = fig.add_subplot(gs[1, 0])

        # Use PC1 as a meta-gene if PCA results are available.
        if 'X_pca' in adata.obsm:
            # First principal component per spot.
            pca1 = adata.obsm['X_pca'][:, 0]

            # Pseudo-color spatial map of PC1.
            scatter = ax3.scatter(
                adata.obsm['spatial'][:, 0],
                adata.obsm['spatial'][:, 1],
                c=pca1, cmap='coolwarm', s=5
            )
            ax3.set_title('Meta-gene (PC1) Spatial Pattern', fontsize=14)
            plt.colorbar(scatter, ax=ax3, label='PC1 Value')

        ax3.axis('off')

        # 4. Cluster-specific marker-gene heatmap (bottom-right).
        ax4 = fig.add_subplot(gs[1, 1])

        try:
            # Run differential expression only if not already computed.
            if 'rank_genes_groups' not in adata.uns:
                logger.info("执行差异表达分析...")
                sc.tl.rank_genes_groups(adata, 'BANKSY_clusters', method='t-test')

            # Collect the single top marker gene of every cluster.
            top_genes = []
            cluster_names = adata.obs['BANKSY_clusters'].cat.categories

            for cluster in cluster_names:
                # Differential-expression table for the current cluster.
                df = sc.get.rank_genes_groups_df(adata, group=cluster)

                # Keep significant genes with a meaningful fold change.
                df = df[(df['pvals_adj'] < 0.05) & (df['logfoldchanges'].abs() > 0.5)]

                if len(df) > 0:
                    # Best gene by adjusted p-value.
                    top_cluster_genes = df.sort_values('pvals_adj').head(1)['names'].tolist()
                    top_genes.extend(top_cluster_genes)

            # De-duplicate while preserving insertion order.
            top_genes = list(dict.fromkeys(top_genes))

            if not top_genes:
                raise ValueError("未找到显著的标志基因")

            # Mean expression per cluster for the selected genes.
            cluster_means = []

            for cluster in cluster_names:
                cluster_cells = adata.obs['BANKSY_clusters'] == cluster
                cluster_data = adata[cluster_cells, top_genes]

                # Mean expression, handling sparse and dense matrices.
                if sp.issparse(cluster_data.X):
                    cluster_mean = np.array(cluster_data.X.mean(axis=0)).flatten()
                else:
                    cluster_mean = cluster_data.X.mean(axis=0)

                # Z-score the cluster profile for the heatmap.
                cluster_mean = (cluster_mean - cluster_mean.mean()) / cluster_mean.std()
                cluster_means.append(cluster_mean)

            expr_matrix = np.vstack(cluster_means)

            # Annotated z-score heatmap (clusters x genes).
            sns.heatmap(expr_matrix,
                        cmap="RdBu_r",  # red-blue diverging colormap
                        annot=True,
                        linewidths=0.5,
                        xticklabels=top_genes,
                        yticklabels=cluster_names,
                        cbar_kws={'label': 'Z-score'},
                        center=0,
                        ax=ax4)

            ax4.set_title('Top Marker Genes by Cluster', fontsize=14)
            ax4.set_xlabel("Genes", fontsize=12)
            ax4.set_ylabel("Clusters", fontsize=12)
            ax4.set_yticks(np.arange(len(cluster_names)))  # y tick positions
            ax4.set_yticklabels(cluster_names, rotation=0)  # y tick labels

        except Exception as e:
            # Heatmap is best-effort: fall back to a placeholder panel.
            logger.error(f"无法生成标记基因热图: {str(e)}")
            ax4.text(0.5, 0.5, 'Top Marker Genes Unavailable',
                     ha='center', va='center', fontsize=12)
            ax4.set_title('Marker Genes Analysis', fontsize=14)
            ax4.set_axis_off()

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'neural_cell_analysis.png')
        plt.savefig(save_path, dpi=300)
        plt.close(fig)
        logger.info(f"神经细胞分析图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成神经细胞分析图失败: {str(e)}")


def plot_gene_expression_violin(adata, output_dir):
    """Violin plots of per-cluster marker-gene expression.

    Runs a t-test ranking on ``BANKSY_clusters``, keeps the top 3
    non-mitochondrial genes of each cluster, and saves a violin plot to
    ``<output_dir>/gene_expression_violin.pdf``.

    Fixes vs. previous version:
    - ``sc.pp.log1p(adata)`` is now guarded by the ``adata.uns['log1p']``
      marker scanpy records, so the shared AnnData is never double-log
      transformed when this runs after other normalization steps.
    - ``sc.pl.violin`` is called with ``show=False``; previously the plot was
      displayed and an unused pre-created figure was leaked, with the title
      and save applying to whichever figure scanpy left current.
    - De-duplication keeps a deterministic gene order (``dict.fromkeys``
      instead of ``set``), matching the other plotting helpers in this file.
    """
    try:
        # Log-normalize in place only once; scanpy records the transform
        # in adata.uns['log1p'], so a second call would double-transform.
        if 'log1p' not in adata.uns:
            sc.pp.log1p(adata)

        # Rank marker genes for each cluster.
        sc.tl.rank_genes_groups(adata, 'BANKSY_clusters', method='t-test')

        # Top 3 markers per cluster, excluding mitochondrial (MT-) genes.
        marker_genes = []
        for cluster in np.unique(adata.obs['BANKSY_clusters']):
            top_genes = sc.get.rank_genes_groups_df(adata, group=str(cluster))
            top_genes = top_genes[~top_genes['names'].str.upper().str.startswith('MT-')]
            marker_genes.extend(top_genes['names'].head(3).tolist())

        # Deduplicate while keeping order (deterministic, unlike set()).
        marker_genes = list(dict.fromkeys(marker_genes))

        # show=False: draw without displaying; scanpy's figure becomes the
        # current figure for the title/save calls below.
        sc.pl.violin(adata, marker_genes, groupby='BANKSY_clusters',
                     rotation=90, stripplot=False, size=2, log=False,
                     show=False)

        plt.title("Marker Gene Expression Distribution", fontsize=16)
        plt.ylabel("Normalized Expression", fontsize=14)
        plt.xlabel("Cluster", fontsize=14)

        plt.tight_layout()
        plt.savefig(f"{output_dir}/gene_expression_violin.pdf", bbox_inches='tight')
        plt.close()
        logger.info(f"Gene expression violin plot saved: {output_dir}/gene_expression_violin.pdf")

    except Exception as e:
        logger.error(f"绘制基因表达小提琴图失败: {str(e)}")


def plot_differential_expression_heatmap(adata, output_dir, groupby='BANKSY_clusters',
                                         n_genes_per_cluster=5, figsize=(15, 12)):
    """
    Plot a grouped marker-gene heatmap based on differential expression.

    For each cluster in ``adata.obs[groupby]``, runs a t-test ranking, keeps
    genes with adjusted p < 0.05 and |logFC| > 0.5, takes the top
    ``n_genes_per_cluster`` per cluster, and draws a z-scored mean-expression
    heatmap (clusters x genes) saved as a PNG in ``output_dir``. On failure
    the error is logged and written to ``differential_expression_error.txt``.

    Fixes vs. previous version:
    - coerces the group column to categorical (``.cat`` raised an
      AttributeError on plain string columns, silently aborting the plot);
      this matches how the other plotting helpers in this file behave.
    - guards the per-cluster z-score against zero variance, which previously
      produced all-NaN heatmap rows.

    Parameters
    ----------
    adata : AnnData with expression matrix and cluster labels.
    output_dir : destination directory for the figure / error file.
    groupby : obs column holding the cluster labels.
    n_genes_per_cluster : number of marker genes kept per cluster.
    figsize : matplotlib figure size.
    """
    try:
        logger.info("生成差异表达热图...")

        # Nothing to group by: bail out quietly.
        if groupby not in adata.obs:
            logger.warning(f"分组列 {groupby} 不存在，跳过热图")
            return

        # Ensure categorical dtype; `.cat` below requires it.
        adata.obs[groupby] = adata.obs[groupby].astype('category')

        logger.info(f"执行差异表达分析 (分组: {groupby})...")
        sc.tl.rank_genes_groups(adata, groupby, method='t-test', use_raw=False)

        # Collect the top markers of every cluster.
        top_genes = []
        cluster_names = adata.obs[groupby].cat.categories

        for cluster in cluster_names:
            df = sc.get.rank_genes_groups_df(adata, group=cluster)

            # Keep significant genes with a meaningful fold change.
            df = df[(df['pvals_adj'] < 0.05) & (df['logfoldchanges'].abs() > 0.5)]

            if len(df) > 0:
                top_cluster_genes = df.sort_values('pvals_adj').head(n_genes_per_cluster)['names'].tolist()
                top_genes.extend(top_cluster_genes)

        # De-duplicate while preserving insertion order.
        top_genes = list(dict.fromkeys(top_genes))

        if not top_genes:
            logger.warning("未找到显著的标志基因，跳过热图")
            return

        logger.info(f"创建热图矩阵 ({len(top_genes)} 个基因)...")

        # Mean expression per cluster, z-scored per row.
        cluster_expr = []

        for cluster in cluster_names:
            cluster_cells = adata.obs[groupby] == cluster
            cluster_data = adata[cluster_cells, top_genes]

            # Sparse-safe mean over cells.
            if sp.issparse(cluster_data.X):
                cluster_mean = np.array(cluster_data.X.mean(axis=0)).flatten()
            else:
                cluster_mean = cluster_data.X.mean(axis=0)

            # Z-score; guard against zero variance to avoid NaN rows.
            std = cluster_mean.std()
            cluster_mean = (cluster_mean - cluster_mean.mean()) / (std if std > 0 else 1.0)
            cluster_expr.append(cluster_mean)

        expr_matrix = np.vstack(cluster_expr)

        plt.figure(figsize=figsize)
        sns.heatmap(expr_matrix,
                    cmap="RdBu_r",  # red-blue diverging colormap
                    annot=False,
                    linewidths=0.5,
                    xticklabels=top_genes,
                    yticklabels=cluster_names,
                    cbar_kws={'label': '标准化表达值'},
                    center=0)

        plt.title(f"{groupby}分组中的标志基因表达模式", fontsize=16)
        plt.xlabel("基因", fontsize=14)
        plt.ylabel(groupby, fontsize=14)
        plt.yticks(rotation=0)

        plt.tight_layout()
        save_path = os.path.join(output_dir, f'differential_expression_{groupby}.png')
        plt.savefig(save_path, bbox_inches='tight', dpi=300)
        plt.close()
        logger.info(f"差异表达热图保存至: {save_path}")

    except Exception as e:
        logger.error(f"生成差异表达热图失败: {str(e)}")
        with open(os.path.join(output_dir, 'differential_expression_error.txt'), 'w') as f:
            f.write(f"Error: {str(e)}")

# -----------------------------------------------------------
# 6. 主流程 - 高度优化内存使用
# -----------------------------------------------------------
if __name__ == "__main__":
    # Main pipeline: load Visium HD data, compute BANKSY features, cluster,
    # generate all visualizations, and save a slimmed-down result object.
    # Statement order matters (in-place transforms, resource release).
    try:
        # Downsampling disabled: process the full-resolution dataset.
        ENABLE_DOWNSAMPLING = False
        output_dir = 'professional_results'
        os.makedirs(output_dir, exist_ok=True)

        # Silence matplotlib's verbose debug logging.
        mpl_logger = logging.getLogger('matplotlib')
        mpl_logger.setLevel(logging.WARNING)

        # 1. Load data.
        resolution = '016um'
        logger.info(f"Loading {resolution} resolution data...")
        adata = load_visium_hd_data(resolution=resolution)
        adata.var_names_make_unique()

        # 2. Preprocessing (no downsampling).
        logger.info("Preprocessing without downsampling...")
        adata = preprocess_data(adata, enable_downsampling=False)

        # 3. BANKSY feature computation.
        logger.info("Computing optimized BANKSY features...")
        banksy_adata = generate_banksy_features(adata, lambda_val=0.7, nbrs=25)

        # 4. Clustering.
        logger.info("Performing optimized clustering...")
        adata = banksy_clustering(adata, banksy_adata, n_clusters=10)

        # Make sure the cluster labels are categorical for downstream plots.
        adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype('category')

        # Release the BANKSY feature object (closing its backing HDF5
        # handle first, if one was attached) to free memory.
        if hasattr(banksy_adata, 'uns') and '_h5f' in banksy_adata.uns:
            banksy_adata.uns['_h5f'].close()
        del banksy_adata
        gc.collect()
        log_memory_usage("聚类完成后")

        # 5. Compute the embeddings the visualizations depend on.
        logger.info("计算关键可视化特征...")

        # PCA (skipped if a result already exists).
        if 'X_pca' not in adata.obsm:
            logger.info("计算PCA降维...")
            sc.pp.pca(adata)

        # UMAP on the PCA neighbor graph.
        logger.info("计算UMAP投影...")
        sc.pp.neighbors(adata, n_neighbors=15, use_rep='X_pca')
        sc.tl.umap(adata)

        # Differential-expression ranking used by several plots below.
        logger.info("计算差异表达基因...")
        sc.tl.rank_genes_groups(adata, 'BANKSY_clusters', method='t-test')

        # 6. Generate all visualizations.
        logger.info("Generating professional visualizations...")

        # A. Spatial cluster distribution.
        plot_spatial_clusters(adata, output_dir)

        # B. Cluster correlation heatmap.
        plot_cluster_correlation(adata, output_dir)

        # C. Cluster connectivity matrix.
        plot_connectivity_matrix(adata, output_dir)

        # D. Neural cell analysis figure.
        plot_neural_cell_analysis(adata, output_dir)

        # E. Marker-gene expression violin plots.
        plot_gene_expression_violin(adata, output_dir)

        # F. Cluster projection (PCA/UMAP).
        plot_cluster_projection(adata, output_dir)

        # G. Cluster size distribution.
        plot_cluster_size_distribution(adata, output_dir)

        # H. Per-cluster spatial scatter.
        plot_spatial_scatter_by_cluster(adata, output_dir)

        # I. UMAP visualization.
        plot_umap_visualization(adata, output_dir)

        # J. Grouped differential-expression heatmap.
        plot_differential_expression_heatmap(adata, output_dir, groupby='BANKSY_clusters')

        # K. Top-gene differential-expression heatmap.
        plot_differential_expression(adata, output_dir)

        # 7. Save a slimmed-down result: cluster labels + spatial coordinates
        # only, with an empty expression matrix to keep the file small.
        logger.info("Saving streamlined results...")
        adata_core = anndata.AnnData(
            X=sp.csr_matrix((adata.n_obs, 0)),
            obs=pd.DataFrame({
                'BANKSY_clusters': adata.obs['BANKSY_clusters']
            }),
            obsm={'spatial': adata.obsm['spatial']}
        )
        adata_core.uns = {
            'metrics': adata.uns.get('banksy_metrics', {}),
            'tissue_image': adata.uns.get('tissue_image', ''),
            'connectivity_matrix': adata.uns.get('connectivity_matrix', None)
        }
        adata_core.write(f'{output_dir}/banksy_results.h5ad', compression='gzip')

        logger.info("Processing completed successfully!")

    except Exception as e:
        logger.exception("Error during execution")
        # Persist the error details for post-mortem inspection.
        with open(os.path.join(output_dir, 'processing_error.txt'), 'w') as f:
            f.write(f"Error: {str(e)}\n")
            import traceback

            f.write(traceback.format_exc())
        raise e