import anndata
import time
import scanpy as sc
import numpy as np
import pandas as pd
import scipy.io
import scipy.sparse as sp
import json
import os
import gc
import logging
import matplotlib.pyplot as plt
import psutil
from pathlib import Path
from typing import List, Union
from sklearn.neighbors import NearestNeighbors
import matplotlib.image as mpimg
from sklearn.decomposition import TruncatedSVD
from sklearn.preprocessing import StandardScaler
import umap
import warnings
import h5py
from sklearn.metrics import silhouette_score
import seaborn as sns
from matplotlib.colors import ListedColormap
import igraph
import leidenalg

# Configure logging: timestamped INFO-level messages for the whole pipeline
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
warnings.filterwarnings('ignore')  # suppress noisy third-party warnings (scanpy/umap emit many)

# Global plotting style shared by every figure in this script
sns.set_style("white")
plt.rcParams['font.family'] = 'DejaVu Sans'
plt.rcParams['pdf.fonttype'] = 42  # embed TrueType fonts so PDF/PS text stays editable
plt.rcParams['ps.fonttype'] = 42
plt.rcParams['savefig.dpi'] = 300
plt.rcParams['figure.dpi'] = 150

# Categorical palette (20 distinct tab20 colours) used to colour clusters in all plots
professional_cmap = ListedColormap(plt.cm.tab20.colors)


# Memory monitoring helper used between pipeline stages
def log_memory_usage(msg):
    """Log system memory usage (used/total/available, GB) prefixed with *msg*."""
    mem = psutil.virtual_memory()
    logger.info(f"{msg} | 内存: {mem.used / 1e9:.1f}GB/{mem.total / 1e9:.1f}GB (可用: {mem.available / 1e9:.1f}GB)")


# -----------------------------------------------------------
# 1. 数据加载函数
# -----------------------------------------------------------
def load_visium_hd_data(resolution='016um'):
    """Load Visium HD binned output for one bin size into an AnnData object.

    Reads the filtered feature-barcode matrix (Matrix Market triplet),
    barcodes, feature names, the tissue-position parquet table and the JSON
    scale factors from a hard-coded local directory, then assembles a
    cells-by-genes AnnData whose ``obsm['spatial']`` holds low-res-scaled
    pixel coordinates.

    Parameters
    ----------
    resolution : str
        Bin-size folder suffix; ``'016um'`` selects ``square_016um``.

    Returns
    -------
    anndata.AnnData
        ``(n_cells, n_genes)`` float32 matrix; ``uns['tissue_image']`` holds
        the path to ``tissue_lowres_image.png``.
    """
    # NOTE(review): data root is a hard-coded Windows path — adjust per machine.
    base_dir = Path('D:/shengwujishu/data/binned_outputs') / f'square_{resolution}'
    log_memory_usage("开始加载数据")

    # Component paths of the filtered feature-barcode matrix
    mtx_path = base_dir / 'filtered_feature_bc_matrix' / 'matrix.mtx'
    barcodes_path = base_dir / 'filtered_feature_bc_matrix' / 'barcodes.tsv'
    features_path = base_dir / 'filtered_feature_bc_matrix' / 'features.tsv'

    # Verify the file starts with a Matrix Market banner
    with open(mtx_path, 'r') as f:
        first_line = f.readline().strip()
        if not first_line.startswith('%%MatrixMarket'):
            logger.warning(f"检测到非标准Matrix Market文件头: {first_line[:50]}...")
            logger.warning("尝试修复文件头...")
            # Write a repaired copy with a standard banner prepended.
            # NOTE(review): the original (non-banner) first line is kept as the
            # second line of the repaired file; unless it starts with '%' the
            # MM reader may still reject it — confirm with a real sample.
            fixed_mtx_path = base_dir / 'filtered_feature_bc_matrix' / 'matrix_fixed.mtx'
            with open(mtx_path, 'r') as original, open(fixed_mtx_path, 'w') as fixed:
                # Standard Matrix Market header line
                fixed.write("%%MatrixMarket matrix coordinate real general\n")
                for line in original:
                    fixed.write(line)
            mtx_path = fixed_mtx_path

    logger.info("使用anndata专用加载器读取MTX文件...")
    adata_temp = sc.read_mtx(mtx_path)
    X = adata_temp.X.T.tocsr()  # transpose so rows are cells, columns are genes
    logger.info(f"矩阵加载完成: {X.shape[0]}细胞, {X.shape[1]}特征")

    # Cell barcodes.
    # NOTE(review): str.replace('-1', '') removes *every* '-1' substring, not
    # just a trailing gem-group suffix; a regex anchored '-1$' would be safer.
    barcodes = pd.read_csv(barcodes_path, header=None, sep='\t')[0].str.replace('-1', '').values

    # Gene names (column 1 of features.tsv)
    features_df = pd.read_csv(features_path, header=None, sep='\t')
    features = features_df[1].values

    # De-duplicate gene names by appending _1, _2, ... to repeats
    unique_features = []
    seen = set()
    for i, f in enumerate(features):
        base_name = f
        suffix = 1
        while f in seen:
            f = f"{base_name}_{suffix}"
            suffix += 1
        seen.add(f)
        unique_features.append(f)

    # Spatial coordinates and low-res scale factor
    spatial_dir = base_dir / 'spatial'
    with open(spatial_dir / 'scalefactors_json.json') as f:
        scalefactors = json.load(f)
        scale = scalefactors.get('tissue_lowres_scalef', 1.0)

    positions = pd.read_parquet(spatial_dir / 'tissue_positions.parquet')

    # Locate the barcode column regardless of its exact name
    barcode_col = next((col for col in positions.columns if 'barcode' in col.lower()), 'barcode')
    positions = positions.rename(columns={barcode_col: 'barcode'})

    positions['barcode'] = positions['barcode'].astype(str).str.replace('-1', '')

    # Align positions to the matrix barcode order
    matched_positions = positions[positions['barcode'].isin(barcodes)].set_index('barcode').loc[barcodes].reset_index()

    # Heuristically find x/y pixel-coordinate columns
    x_col = next((col for col in matched_positions.columns if ('x' in col.lower() or 'col' in col.lower())),
                 'pxl_col_in_fullres')
    y_col = next((col for col in matched_positions.columns if ('y' in col.lower() or 'row' in col.lower())),
                 'pxl_row_in_fullres')

    coords = matched_positions[[x_col, y_col]].values.astype(np.float32) * scale

    # Assemble the AnnData object
    adata = anndata.AnnData(
        X=X.astype(np.float32),
        obs=pd.DataFrame(index=barcodes),
        var=pd.DataFrame(index=unique_features),  # de-duplicated gene names
        obsm={'spatial': coords}
    )

    adata.uns['tissue_image'] = str(spatial_dir / 'tissue_lowres_image.png')

    # Belt-and-braces: let anndata enforce unique var names too
    adata.var_names_make_unique()

    log_memory_usage("数据加载完成")
    return adata


# -----------------------------------------------------------
# 2. 可配置的数据预处理
# -----------------------------------------------------------
def preprocess_data(adata):
    """Standard scanpy preprocessing: filter, normalize, log1p, HVG subset.

    Parameters
    ----------
    adata : anndata.AnnData
        Raw counts matrix (cells x genes); modified in place by the filter
        and normalization steps.

    Returns
    -------
    anndata.AnnData
        A materialized copy restricted to highly variable genes.
    """
    logger.info("开始数据预处理...")

    # Basic quality filtering
    sc.pp.filter_cells(adata, min_genes=200)
    sc.pp.filter_genes(adata, min_cells=3)

    # Library-size normalization followed by log1p transform
    sc.pp.normalize_total(adata, target_sum=1e4)
    sc.pp.log1p(adata)

    # Select highly variable genes (Seurat-style cutoffs)
    sc.pp.highly_variable_genes(adata, min_mean=0.0125, max_mean=3, min_disp=0.5)
    # .copy() materializes the subset: column-slicing returns an AnnData
    # *view*, which triggers implicit copies / warnings on later in-place
    # writes downstream (e.g. rank_genes_groups writing to .uns/.obs).
    adata = adata[:, adata.var.highly_variable].copy()

    logger.info(f"预处理后数据: {adata.n_obs}细胞, {adata.n_vars}基因")
    log_memory_usage("预处理完成")
    return adata


# -----------------------------------------------------------
# 3. BANKSY特征生成 - 简化版
# -----------------------------------------------------------
def generate_banksy_features(adata, lambda_val=0.1, nbrs=10):
    """Build simplified BANKSY features from own + neighbourhood-mean expression.

    Concatenates each cell's own expression (weighted by ``1 - lambda_val``)
    with the mean expression of its ``nbrs`` spatial nearest neighbours
    (weighted by ``lambda_val``), doubling the feature dimension.

    Parameters
    ----------
    adata : anndata.AnnData
        Preprocessed data with ``obsm['spatial']`` coordinates.
    lambda_val : float
        Weight of the spatial (neighbourhood) term, in [0, 1].
    nbrs : int
        Number of spatial nearest neighbours per cell.

    Returns
    -------
    anndata.AnnData
        New object of shape ``(n_cells, 2 * n_genes)``; neighbourhood
        features are named ``BANKSY_<gene>``.
    """
    log_memory_usage("开始生成BANKSY特征")
    locations = adata.obsm['spatial'].astype(np.float32)

    # Spatial kNN graph over cell coordinates
    logger.info("生成空间权重矩阵...")
    nbrs_obj = NearestNeighbors(n_neighbors=nbrs).fit(locations)
    graph_out = nbrs_obj.kneighbors_graph(n_neighbors=nbrs, mode='distance')

    # Uniform weights normalized so each row sums to 1: graph_out.dot(X)
    # then yields the *mean* neighbour expression. The previous constant-1
    # weights produced a sum over k neighbours, inflating the spatial term
    # by a factor of k and overriding the intended lambda_val balance.
    graph_out.data = np.full_like(graph_out.data, 1.0 / nbrs)
    graph_out = graph_out.tocsr()

    # Compute BANKSY features directly
    logger.info("直接计算BANKSY特征...")

    if sp.issparse(adata.X):
        self_expr = adata.X.toarray()
    else:
        self_expr = adata.X.copy()

    # Mean neighbour expression per cell
    nbr_expr = graph_out.dot(adata.X)

    if sp.issparse(nbr_expr):
        nbr_expr = nbr_expr.toarray()

    # Concatenate own and neighbourhood features with the lambda weighting
    banksy_features = np.hstack([
        (1 - lambda_val) * self_expr,
        lambda_val * nbr_expr
    ]).astype('float32')

    # Wrap into a new AnnData, carrying over obs/obsm/uns
    banksy_adata = anndata.AnnData(
        X=banksy_features,
        obs=adata.obs.copy(),
        var=pd.DataFrame(index=list(adata.var_names) + [f"BANKSY_{gene}" for gene in adata.var_names]),
    )

    banksy_adata.uns = adata.uns.copy()
    banksy_adata.obsm = adata.obsm.copy()

    logger.info(f"BANKSY特征生成完成: {banksy_adata.shape}")
    log_memory_usage("BANKSY特征生成完成")
    return banksy_adata


# -----------------------------------------------------------
# 4. 聚类函数 - 简化版（效果劣化）
# -----------------------------------------------------------
def banksy_clustering(adata, banksy_adata, n_clusters=10):
    """Cluster cells on subsampled BANKSY features and annotate ``adata``.

    Steps: random feature subsample -> TruncatedSVD -> kNN graph -> Leiden
    with a small resolution sweep targeting ``n_clusters`` clusters; finally
    computes a cluster-level spatial connectivity matrix.

    Parameters
    ----------
    adata : anndata.AnnData
        Object to annotate; gains ``obs['BANKSY_clusters']``,
        ``obsm['X_pca']`` and ``uns['cluster_connectivity']``.
    banksy_adata : anndata.AnnData
        BANKSY feature matrix from :func:`generate_banksy_features`.
    n_clusters : int
        Desired (approximate) number of clusters.

    Returns
    -------
    anndata.AnnData
        The annotated ``adata``.
    """
    logger.info("开始空间聚类...")
    log_memory_usage("开始聚类")

    # 1. Build the clustering input from a fixed random subset of features
    logger.info("准备聚类输入矩阵...")
    banksy_data = banksy_adata.X

    # Sample up to 3000 features; clamp to the available count so that
    # rng.choice(..., replace=False) cannot raise on narrow matrices.
    rng = np.random.default_rng(42)
    n_sample = min(3000, banksy_data.shape[1])
    feature_indices = rng.choice(banksy_data.shape[1], size=n_sample, replace=False)
    X = banksy_data[:, feature_indices].copy()

    # 2. Dimensionality reduction (TruncatedSVD handles sparse input)
    logger.info("执行PCA降维...")
    pca = TruncatedSVD(n_components=min(30, X.shape[1]))
    reduced = pca.fit_transform(X.astype('float32'))
    adata.obsm['X_pca'] = reduced

    log_memory_usage("PCA降维后")

    # 3. Leiden clustering on a kNN connectivity graph
    logger.info(f"执行Leiden聚类(目标聚类数={n_clusters})...")

    nbrs = NearestNeighbors(n_neighbors=15).fit(reduced)
    knn_graph = nbrs.kneighbors_graph(mode='connectivity')

    # Convert the sparse kNN graph into an igraph edge list
    sources, targets = knn_graph.nonzero()
    edgelist = list(zip(sources.tolist(), targets.tolist()))
    G = igraph.Graph(edgelist)

    # Sweep resolutions, keeping the partition closest to n_clusters
    resolutions = [0.3, 0.5, 0.8, 1.0]
    best_partition = None
    best_n_clusters = 0

    for resolution in resolutions:
        partition = leidenalg.find_partition(
            G,
            leidenalg.RBConfigurationVertexPartition,
            resolution_parameter=resolution,
            n_iterations=5,
            seed=42
        )

        clusters = np.array(partition.membership)
        n_clusters_found = len(np.unique(clusters))

        # Track the result closest to the requested cluster count
        if abs(n_clusters_found - n_clusters) < abs(best_n_clusters - n_clusters):
            best_partition = partition
            best_n_clusters = n_clusters_found

        if n_clusters_found >= n_clusters:
            break

    if best_partition is None:
        # Fallback: single run at a middle-of-the-road resolution
        best_partition = leidenalg.find_partition(
            G,
            leidenalg.RBConfigurationVertexPartition,
            resolution_parameter=0.8,
            n_iterations=5,
            seed=42
        )
        best_n_clusters = len(np.unique(best_partition.membership))

    clusters = np.array(best_partition.membership)
    logger.info(f"聚类结果: {best_n_clusters}个簇")

    # 4. Store cluster labels as a categorical column
    adata.obs['BANKSY_clusters'] = pd.Categorical([str(x) for x in clusters])
    adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype('category')

    # 5. Log cluster sizes
    cluster_counts = adata.obs['BANKSY_clusters'].value_counts()
    logger.info("聚类结果统计:\n%s" % cluster_counts)

    # 6. Cluster-to-cluster spatial connectivity matrix
    try:
        logger.info("计算细胞亚群空间连通性矩阵...")

        # Integer cluster codes and spatial coordinates
        clusters = adata.obs['BANKSY_clusters'].astype('category').cat.codes.values
        unique_clusters = np.unique(clusters)
        n_clusters_found = len(unique_clusters)
        coords = adata.obsm['spatial']

        connectivity = np.zeros((n_clusters_found, n_clusters_found))

        # Spatial kNN graph over cell coordinates
        nbrs = NearestNeighbors(n_neighbors=25).fit(coords)
        _, indices = nbrs.kneighbors(coords)

        # Count how often cells of one cluster neighbour each other cluster
        for i in range(len(clusters)):
            source_cluster = clusters[i]
            neighbors = indices[i]
            neighbor_clusters = clusters[neighbors]

            for j in range(1, len(neighbors)):  # skip the cell itself
                target_cluster = neighbor_clusters[j]
                connectivity[source_cluster, target_cluster] += 1

        # Row-normalize; guard empty rows against division by zero
        row_sums = connectivity.sum(axis=1)
        row_sums[row_sums == 0] = 1.0
        connectivity = connectivity / row_sums[:, np.newaxis]

        # Symmetrize and pin the diagonal at 1.0
        connectivity = (connectivity + connectivity.T) / 2
        np.fill_diagonal(connectivity, 1.0)

        # Store as a labelled DataFrame
        cluster_names = adata.obs['BANKSY_clusters'].cat.categories
        connectivity_df = pd.DataFrame(
            connectivity,
            index=cluster_names,
            columns=cluster_names
        )

        adata.uns['cluster_connectivity'] = connectivity_df
        logger.info("细胞亚群空间连通性矩阵计算完成")
    except Exception as e:
        logger.error(f"计算细胞亚群空间连通性矩阵失败: {str(e)}")

    log_memory_usage("聚类完成")
    return adata


# -----------------------------------------------------------
# 5. 可视化功能 - 保持不变
# -----------------------------------------------------------
def plot_spatial_clusters(adata, output_dir, background_alpha=0.2, point_size=3, min_cluster_size=0.01):
    """Scatter all cells in tissue space, coloured by BANKSY cluster.

    Parameters
    ----------
    adata : anndata.AnnData
        Must carry ``obs['BANKSY_clusters']`` and ``obsm['spatial']``.
    output_dir : str
        Directory that receives ``spatial_clusters.png``.
    background_alpha : float
        Opacity of the optional tissue background image.
    point_size : float
        Marker size for cells.
    min_cluster_size : float
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    matplotlib.figure.Figure
        The (already closed) figure object.
    """
    if 'BANKSY_clusters' not in adata.obs.columns:
        raise ValueError("聚类结果未找到")

    # Use string IDs and a *sorted* ordering so the colour given to each
    # cluster matches the per-cluster panels in plot_spatial_scatter_by_cluster
    # (which also sorts before colouring); .unique() appearance order would
    # assign different colours to the same cluster across figures.
    adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype(str)
    unique_clusters = sorted(adata.obs['BANKSY_clusters'].unique())

    fig, ax = plt.subplots(figsize=(12, 10))

    # Optional tissue image as background
    img_path = adata.uns.get('tissue_image', '')
    if img_path and os.path.exists(img_path):
        try:
            img = mpimg.imread(img_path)
            x_min, x_max = adata.obsm['spatial'][:, 0].min(), adata.obsm['spatial'][:, 0].max()
            y_min, y_max = adata.obsm['spatial'][:, 1].min(), adata.obsm['spatial'][:, 1].max()

            ax.imshow(img,
                      extent=[x_min, x_max, y_min, y_max],
                      alpha=background_alpha,
                      origin='lower',
                      cmap='gray')
            logger.info(f"组织背景图像已加载: {img_path}")
        except Exception as e:
            logger.warning(f"加载背景图像失败: {e}")

    # One scatter call per cluster so each gets its own colour + legend entry
    all_coords = adata.obsm['spatial']
    cluster_ids = adata.obs['BANKSY_clusters'].values

    for i, cluster in enumerate(unique_clusters):
        mask = (cluster_ids == cluster)
        points = all_coords[mask]
        ax.scatter(
            points[:, 0],
            points[:, 1],
            s=point_size,
            color=professional_cmap(i),
            alpha=0.7,
            label=f'Cluster {cluster}'
        )

    # Legend only while it stays readable
    if len(unique_clusters) <= 20:
        ax.legend(
            title='Clusters',
            loc='center left',
            bbox_to_anchor=(1.05, 0.5),
            ncol=1 if len(unique_clusters) > 10 else 2,
            markerscale=2.0,
            fontsize=9,
            frameon=False
        )

    # Title and axis labels
    ax.set_title('BANKSY Spatial Clustering', fontsize=14, fontweight='bold')
    ax.set_xlabel('X Coordinate', fontsize=12)
    ax.set_ylabel('Y Coordinate', fontsize=12)

    plt.tight_layout()

    # Save and close
    save_path = os.path.join(output_dir, 'spatial_clusters.png')
    fig.savefig(save_path, bbox_inches='tight')
    plt.close(fig)
    logger.info(f"空间聚类图已保存: {save_path}")

    return fig


def plot_cluster_size_distribution(adata, output_dir):
    """Bar chart of cells per cluster, annotated with counts and percentages."""
    try:
        logger.info("生成聚类大小分布图...")

        # Cells per cluster, ordered by cluster label
        counts = adata.obs['BANKSY_clusters'].value_counts().sort_index()
        percentages = (counts / counts.sum() * 100).round(1)

        # One bar per cluster, coloured from the shared palette
        fig, ax = plt.subplots(figsize=(12, 6))
        ax.bar(counts.index, counts.values, color=professional_cmap.colors, alpha=0.8)

        # Annotate each bar with its absolute count and percentage share
        tallest = max(counts.values)
        for pos, (count, perc) in enumerate(zip(counts.values, percentages.values)):
            ax.text(pos, count + tallest * 0.01,
                    f"{count}\n({perc}%)",
                    ha='center',
                    fontsize=9)

        # Titles, labels and a dashed horizontal grid
        ax.set_title('Cluster Size Distribution', fontsize=14, fontweight='bold')
        ax.set_xlabel('Cluster ID', fontsize=12)
        ax.set_ylabel('Number of Cells', fontsize=12)
        ax.set_ylim(0, tallest * 1.15)
        ax.grid(axis='y', linestyle='--', alpha=0.7)

        plt.xticks(rotation=45)
        plt.tight_layout()

        # Save and close
        save_path = os.path.join(output_dir, 'cluster_distribution.png')
        fig.savefig(save_path, bbox_inches='tight')
        plt.close(fig)
        logger.info(f"聚类大小分布图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成聚类大小分布图失败: {str(e)}")


def plot_spatial_scatter_by_cluster(adata, output_dir):
    """One spatial scatter panel per cluster (up to 4 panels per row).

    Each panel shows every cell in light grey with the current cluster's
    cells highlighted; clusters are iterated in sorted label order, so
    palette index ``i`` corresponds to the i-th sorted cluster.
    """
    try:
        logger.info("生成按聚类的空间散点图...")

        # Cluster labels as strings for stable comparison and sorting
        adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype(str)
        unique_clusters = adata.obs['BANKSY_clusters'].unique()

        # Grid layout: up to 4 panels per row
        n_clusters = len(unique_clusters)
        n_rows = (n_clusters + 3) // 4  # ceil(n_clusters / 4)
        fig, axes = plt.subplots(n_rows, 4, figsize=(16, n_rows * 4), squeeze=False)

        # Shared axis limits so all panels are directly comparable
        x_min, x_max = adata.obsm['spatial'][:, 0].min(), adata.obsm['spatial'][:, 0].max()
        y_min, y_max = adata.obsm['spatial'][:, 1].min(), adata.obsm['spatial'][:, 1].max()

        # One panel per cluster, in sorted label order
        for i, cluster_id in enumerate(sorted(unique_clusters)):
            row = i // 4
            col = i % 4

            ax = axes[row, col]
            coords = adata.obsm['spatial']
            cluster_mask = (adata.obs['BANKSY_clusters'] == cluster_id)

            # Background: every cell in light grey
            ax.scatter(coords[:, 0], coords[:, 1], s=1, c='lightgray', alpha=0.1)

            # Foreground: this cluster's cells in its palette colour
            ax.scatter(coords[cluster_mask, 0], coords[cluster_mask, 1],
                       s=8,
                       color=professional_cmap(i),
                       alpha=0.8,
                       label=f'Cluster {cluster_id}')

            # Panel title and fixed limits, no axes clutter
            ax.set_title(f'Cluster {cluster_id}', fontsize=10)
            ax.set_xlim(x_min, x_max)
            ax.set_ylim(y_min, y_max)
            ax.axis('off')

        # Drop the unused panels in the final (partial) row
        for i in range(n_clusters, n_rows * 4):
            row = i // 4
            col = i % 4
            fig.delaxes(axes[row, col])

        plt.suptitle('Spatial Distribution by Cluster', fontsize=16, fontweight='bold')
        plt.tight_layout()
        save_path = os.path.join(output_dir, 'spatial_scatter_by_cluster.png')
        fig.savefig(save_path, bbox_inches='tight')
        plt.close(fig)
        logger.info(f"按聚类的空间散点图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成按聚类的空间散点图失败: {str(e)}")


def plot_umap_visualization(adata, output_dir):
    """Project the PCA embedding with UMAP and colour points by cluster."""
    try:
        logger.info("生成UMAP可视化...")

        if 'X_pca' not in adata.obsm:
            # UMAP is computed on the PCA embedding, so bail out without it.
            logger.warning("缺少PCA结果，无法生成UMAP")
            return

        # Fit the 2-D UMAP embedding with a fixed random state
        logger.info("计算UMAP投影...")
        embedding = umap.UMAP(random_state=42).fit_transform(adata.obsm['X_pca'])

        # Integer cluster codes drive the colormap
        codes = adata.obs['BANKSY_clusters'].astype('category').cat.codes

        fig, ax = plt.subplots(figsize=(10, 8))
        points = ax.scatter(
            embedding[:, 0],
            embedding[:, 1],
            c=codes,
            cmap=professional_cmap,
            s=8,
            alpha=0.7
        )

        # Cluster legend built from the scatter's colour mapping
        cluster_legend = ax.legend(*points.legend_elements(),
                                   title="Clusters",
                                   loc="best")
        ax.add_artist(cluster_legend)

        ax.set_title('UMAP: Feature Space', fontsize=14)
        ax.set_xlabel('UMAP1')
        ax.set_ylabel('UMAP2')

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'umap_visualization.png')
        fig.savefig(save_path, bbox_inches='tight')
        plt.close(fig)
        logger.info(f"UMAP可视化已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成UMAP可视化失败: {str(e)}")


def plot_cluster_correlation(adata, output_dir):
    """Heatmap of Pearson correlations between per-cluster mean expression profiles."""
    try:
        logger.info("生成聚类相关性热图...")

        cluster_labels = adata.obs['BANKSY_clusters'].astype('category').cat.categories.tolist()

        # Dense mean expression profile per cluster
        profiles = []
        for label in cluster_labels:
            subset = adata[adata.obs['BANKSY_clusters'] == label]
            if sp.issparse(subset.X):
                profiles.append(np.array(subset.X.mean(axis=0)).flatten())
            else:
                profiles.append(subset.X.mean(axis=0))

        # Pairwise Pearson correlation between cluster profiles
        corr = np.corrcoef(np.array(profiles))

        # Annotated heatmap, fixed to the full [-1, 1] range
        plt.figure(figsize=(10, 8))
        sns.heatmap(corr, annot=True, fmt=".2f", cmap='coolwarm',
                    vmin=-1, vmax=1, xticklabels=cluster_labels, yticklabels=cluster_labels)
        plt.title('Inter-Cluster Expression Correlation', fontsize=16)
        plt.tight_layout()

        save_path = os.path.join(output_dir, 'cluster_correlation.png')
        plt.savefig(save_path, dpi=300)
        plt.close()
        logger.info(f"聚类相关性热图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成聚类相关性热图失败: {str(e)}")


def plot_differential_expression(adata, output_dir, n_genes=5):
    """Heatmap of per-cluster z-scored mean expression for top DE genes.

    Runs a t-test marker ranking per cluster, pools the top ``n_genes`` of
    every cluster, and plots their cluster-mean expression as a z-scored
    heatmap.

    Parameters
    ----------
    adata : anndata.AnnData
        Preprocessed data with ``obs['BANKSY_clusters']``; gains the
        ``rank_genes_groups`` results in ``uns``.
    output_dir : str
        Directory receiving ``differential_expression_heatmap.png``.
    n_genes : int
        Number of top genes per cluster to include.
    """
    try:
        logger.info("生成差异基因表达热图...")

        # Differential expression ranking per cluster
        logger.info("进行差异基因表达分析...")
        sc.tl.rank_genes_groups(adata, groupby='BANKSY_clusters', method='t-test')

        # Union of each cluster's top-ranked genes
        top_genes = set()
        for cluster in adata.obs['BANKSY_clusters'].cat.categories:
            cluster_genes = sc.get.rank_genes_groups_df(adata, group=cluster).head(n_genes)['names']
            top_genes.update(cluster_genes)

        # sorted() makes gene (column) order deterministic; iterating a raw
        # set would shuffle columns between runs.
        adata_top = adata[:, sorted(top_genes)].copy()

        # The pipeline already log1p-transforms in preprocess_data (scanpy
        # records this in uns['log1p']); only transform here if it has not
        # happened yet, avoiding a double log transform.
        if 'log1p' not in adata_top.uns:
            sc.pp.log1p(adata_top)

        # Mean expression per cluster
        cluster_means = []
        clusters = adata_top.obs['BANKSY_clusters'].cat.categories

        for cluster in clusters:
            # .values gives a plain boolean mask for matrix row selection
            cluster_mask = (adata_top.obs['BANKSY_clusters'] == cluster).values

            if sp.issparse(adata_top.X):
                mean_expr = np.array(adata_top.X[cluster_mask].mean(axis=0)).flatten()
            else:
                mean_expr = adata_top.X[cluster_mask].mean(axis=0)
            cluster_means.append(mean_expr)

        df = pd.DataFrame(cluster_means, index=clusters, columns=adata_top.var_names)

        # Z-score each cluster profile across genes; zero-variance rows map
        # to 0 instead of NaN (which would leave blank heatmap cells).
        zscore_df = df.apply(lambda x: (x - x.mean()) / x.std() if x.std() else x * 0.0, axis=1)

        # Annotated diverging heatmap centred at 0
        plt.figure(figsize=(max(8, len(top_genes)), max(6, len(clusters))))
        sns.heatmap(zscore_df, cmap='vlag', center=0, annot=True, fmt=".1f")
        plt.title('Differential Gene Expression', fontsize=16)
        plt.ylabel('Clusters')
        plt.xlabel('Genes')
        plt.tight_layout()

        save_path = os.path.join(output_dir, 'differential_expression_heatmap.png')
        plt.savefig(save_path, dpi=300)
        plt.close()
        logger.info(f"差异基因表达热图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成差异基因表达热图失败: {str(e)}")


def plot_cluster_projection(adata, output_dir):
    """Scatter the first two PCA components, coloured by BANKSY cluster."""
    try:
        logger.info("生成聚类降维投影图...")

        if 'X_pca' not in adata.obsm:
            # The projection is just PC1 vs PC2, so PCA must already exist.
            logger.warning("没有PCA结果，无法生成投影图")
            return

        # Integer cluster codes drive the colormap
        adata.obs['BANKSY_clusters'] = adata.obs['BANKSY_clusters'].astype('category')
        cluster_codes = adata.obs['BANKSY_clusters'].cat.codes

        embedding = adata.obsm['X_pca']
        fig, ax = plt.subplots(figsize=(10, 8))
        points = ax.scatter(
            embedding[:, 0],
            embedding[:, 1],
            c=cluster_codes,
            cmap=professional_cmap,
            s=10,
            alpha=0.7
        )

        # Cluster legend built from the scatter's colour mapping
        cluster_legend = ax.legend(*points.legend_elements(),
                                   title="Clusters",
                                   loc="best")
        ax.add_artist(cluster_legend)

        ax.set_title('PCA Projection', fontsize=14)
        ax.set_xlabel('PC1')
        ax.set_ylabel('PC2')

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'cluster_projection.png')
        fig.savefig(save_path, dpi=300)
        plt.close(fig)
        logger.info(f"聚类降维投影图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成聚类投影图失败: {str(e)}")


def plot_neural_cell_analysis(adata, output_dir):
    """Two-panel neuron overview: spatial highlight plus marker-gene violins.

    Left panel: all cells in grey with the assumed neuron cluster in red.
    Right panel: per-cluster violin plots of neuron marker genes, if any
    are present in ``var_names``.
    """
    try:
        logger.info("生成神经细胞分析图...")

        # Two side-by-side panels
        fig, axes = plt.subplots(1, 2, figsize=(20, 8))

        # 1. Spatial distribution (left panel)
        ax1 = axes[0]

        # Optional tissue image as background
        img_path = adata.uns.get('tissue_image', '')
        if img_path and os.path.exists(img_path):
            img = mpimg.imread(img_path)
            x_min, x_max = adata.obsm['spatial'][:, 0].min(), adata.obsm['spatial'][:, 0].max()
            y_min, y_max = adata.obsm['spatial'][:, 1].min(), adata.obsm['spatial'][:, 1].max()
            ax1.imshow(img, extent=[x_min, x_max, y_min, y_max], alpha=0.2, origin='lower')
            logger.info(f"组织背景图像已加载: {img_path}")

        # NOTE(review): hard-coded assumption that cluster '0' is the neuron
        # cluster — unsupervised Leiden labels carry no cell-type meaning, so
        # this should be confirmed per dataset (e.g. via marker expression).
        neuron_cluster = '0'
        neuron_mask = adata.obs['BANKSY_clusters'] == neuron_cluster

        # All cells as small grey background points
        ax1.scatter(
            adata.obsm['spatial'][:, 0],
            adata.obsm['spatial'][:, 1],
            c='lightgray', s=1, alpha=0.3
        )

        # Highlight the assumed neuron cluster in red
        ax1.scatter(
            adata.obsm['spatial'][neuron_mask, 0],
            adata.obsm['spatial'][neuron_mask, 1],
            c='red', s=5, alpha=0.7, label=f'Neuron Cluster ({neuron_cluster})'
        )
        ax1.set_title('Spatial Distribution of Neurons', fontsize=14)
        ax1.axis('off')
        ax1.legend()

        # 2. Neuron marker gene expression (right panel)
        ax2 = axes[1]

        # Candidate marker genes; only those present in var_names are drawn.
        # NOTE(review): B2M/GFAP/VIM are not neuron-specific markers — verify
        # this gene list against the intended cell type.
        neuron_markers = ['B2M', 'GFAP', 'VIM', 'SNAP25', 'SYT1', 'MAP2']
        available_markers = [gene for gene in neuron_markers if gene in adata.var_names]

        if available_markers:
            # Per-cluster violin plots drawn into the right-hand axes
            sc.pl.violin(adata, available_markers, groupby='BANKSY_clusters',
                         rotation=0, stripplot=False, size=2, ax=ax2, show=False)
            ax2.set_title('Neuron Marker Expression', fontsize=14)
        else:
            # Fallback placeholder when none of the markers are present
            ax2.text(0.5, 0.5, 'Neuron marker genes not found',
                     ha='center', va='center', fontsize=12)
            ax2.set_title('Neuron Marker Expression', fontsize=14)
            ax2.set_axis_off()

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'neural_cell_analysis.png')
        plt.savefig(save_path, dpi=300)
        plt.close(fig)
        logger.info(f"神经细胞分析图已保存: {save_path}")

    except Exception as e:
        logger.error(f"生成神经细胞分析图失败: {str(e)}")


def plot_gene_expression_violin(adata, output_dir):
    """Violin plots of top marker genes (3 per cluster, de-duplicated).

    Runs a t-test marker ranking, pools each cluster's top 3
    non-mitochondrial genes, and draws per-cluster violin plots.
    """
    try:
        # Rank marker genes per cluster
        sc.tl.rank_genes_groups(adata, 'BANKSY_clusters', method='t-test')

        # Collect each cluster's top genes
        marker_genes = []
        for cluster in np.unique(adata.obs['BANKSY_clusters']):
            top_genes = sc.get.rank_genes_groups_df(adata, group=str(cluster))
            top_genes = top_genes[~top_genes['names'].str.upper().str.startswith('MT-')]  # drop mitochondrial genes
            marker_genes.extend(top_genes['names'].head(3).tolist())

        # De-duplicate.
        # NOTE(review): set() makes the gene order non-deterministic between
        # runs; sorted(set(...)) would give reproducible plots.
        marker_genes = list(set(marker_genes))

        if not marker_genes:
            logger.warning("未找到标志基因，跳过小提琴图")
            return

        # NOTE(review): sc.pl.violin creates its own figure when no ax is
        # passed, so the plt.figure(figsize=...) below is likely unused and
        # the subsequent title/labels/savefig act on whichever figure is
        # current — verify the saved image actually contains the violins.
        plt.figure(figsize=(15, 8))
        sc.pl.violin(adata, marker_genes, groupby='BANKSY_clusters',
                     rotation=90, stripplot=False, size=2, log=False)

        plt.title("Marker Gene Expression Distribution", fontsize=16)
        plt.ylabel("Normalized Expression", fontsize=14)
        plt.xlabel("Cluster", fontsize=14)

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'gene_expression_violin.png')
        plt.savefig(save_path, bbox_inches='tight')
        plt.close()
        logger.info(f"基因表达小提琴图已保存: {save_path}")

    except Exception as e:
        logger.error(f"绘制基因表达小提琴图失败: {str(e)}")


def plot_differential_expression_heatmap(adata, output_dir, groupby='BANKSY_clusters',
                                         n_genes_per_cluster=3, figsize=(15, 12)):
    """
    Heatmap of group-mean expression for the top differential genes.

    Runs a t-test ranking grouped by ``groupby``, pools the top
    ``n_genes_per_cluster`` genes of every group, and draws the raw
    (not z-scored) group-mean expression as a heatmap.

    Parameters
    ----------
    adata : anndata.AnnData
        Data with a categorical ``obs[groupby]`` column.
    output_dir : str
        Directory receiving ``differential_expression_<groupby>.png``.
    groupby : str
        Name of the grouping column in ``adata.obs``.
    n_genes_per_cluster : int
        Genes kept per group before de-duplication.
    figsize : tuple
        Figure size passed to matplotlib.
    """
    try:
        logger.info("生成差异表达热图...")

        # Bail out if the grouping column is missing
        if groupby not in adata.obs:
            logger.warning(f"分组列 {groupby} 不存在，跳过热图")
            return

        # Differential expression ranking on the processed matrix (.X, not .raw)
        sc.tl.rank_genes_groups(adata, groupby, method='t-test', use_raw=False)

        # Pool the top marker genes of every group
        top_genes = []
        cluster_names = adata.obs[groupby].cat.categories

        for cluster in cluster_names:
            # Ranked genes for this group
            df = sc.get.rank_genes_groups_df(adata, group=cluster)
            top_genes.extend(df.head(n_genes_per_cluster)['names'].tolist())

        # De-duplicate.
        # NOTE(review): set() order is non-deterministic; sorted(set(...))
        # would make column order reproducible between runs.
        top_genes = list(set(top_genes))

        if not top_genes:
            logger.warning("未找到显著的标志基因，跳过热图")
            return

        # Build the groups x genes expression matrix
        logger.info(f"创建热图矩阵 ({len(top_genes)} 个基因)...")

        # Mean expression per group
        cluster_expr = []
        for cluster in cluster_names:
            cluster_cells = adata.obs[groupby] == cluster
            cluster_data = adata[cluster_cells, top_genes]

            if sp.issparse(cluster_data.X):
                cluster_mean = np.array(cluster_data.X.mean(axis=0)).flatten()
            else:
                cluster_mean = cluster_data.X.mean(axis=0)

            cluster_expr.append(cluster_mean)

        expr_matrix = np.vstack(cluster_expr)

        # Draw the heatmap
        plt.figure(figsize=figsize)
        sns.heatmap(expr_matrix,
                    cmap="viridis",
                    xticklabels=top_genes,
                    yticklabels=cluster_names,
                    cbar_kws={'label': 'Expression Level'})

        plt.title(f"{groupby} - Marker Gene Expression", fontsize=16)
        plt.xlabel("Genes", fontsize=14)
        plt.ylabel(groupby, fontsize=14)
        plt.yticks(rotation=0)

        plt.tight_layout()
        save_path = os.path.join(output_dir, f'differential_expression_{groupby}.png')
        plt.savefig(save_path, bbox_inches='tight', dpi=300)
        plt.close()
        logger.info(f"差异表达热图保存至: {save_path}")

    except Exception as e:
        logger.error(f"生成差异表达热图失败: {str(e)}")


# Connectivity-matrix visualization helper
def plot_cluster_connectivity(adata, output_dir):
    """Heatmap of the cluster-to-cluster spatial connectivity matrix."""
    try:
        if 'cluster_connectivity' not in adata.uns:
            # The matrix is produced by banksy_clustering; nothing to draw without it.
            logger.warning("未找到聚类连通性矩阵数据，跳过绘图")
            return

        matrix = adata.uns['cluster_connectivity']
        labels = matrix.index

        # Annotated square heatmap on a fixed [0, 1] scale
        plt.figure(figsize=(12, 10))
        heat_ax = sns.heatmap(matrix,
                              annot=True,
                              fmt=".2f",
                              cmap="coolwarm",
                              vmin=0,
                              vmax=1,
                              square=True,
                              linewidths=0.5,
                              annot_kws={"size": 9},
                              xticklabels=labels,
                              yticklabels=labels)

        heat_ax.set_title('细胞亚群空间连通性矩阵', fontsize=16, pad=20)
        heat_ax.set_xlabel('目标亚群', fontsize=12)
        heat_ax.set_ylabel('来源亚群', fontsize=12)

        plt.tight_layout()
        save_path = os.path.join(output_dir, 'cluster_connectivity.png')
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()
        logger.info(f"细胞亚群连通性矩阵图已保存: {save_path}")

    except Exception as e:
        logger.error(f"绘制细胞亚群连通性矩阵失败: {str(e)}")

# -----------------------------------------------------------
# 6. Main pipeline (simplified variant)
# -----------------------------------------------------------
if __name__ == "__main__":
    try:
        # Output directory for all figures and the final .h5ad
        output_dir = 'simplified_results'
        os.makedirs(output_dir, exist_ok=True)

        # 1. Load data
        resolution = '016um'
        logger.info(f"加载 {resolution} 分辨率数据...")
        adata = load_visium_hd_data(resolution=resolution)

        # 2. Preprocess
        logger.info("预处理...")
        adata = preprocess_data(adata)

        # 3. BANKSY feature generation (simplified)
        logger.info("计算简化版BANKSY特征...")
        banksy_adata = generate_banksy_features(adata, lambda_val=0.1, nbrs=10)

        # 4. Clustering (simplified)
        logger.info("执行简化聚类...")
        adata = banksy_clustering(adata, banksy_adata, n_clusters=10)

        # Free the large feature matrix before plotting
        del banksy_adata
        gc.collect()
        log_memory_usage("聚类完成后")

        # 5. Generate every figure
        logger.info("生成所有可视化图表...")

        # Basic visualizations
        plot_spatial_clusters(adata, output_dir)
        plot_cluster_size_distribution(adata, output_dir)
        plot_spatial_scatter_by_cluster(adata, output_dir)
        plot_umap_visualization(adata, output_dir)
        plot_cluster_correlation(adata, output_dir)

        # Advanced visualizations
        plot_differential_expression(adata, output_dir)
        plot_cluster_projection(adata, output_dir)
        plot_neural_cell_analysis(adata, output_dir)
        plot_gene_expression_violin(adata, output_dir)
        plot_differential_expression_heatmap(adata, output_dir)
        # Connectivity-matrix heatmap
        plot_cluster_connectivity(adata, output_dir)

        # Persist the annotated AnnData
        adata.write(f'{output_dir}/simplified_results.h5ad', compression='gzip')

        logger.info("处理成功完成!所有图表已生成")

    except Exception as e:
        logger.exception("处理过程中出错")
        # Best-effort: dump the traceback next to the outputs. Guarded so a
        # write failure cannot mask the original error.
        import traceback
        try:
            with open(os.path.join(output_dir, 'processing_error.txt'), 'w') as f:
                f.write(f"错误: {str(e)}\n")
                f.write(traceback.format_exc())
        except OSError:
            logger.error("无法写入错误日志文件")
        # Bare raise preserves the original traceback; ``raise e`` re-raises
        # from this frame and loses the original context.
        raise