# pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
# conda create --name envBioInfo3.11.5 python=3.11.5
# conda activate envBioInfo3.11.5
# pip install scanpy anndata
# pip install --upgrade numpy scipy pandas scanpy anndata
# pip install zstandard   其他文件需要
# pip install bitarray   其他文件需要

import numpy as np
import scipy.sparse as sp
import anndata as ad
import os
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score
from datetime import datetime
import copy
import time
import json
import h5py

# for UMAP
import scanpy as sc
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt

# import seaborn as sns
import pandas as pd

# 引用信息嵌入算法模块
import HidingMethodDe
import HidingMethodEmbr


def EmbedData(
    inputPath: str,
    saveasPath: str,
    outputPath: str,
    secret_data: str,
    sn: int,
    m: int,
    method: str = "Embr",
):
    """
    Embed secret ASCII data into an .h5ad single-cell file.

    Parameters
    ----------
    inputPath : str
        Path of the source .h5ad file.
    saveasPath : str
        Path where a plain re-saved copy is written with the same compression
        settings as the stego output, so file sizes can be compared fairly.
    outputPath : str
        Path of the stego .h5ad file to write.
    secret_data : str
        ASCII payload; non-ASCII characters raise UnicodeEncodeError.
    sn : int
        Bit-count parameter forwarded to the "Embr" algorithm.
    m : int
        Second parameter forwarded to the "Embr" algorithm.
    method : str
        Embedding algorithm name, "Embr" or "de".

    Returns
    -------
    AnnData when method == "Embr";
    (AnnData, swapFlags) when method == "de".

    Raises
    ------
    ValueError
        If `method` is neither "Embr" nor "de".
    """
    print("    EmbedData函数调用")
    # 1. Load the original data
    adata = ad.read_h5ad(inputPath)
    originalSize = os.path.getsize(inputPath)
    print(f"    原始文件大小:{originalSize}")
    # 2. Probe the original HDF5 compression strategy (currently a no-op;
    #    kept for ad-hoc inspection via the commented print)
    with h5py.File(inputPath, "r") as f:

        def visit(k, v):
            # only datasets expose `.compression`; groups are skipped
            if hasattr(v, "compression"):
                # print(k, v.compression, v.dtype, v.shape)
                pass

        f.visititems(visit)

    # Re-save with the same codec/settings used for the stego file, so the
    # later size comparison reflects embedding overhead, not codec changes.
    adata.write_h5ad(saveasPath, compression="gzip", compression_opts=6, as_dense=())
    newSize = os.path.getsize(saveasPath)

    print(f"    另存后文件的大小:{newSize}")
    # Keep an untouched copy of X for the fidelity check below.
    assert isinstance(
        adata.X, sp.csr_matrix
    )  # the pipeline only supports scipy.sparse.csr_matrix carriers
    OriginalAdataX = copy.deepcopy(
        adata.X
    )  # pristine copy, type scipy.sparse.csr_matrix
    print(f"    OriginalAdataX.shape: {OriginalAdataX.shape}")
    print(f"    adata.X 数据类型: {adata.X.dtype}")
    print(f"    adata.X.data.shape: {adata.X.data.shape}")
    print(f"    adata.X.data 数据类型: {adata.X.data.dtype}")

    # 3. Serialize the secret: ASCII bytes -> flat bit list, 8 bits per byte,
    #    most significant bit first (e.g. 65 -> '01000001').
    asciiBytes = secret_data.encode("ascii")
    bitList = []
    for byte in asciiBytes:
        binaryStr = f"{byte:08b}"
        bitList.extend(int(bit) for bit in binaryStr)

    # 4. Embed the bits into the carrier.
    # adata.X.data is the 1-D ndarray of non-zero values of the CSR matrix.
    if method == "Embr":
        targetData = HidingMethodEmbr.EmbedWithEmbr(adata.X.data, bitList, sn, m)
    elif method == "de":
        targetData, swapFlags = HidingMethodDe.embedSecretWithDe(
            adata.X.data, bitList, precision=9
        )
    else:
        # Fail fast; the original code fell through to a NameError on targetData.
        raise ValueError(f"Unknown embedding method: {method!r}")
    print(f"    嵌入前送入的元素个数{len(adata.X.data)}")
    print(f"    嵌入后返回的元素个数{len(targetData)}")
    # 5. Swap the embedded values into the matrix
    adata.X.data = targetData
    print(f"    adata.X.data.shape: {adata.X.data.shape}")
    print(f"    adata.X.data 数据类型: {adata.X.data.dtype}")
    # 6. Biological-compatibility sanity check: largest absolute perturbation
    originalData = sp.csr_matrix(OriginalAdataX)
    maxDiff = np.max(np.abs(adata.X.data - originalData.data))
    # Log the pre-write check
    print(f"    基因数据 maxDiff:{maxDiff:.6e}")
    base, ext = os.path.splitext(inputPath)
    # raw string: plain "\original" only works because "\o" is not an escape
    base = base.replace(r"\original", "")
    RESULT_TXT = f"{base}_sn_{sn}_result.txt"
    # Append to the result txt
    with open(RESULT_TXT, "a", encoding="utf-8") as f:
        f.write(f"    \n=== {inputPath} ===\n")
        f.write(
            f"    \n=== 写入输出文件前的检验 @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ===\n"
        )
        f.write(f"    基因数据 maxDiff:{maxDiff:.6e}")
    # 7. Save the stego file and report size inflation
    adata.write_h5ad(outputPath, compression="gzip", compression_opts=6, as_dense=())
    newSize = os.path.getsize(outputPath)
    print(f"    文件膨胀率: {(newSize - originalSize) / originalSize * 100:.6f}%")

    if method == "Embr":
        return adata
    return adata, swapFlags


def ExtractData(
    inputPath: str,
    outputPath: str,
    dataSize: int,
    sn: int,
    m: int,
    method: str = "Embr",
):
    """
    Extract the embedded secret data from a stego .h5ad file. For the
    "Embr" method the original carrier values are also restored and the
    restored AnnData is written to `outputPath`.

    Parameters
    ----------
    inputPath : str
        Path of the stego .h5ad file.
    outputPath : str
        Path of the .h5ad file to write (restored for "Embr").
    dataSize : int
        Number of secret bits to extract (consumed by the "de" method).
    sn : int
        Number of low-order bits per float32 carrying payload ("Embr").
    m : int
        Second parameter of the "Embr" method.
    method : str
        Embedding algorithm name, "Embr" or "de".

    Returns
    -------
    str : the extracted secret string.

    Raises
    ------
    ValueError
        If `method` is neither "Embr" nor "de".
    """
    print("    ExtractData函数调用")
    # 1. Load the stego data
    adata = ad.read_h5ad(inputPath)
    assert isinstance(
        adata.X, sp.csr_matrix
    )  # the pipeline only supports scipy.sparse.csr_matrix carriers
    data = adata.X.data
    seqLen = len(data)
    print(f"    读入的序列长度为：{seqLen}")

    if method == "Embr":
        # decompressedSnbs already holds the decrypted original low bits
        bioStr, decompressedSnbs = HidingMethodEmbr.ExtractWithEmbr(data, sn, m)
        print(f"    bioStr: {bioStr}, Len: {len(bioStr)}")
        print(f"    restoredSnbs Len: {len(decompressedSnbs)}")
        assert len(decompressedSnbs) >= seqLen * sn

        # Restore the original sn low-order bits of every float32 value.
        # decompressedSnbs is a str (or list of '0'/'1' chars); strip padding.
        decompressedSnbs1 = decompressedSnbs[: seqLen * sn]
        bits = np.array(list(decompressedSnbs1), dtype=np.uint8)  # '0'/'1' -> 0/1
        bits = bits.reshape(-1, sn)  # one row of sn bits per value
        powers = 1 << np.arange(sn - 1, -1, -1)  # [2^(sn-1), ..., 2^0]
        snbGroup = bits @ powers  # vectorized bits -> integers
        assert len(snbGroup) == seqLen, "snbGroup 长度与 float32 数组不一致"
        # Patch the low bits in place through a uint32 view of the float32 buffer.
        seq = adata.X.data.astype(np.float32)
        print(f"seq adata.X.data:{seq[:30]}")
        uint32_view = seq.view(np.uint32)
        mask = np.uint32((1 << sn) - 1)
        uint32_view[:] = (uint32_view & ~mask) | (snbGroup & mask)
        # seq is already float32; the view wrote through to it
        print(f"seq adata.X.data changed:{seq[:30]}")
        adata.X.data = seq
    elif method == "de":
        # The "de" method extracts without the SNB restoration step; the
        # original code unconditionally referenced decompressedSnbs here
        # and crashed with a NameError for this method.
        bioStr = HidingMethodDe.extractSecretWithDe(data, dataSize)
        print(f"    bioStr: {bioStr}, Len: {len(bioStr)}")
    else:
        raise ValueError(f"Unknown extraction method: {method!r}")

    adata.write_h5ad(outputPath, compression="gzip", compression_opts=6, as_dense=())
    print("Data written to file:", outputPath)
    return bioStr


def VerifyNumericalFidelity(
    file1_path: str, file2_path: str, tol: float = 1e-9, verbose: bool = False
):
    """
    Compare the expression matrices (adata.X) of two H5AD files.

    Parameters:
        file1_path (str): path of the first H5AD file
        file2_path (str): path of the second H5AD file
        tol (float): reserved tolerance parameter; currently unused — the
            caller judges the returned max/mean differences itself
        verbose (bool): print detailed progress information

    Returns:
        dict with keys "max_diff" and "mean_diff" on success;
        False when a file cannot be read, a matrix is missing, or the
        matrices differ in shape or non-zero count.
    """
    assert os.path.exists(file1_path) and os.path.exists(file2_path)
    print("比较两个 H5AD 文件中的基因组数据 (adata.X) 是否相同")
    try:
        adata1 = ad.read_h5ad(file1_path)
        adata2 = ad.read_h5ad(file2_path)
    except Exception as e:
        if verbose:
            print(f"    文件读取失败: {e}")
        return False

    X1, X2 = adata1.X, adata2.X

    if verbose:
        print(f"    比较基因组数据: {file1_path} vs {file2_path}")

    # 1. Both matrices must exist
    if X1 is None or X2 is None:
        if verbose:
            print("adata.X 为 None")
        return False

    # 2. Normalize both to CSR so sparse and dense inputs compare uniformly
    X1 = X1.tocsr() if sp.issparse(X1) else sp.csr_matrix(X1)
    X2 = X2.tocsr() if sp.issparse(X2) else sp.csr_matrix(X2)

    # 3. Shape check
    if X1.shape != X2.shape:
        if verbose:
            print(f"    数据形状不一致: {X1.shape} vs {X2.shape}")
        return False
    if verbose:
        print(f"    形状一致: {X1.shape}")

    # 4. Unify dtypes by promoting both to the common higher-precision type
    if X1.dtype != X2.dtype:
        if verbose:
            print(f"    数据类型不同: {X1.dtype} vs {X2.dtype}，尝试统一转换")
        dtype_common = np.promote_types(X1.dtype, X2.dtype)
        X1 = X1.astype(dtype_common)
        X2 = X2.astype(dtype_common)

    # 5. Quick screen: non-zero counts must match
    nnz1 = X1.nnz
    nnz2 = X2.nnz
    if nnz1 != nnz2:
        if verbose:
            print(f"    非零元素数量不同: {nnz1} vs {nnz2}")
        return False
    if verbose:
        print(f"    非零元素数量一致: {nnz1}")

    # 6. Densify and compute element-wise absolute differences
    arr1 = X1.toarray()
    arr2 = X2.toarray()
    diff = np.abs(arr1 - arr2)
    max_diff = diff.max()
    mean_diff = diff.mean()
    print(f"    max_diff: {max_diff} mean_diff: {mean_diff}")
    return {
        "max_diff": max_diff,
        "mean_diff": mean_diff,
    }


def VerifyPca(
    file1_path: str, file2_path: str, max_pca_diff=0.01, max_cluster_diff=0.03
):
    """
    Assess biological compatibility of an original/stego file pair using
    PCA stability (first-PC correlation) and clustering stability (ARI).
    """
    # Load both files; any reader failure becomes a RuntimeError.
    try:
        adata_original = ad.read_h5ad(file1_path)
        adata_restore = ad.read_h5ad(file2_path)
    except Exception as e:
        raise RuntimeError(f"    文件读取失败: {e}")

    def _to_dense(matrix):
        # Densify sparse input; otherwise copy so the AnnData stays untouched.
        if sp.issparse(matrix):
            return matrix.toarray()
        if hasattr(matrix, "copy"):
            return matrix.copy()
        return np.array(matrix)

    dense_orig = _to_dense(adata_original.X)
    dense_stego = _to_dense(adata_restore.X)

    # PCA stability: correlate the first principal component of each dataset.
    coords_orig = PCA(n_components=2).fit_transform(dense_orig)
    coords_stego = PCA(n_components=2).fit_transform(dense_stego)
    pca_corr = np.corrcoef(coords_orig[:, 0], coords_stego[:, 0])[0, 1]

    # Clustering stability: ARI between k-means labelings of the embeddings.
    labels_orig = KMeans(n_clusters=5, random_state=42).fit_predict(coords_orig)
    labels_stego = KMeans(n_clusters=5, random_state=42).fit_predict(coords_stego)
    ari_score = adjusted_rand_score(labels_orig, labels_stego)

    # Compatible when the first PC correlation is near 1 and ARI is high.
    is_compatible = abs(1 - pca_corr) < max_pca_diff and ari_score > (
        1 - max_cluster_diff
    )
    print(f"    PCA correlation: {pca_corr:.4f}")
    print(f"    Clustering similarity (ARI): {ari_score:.4f}")
    print(f"    Is compatible: {is_compatible}")

    return {
        "pca_correlation": float(pca_corr),
        "clustering_similarity_ari": float(ari_score),
        "is_compatible": is_compatible,
    }


# region Use UMAP + a shared nearest-neighbor graph to compare the structural
# consistency of two single-cell datasets (original vs stego)
# Configure global plotting defaults used by the figures produced below
sc.set_figure_params(dpi=100, figsize=(6, 6))
plt.rcParams["figure.dpi"] = 100


def VerifyWithSharedUMAP(
    file1_path: str,
    file2_path: str,
    output_figure_path: str,
    batch_key: str = "sample_id",
    label_key: str = "cell_type",
    n_comps: int = 50,
    umap_neighbors: int = 30,
    umap_min_dist: float = 0.3,
    random_state: int = 42,
    plot: bool = True,
    verbose: bool = True,
    mantel_sample_size: int = 1000,
    use_approx_mantel: bool = True,
) -> dict:
    """
    Compare the structural consistency of two H5AD files via a shared UMAP
    embedding (memory-optimized version).

    Key points:
      - Mantel correlation is approximated on a random cell sample to avoid
        O(N^2) memory blow-up
      - all arrays are cast to float32
      - visualization is separated from analysis to limit live intermediates

    Parameters:
    ----------
    file1_path, file2_path : str
        Paths of the original and stego H5AD files.
    output_figure_path : str
        Where to save the side-by-side UMAP figure (when plot=True).
    batch_key : str
        obs column used to tag which file each cell came from.
    label_key : str
        obs column with cell labels; Leiden clustering is used if absent.
    mantel_sample_size : int
        Number of randomly sampled cells for the approximate Mantel test
        (500~2000 recommended).
    use_approx_mantel : bool
        Enable the sampled approximation; otherwise Mantel is skipped.

    Returns:
    -------
    dict with clustering ARI and approximate Mantel correlation.
    """
    np.random.seed(random_state)
    if verbose:
        print("    正在加载两个数据集...")

    # 1. Load both datasets
    adata_orig = sc.read_h5ad(file1_path)
    adata_wm = sc.read_h5ad(file2_path)

    # 2. Tag each dataset with its batch of origin
    adata_orig.obs[batch_key] = "original"
    adata_wm.obs[batch_key] = "stego"

    # Fall back to Leiden clustering when real labels are unavailable
    use_clustering = False
    if label_key not in adata_orig.obs or label_key not in adata_wm.obs:
        if verbose:
            print(f"    警告: {label_key} 不存在，将使用 Leiden 聚类进行 ARI 计算")
        use_clustering = True
    # Single place that decides which obs column carries the labels
    effective_label = "leiden" if use_clustering else label_key

    # 3. Concatenate the datasets
    adata_combined = ad.concat(
        [adata_orig, adata_wm],
        label=batch_key,
        keys=["original", "stego"],
        index_unique="-",  # avoid index collisions
    )

    if verbose:
        print(f"    合并后数据形状: {adata_combined.shape}")

    # 4. PCA (densify and cast to float32 to bound memory)
    if verbose:
        print("    执行 PCA 降维...")

    if not isinstance(adata_combined.X, np.ndarray):
        adata_combined.X = adata_combined.X.toarray()

    adata_combined.X = np.array(adata_combined.X, dtype=np.float32)

    # Optional normalization (enable if the inputs were not scaled upstream)
    # sc.pp.scale(adata_combined)  # z-score

    sc.tl.pca(
        adata_combined, n_comps=n_comps, random_state=random_state, svd_solver="arpack"
    )

    # Force the PCA result to float32 as well
    adata_combined.obsm["X_pca"] = adata_combined.obsm["X_pca"].astype(np.float32)

    # 5. Neighbor graph + UMAP on the combined data
    if verbose:
        print("    构建共享邻居图并运行 UMAP...")

    sc.pp.neighbors(
        adata_combined,
        n_pcs=n_comps,
        n_neighbors=umap_neighbors,
        random_state=random_state,
    )
    sc.tl.umap(adata_combined, min_dist=umap_min_dist, random_state=random_state)

    # 6. Clustering & ARI
    if use_clustering:
        sc.tl.leiden(
            adata_combined,
            key_added="leiden",
            resolution=1.0,
            random_state=random_state,
        )

    obs_subset = adata_combined.obs[[batch_key, effective_label]].copy()
    orig_labels = obs_subset[obs_subset[batch_key] == "original"][effective_label]
    wm_labels = obs_subset[obs_subset[batch_key] == "stego"][effective_label]

    # Align label order with the per-file cell order, truncated to the
    # shorter of the two datasets
    orig_index = adata_orig.obs.index
    wm_index = adata_wm.obs.index
    common_cells = min(len(orig_index), len(wm_index))

    labels1 = orig_labels.reindex(orig_index[:common_cells]).values
    labels2 = wm_labels.reindex(wm_index[:common_cells]).values

    # Coerce labels to numeric; unparsable entries become NaN
    labels1 = pd.to_numeric(labels1, errors="coerce")
    labels2 = pd.to_numeric(labels2, errors="coerce")

    # Drop cell pairs where either label failed to parse
    if np.isnan(labels1).any() or np.isnan(labels2).any():
        valid_indices = ~np.isnan(labels1) & ~np.isnan(labels2)
        labels1 = labels1[valid_indices]
        labels2 = labels2[valid_indices]

    ari_score = adjusted_rand_score(labels1, labels2)

    # 7. Memory-friendly approximate Mantel test in PCA space
    mantel_r = None
    if use_approx_mantel:
        if verbose:
            print(
                f"    正在进行近似 Mantel 测试（采样 {mantel_sample_size} 个细胞）..."
            )

        # Random cell sample shared by both datasets
        n_total = len(orig_labels)
        sample_n = min(mantel_sample_size, n_total)
        sample_idx = np.random.choice(n_total, size=sample_n, replace=False)

        # PCA subsets (float32)
        pca_orig = adata_combined[adata_combined.obs[batch_key] == "original"].obsm[
            "X_pca"
        ]
        pca_wm = adata_combined[adata_combined.obs[batch_key] == "stego"].obsm["X_pca"]

        pca_orig_sample = pca_orig[sample_idx].astype(np.float32)
        pca_wm_sample = pca_wm[sample_idx].astype(np.float32)

        # Compute the pairwise distance matrix in chunks to bound memory
        def chunked_pdist(X, chunk_size=500):
            n = X.shape[0]
            D = []
            for i in range(0, n, chunk_size):
                D.append(cdist(X[i : i + chunk_size], X, metric="euclidean").ravel())
            return np.hstack(D)

        try:
            D1 = chunked_pdist(pca_orig_sample)
            D2 = chunked_pdist(pca_wm_sample)
            mantel_r = np.corrcoef(D1, D2)[0, 1]
        except MemoryError:
            if verbose:
                print("    即使采样仍发生内存错误，跳过 Mantel...")
            mantel_r = np.nan
    else:
        mantel_r = np.nan

    # 8. Optional visualization: UMAP colored by batch and by label
    if plot:
        if verbose:
            print("    绘制 UMAP 可视化...")
        with plt.rc_context({"figure.figsize": (10, 5)}):
            fig, axes = plt.subplots(1, 2)

            sc.pl.umap(
                adata_combined,
                color=batch_key,
                ax=axes[0],
                title="Batch",
                show=False,
                frameon=True,
            )

            sc.pl.umap(
                adata_combined,
                color=effective_label,
                ax=axes[1],
                title=effective_label,
                show=False,
                frameon=True,
            )

            plt.tight_layout()
            # plt.show()
            plt.savefig(output_figure_path, dpi=300, bbox_inches="tight")

    # 9. Compatibility verdict (Mantel requirement waived when skipped)
    is_compatible = (ari_score > 0.9) and (
        mantel_r > 0.8 if not np.isnan(mantel_r) else True
    )

    print("  UMAP 分析完成:")
    # Note: these were plain strings before (missing f-prefix), which printed
    # the literal placeholders instead of the values.
    print(f"  聚类一致性 (ARI): {ari_score:.4f}")
    print(
        f"    结构相似性 (近似 Mantel): {mantel_r:.4f}"
        if not np.isnan(mantel_r)
        else "Mantel: 跳过"
    )
    print(f"  是否兼容: {is_compatible}")

    return {
        "聚类一致性 (ARI)": float(ari_score),
        "结构相似性 (近似 Mantel)": float(mantel_r),
    }


# endregion


# ===================== 主程序 =====================
def MainFunc(
    method="Embr",
    sn=3,
    m=7,
):
    # 配置文件路径
    file_list = [
        "5a3bd24d-2df4-4cf4-b90d-56d457af3ae1.h5ad",
        "6ac352e8-b685-4ba1-9cde-769621f4e1c9.h5ad",
        "7a52a68e-b1dc-44e4-827f-c2e71b2d43c1.h5ad",
        "7ea2a79e-01cd-4389-93a1-1ffc2336af84.h5ad",
        "768d939a-e37e-442c-a638-6452bd3b9eab.h5ad",
        "aadabf62-c63b-4fb6-a976-bc8a22e63b6b.h5ad",
        "b605970e-f731-4b3e-81c6-f809549fbd11.h5ad",
        "fd011bed-343f-4292-b7f1-1a8a6250aeb7.h5ad",
    ]
    for fn in file_list:
        INPUT_FILE = r"D:\BioInfo数据集\original" + "\\" + fn
        base, ext = os.path.splitext(INPUT_FILE)
        base = base.replace("\original", "")
        OUTPUT_FILE_1 = f"{base}_File1 sn_{sn} original{ext}"  # save as
        OUTPUT_FILE_2 = f"{base}_File2 sn_{sn} stego{ext}"
        OUTPUT_FILE_3 = f"{base}_File3 sn_{sn} restored{ext}"
        OUTPUT_RESULT_TXT = f"{base}_sn_{sn}_result.txt"
        OUTPUT_FIGURE = f"{base}_sn_{sn}_umap figure.png"

        SECRET_MSG = "BIO-SECRET-2025 " * 20  # ASCII 字符串, 320字节

        # 将snbs提取出来并存储为txt文件
        # SNBS_OUTPUT_FILE_1 = f"{base}_SNBS_File1 sn_{sn} original.txt"  #save as
        # adata1 = ad.read_h5ad(OUTPUT_FILE_1)
        # snbs1 = HidingMethodEmbr.GetSnbBitStr(adata1.X.data,sn)
        # open(SNBS_OUTPUT_FILE_1, 'w').write(''.join(snbs1))
        # SNBS_OUTPUT_FILE_2 = f"{base}_SNBS_File2 sn_{sn} stego.txt"
        # adata2 = ad.read_h5ad(OUTPUT_FILE_2)
        # snbs2 = HidingMethodEmbr.GetSnbBitStr(adata2.X.data,sn)
        # open(SNBS_OUTPUT_FILE_2, 'w').write(''.join(snbs2))
        # SNBS_OUTPUT_FILE_3 = f"{base}_SNBS_File3 sn_{sn} restored.txt"
        # adata3 = ad.read_h5ad(OUTPUT_FILE_3)
        # snbs3 = HidingMethodEmbr.GetSnbBitStr(adata3.X.data,sn)
        # open(SNBS_OUTPUT_FILE_3, 'w').write(''.join(snbs3))

        print("=" * 50)
        print("单细胞数据可逆信息隐藏系统")
        print("=" * 50)

        # region 阶段1: 数据嵌入
        print(
            f"阶段1: {INPUT_FILE} 1 start @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        t0 = time.perf_counter()
        if not os.path.exists(OUTPUT_FILE_2):
            if method == "Embr":
                embeddedData = EmbedData(
                    INPUT_FILE,
                    OUTPUT_FILE_1,
                    OUTPUT_FILE_2,
                    SECRET_MSG,
                    sn,
                    m,
                    method="Embr",
                )
            if method == "de":
                embeddedData, swapFlags = EmbedData(
                    INPUT_FILE, OUTPUT_FILE_2, SECRET_MSG, method="de"
                )
        print("    耗时:", time.perf_counter() - t0)
        print(
            f"阶段1: {INPUT_FILE} 1 end @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        # endregion

        # region 阶段2: 数据提取
        print(
            f"阶段2: {INPUT_FILE} 2 start @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        t0 = time.perf_counter()
        compairSecStr = ""
        extractedData = ""
        if not os.path.exists(OUTPUT_FILE_3):
            if method == "Embr":
                extractedData = ExtractData(
                    OUTPUT_FILE_2,
                    OUTPUT_FILE_3,
                    len(SECRET_MSG) * 8,
                    sn,
                    m,
                    method="Embr",
                )
            if method == "de":
                extractedData = ExtractData(
                    OUTPUT_FILE_2, OUTPUT_FILE_3, len(SECRET_MSG) * 8, method="de"
                )
        # print(extractedData)
        compairSecStr = SECRET_MSG == extractedData
        # 追加写入 txt
        with open(OUTPUT_RESULT_TXT, "a", encoding="utf-8") as f:
            f.write(f"\n=== {INPUT_FILE} ===\n")
            f.write(
                "\n=== Result of secret string restored @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S') ===\n"
            )
            f.write(f"    Result of secret string restored:{compairSecStr}")
        print("    耗时:", time.perf_counter() - t0)
        print(
            f"阶段2: {INPUT_FILE} 2 end @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        # endregion

        # region 阶段3: 生物兼容性验证
        # 3-1
        print(
            f"阶段3: {INPUT_FILE} 3-1 start @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )

        found = False
        if os.path.exists(OUTPUT_RESULT_TXT):
            target = "结构相似性 (近似 Mantel)"  # results alreay exist
            with open(OUTPUT_RESULT_TXT, "r", encoding="utf-8") as f:
                for line in f:
                    if target in line:
                        found = True
                        break
        if found:
            print(
                f"阶段3: {INPUT_FILE} 3-3 end @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
            )
            continue  # skip

        print(
            f"\n=== 数值保真度检查(origin - stego file) @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ===\n"
        )
        t0 = time.perf_counter()
        result = VerifyNumericalFidelity(OUTPUT_FILE_1, OUTPUT_FILE_2, verbose=True)
        # 追加写入 txt
        with open(OUTPUT_RESULT_TXT, "a", encoding="utf-8") as f:
            f.write(f"\n=== {INPUT_FILE} ===\n")
            f.write(
                f"\n=== 数值保真度检查(origin - stego file) @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ===\n"
            )
            result = {k: float(v) for k, v in result.items()}
            json.dump(result, f, ensure_ascii=False, indent=2)

        print(
            f"\n=== 数值保真度检查(origin - restored file) @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ===\n"
        )
        result = VerifyNumericalFidelity(OUTPUT_FILE_1, OUTPUT_FILE_3, verbose=True)
        # 追加写入 txt
        with open(OUTPUT_RESULT_TXT, "a", encoding="utf-8") as f:
            f.write(f"\n=== {INPUT_FILE} ===\n")
            f.write(
                f"\n=== 数值保真度检查(origin - restored file) @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} ===\n"
            )
            result = {k: float(v) for k, v in result.items()}
            json.dump(result, f, ensure_ascii=False, indent=2)

        print(
            f"阶段3: {INPUT_FILE} 3-1 end @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        # 3-2
        # print("PCA分析")
        print(
            f"{INPUT_FILE} 3-2 start @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        result = VerifyPca(OUTPUT_FILE_1, OUTPUT_FILE_2)
        with open(OUTPUT_RESULT_TXT, "a", encoding="utf-8") as f:
            f.write(
                "\n=== PCA验证 @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S') ===\n"
            )
            json.dump(result, f, ensure_ascii=False, indent=2)
            f.write("\n")
        print(
            f"阶段3: {INPUT_FILE} 3-2 end @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        # 3-3
        # print("UMAP分析")
        print(
            f"阶段3: {INPUT_FILE} 3-3 start @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        result = VerifyWithSharedUMAP(
            file1_path=OUTPUT_FILE_1,
            file2_path=OUTPUT_FILE_2,
            output_figure_path=OUTPUT_FIGURE,
            batch_key="dataset",
            label_key="cell_type",  # 替换为你的真实列名，或留空触发 leiden
            n_comps=30,  # 更小的 PCA 维度
            umap_neighbors=20,  # 更小的邻居数
            mantel_sample_size=1000,  # 控制采样量
            use_approx_mantel=True,  # 启用近似 Mantel
            plot=True,
            verbose=True,
        )
        # print(result)

        with open(OUTPUT_RESULT_TXT, "a", encoding="utf-8") as f:
            f.write(
                "\n=== UMAP验证 @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S') ===\n"
            )
            json.dump(result, f, ensure_ascii=False, indent=2)
            f.write("\n")
        print("    耗时:", time.perf_counter() - t0)
        print(
            f"阶段3: {INPUT_FILE} 3-3 end @ {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
        )
        # endregion
    pass


if __name__ == "__main__":
    # Run the whole pipeline for sn = 5, 4, 3, 2, 1 (descending).
    for bit_count in range(5, 0, -1):
        MainFunc(method="Embr", sn=bit_count, m=7)