import scanpy as sc
import matplotlib.pyplot as pl
import os
import logging
import pandas as pd 
import omicverse as ov
import anndata as ad
import symphonypy as sp


def ck_file_dir(path: str) -> str:
    """
    Ensure that the directory containing ``path`` exists, creating it if needed.

    Parameters:
        path (str): The file path whose parent directory should be checked/created.

    Returns:
        str: The original file path, with its parent directory guaranteed to exist.

    Example:
        ck_file_dir("output/results/file.txt")
        # If 'output/results/' does not exist it is created; the path is returned unchanged.

    Author: WYK
    Date: 2025-01
    """
    parent = os.path.dirname(path)
    # A bare filename has no directory component; os.makedirs("") would raise,
    # so only create when there actually is a parent directory.
    if parent and not os.path.exists(parent):
        logging.info(f"Path '{parent}' is automatically created.")
        # exist_ok guards against a race with concurrent creation.
        os.makedirs(parent, exist_ok=True)
    return path


def ck_dir(path: str) -> str:
    """
    Ensure that the given directory exists, creating it if needed.

    ## Parameters:
        - path (str): The directory path to check or create.

    ## Returns:
        - path (str): The original directory path, guaranteed to exist.
          (The previous ``-> None`` annotation was wrong: the path is returned.)

    ## Example:
        ck_dir("output/results/")
        # If 'output/results/' does not exist, it is created and the path returned.

    Author: WYK
    Date: 2025-01
    """
    if not os.path.exists(path):
        logging.info(f"Path '{path}' is automatically created.")
        # exist_ok guards against a race with concurrent creation.
        os.makedirs(path, exist_ok=True)
    return path


def timer_decorator(func):
    """
    ## Description:
    A decorator that measures and prints the execution time of the decorated function.

    The duration in seconds is printed, centered within a line of '-' characters.
    ``functools.wraps`` preserves the wrapped function's ``__name__``/``__doc__``
    so introspection and logging still see the original function.

    ## Parameters:
        - func (callable): The function to be decorated.

    ## Returns:
        - wrapper (callable): The wrapped function that includes timing.

    ## Example:
        @timer_decorator
        def example_function():
            time.sleep(2)
            return "Done"

        example_function()
        # Output: ---------- example_function takes: 2.0s ----------

    Author: WYK
    Date: 2025-01
    """
    import functools
    import time

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start_time = time.time()          # record start time
        result = func(*args, **kwargs)    # call the original function
        end_time = time.time()            # record end time
        # Print the elapsed wall-clock time.
        print(
            f" {func.__name__} takes: {end_time - start_time:.1f}s ".center(50, '-'))
        return result                     # propagate the original return value
    return wrapper
   

def batch_remove(adata: ad.AnnData, methods='harmony',
                 batch_key='Sample',
                 use_rep='scaled|original|X_pca', n_pcs=30):
    """
    Run batch-effect correction on an AnnData object (in place, via omicverse).

    Parameters:
        adata (anndata.AnnData): Input AnnData; corrected embedding is written
            into it by ``ov.single.batch_correction``.
        methods (str): Correction method, one of 'harmony', 'combat',
            'scanorama', 'scVI'. Default 'harmony'.
        batch_key (str): Column in ``adata.obs`` holding batch labels. Default 'Sample'.
        use_rep (str): Representation in ``adata.obsm`` used as input.
        n_pcs (int): Number of principal components passed to the method.

    Raises:
        ValueError: If ``methods`` is not one of the supported options
            (previously an unknown method silently did nothing).
    """
    if methods == 'harmony':
        ov.single.batch_correction(adata, batch_key=batch_key, use_rep=use_rep,
                                   methods=methods, n_pcs=n_pcs, max_iter_harmony=40)
    elif methods == 'scVI':
        ov.single.batch_correction(adata, batch_key=batch_key, use_rep=use_rep,
                                   methods='scVI', n_layers=2, n_latent=30,
                                   gene_likelihood="nb", n_pcs=n_pcs)
    elif methods in ('combat', 'scanorama'):
        ov.single.batch_correction(adata, batch_key=batch_key, use_rep=use_rep,
                                   methods=methods, n_pcs=n_pcs)
    else:
        raise ValueError(
            f"Unsupported methods '{methods}'. "
            "Choose from 'harmony', 'combat', 'scanorama', 'scVI'.")


def qc_pl(adata, mito=25, n_gene=(500,2e4), n_counts=(0,2e4), od='out/4.GSE202642_hcc/out/1.py/') -> None:
    """
    Draw quality-control (QC) plots for an AnnData object and save them to
    the given output directory.

    Parameters:
        adata (anndata.AnnData): Input AnnData with QC metrics already computed.
        mito (int): Mitochondrial-percentage threshold to mark. Default 25.
        n_gene (tuple): Gene-count range; the lower bound is drawn. Default (500, 2e4).
        n_counts (tuple): Total-count range. Default (0, 2e4).
        od (str): Output directory. Default 'out/4.GSE202642_hcc/out/1.py/'.

    Returns:
        None
    """
    fig, axes = pl.subplots(ncols=2, figsize=(8, 4))
    ck_dir(od)
    sc.pl.scatter(adata, x='total_counts', y='pct_counts_mt',
                  ax=axes[0], show=False)
    sc.pl.scatter(adata, x='total_counts', y='n_genes_by_counts',
                  ax=axes[1], show=False)
    # Dashed red lines mark the filtering thresholds on each panel.
    x_max = max(adata.obs['total_counts'])
    axes[0].hlines(y=mito, xmin=0, xmax=x_max, color='red', ls='dashed')
    axes[1].hlines(y=n_gene[0], xmin=0, xmax=x_max, color='red', ls='dashed')
    fig.tight_layout()
    pl.savefig(os.path.join(od, '0.qc_scatter.png'),
               bbox_inches='tight', dpi=300)
    # One violin plot per QC metric, saved individually.
    for metric in ("n_genes_by_counts", "total_counts", "pct_counts_mt"):
        with pl.rc_context({'figure.figsize': (4, 4.2)}):
            sc.pl.violin(adata, metric, jitter=0, multi_panel=False)
        pl.savefig(os.path.join(od, f'0.qc_vln_{metric}.png'),
                   bbox_inches='tight', dpi=300)


def qc(adata:ad.AnnData, mito=25, n_gene=(500,2e4), n_counts=(0,2e4), od='out/4.GSE202642_hcc/out/1.py/',ribo = None):
    """
    Run quality control on an AnnData object and return the filtered copy.

    Parameters:
        - adata (anndata.AnnData): Input AnnData object.
        - mito (int): Upper bound on mitochondrial percentage. Default 25.
        - n_gene (tuple): Allowed (min, max) genes per cell. Default (500, 2e4).
        - n_counts (tuple): Allowed (min, max) total counts per cell. Default (0, 2e4).
        - od (str): Output directory for before/after QC plots.
        - ribo (str | tuple | None): Name prefix(es) of ribosomal genes; when
          given, ribosomal QC metrics are computed as well.

    Returns:
        adata4qc (anndata.AnnData): Copy of ``adata`` containing only cells
        that pass all thresholds.
    """
    ck_dir(od)
    # Flag mitochondrial genes by the human "MT-" name prefix.
    adata.var["mt"] = adata.var_names.str.startswith("MT-")

    if ribo is not None:
        adata.var["ribo"] = adata.var_names.str.startswith(ribo)
        sc.pp.calculate_qc_metrics(
            adata, qc_vars=["ribo"], inplace=True, percent_top=[50], log1p=True
        )

    sc.pp.calculate_qc_metrics(
        adata, qc_vars=["mt"], inplace=True, percent_top=[50], log1p=True
    )
    qc_pl(adata = adata, mito=mito, n_gene=n_gene,
          n_counts=n_counts, od= f'{od}/before_qc')

    # Single boolean mask instead of five chained view subscripts: avoids
    # AnnData view-of-view copies/ImplicitModificationWarning and returns a
    # real AnnData copy rather than a view.
    obs = adata.obs
    keep = (
        (obs.n_genes_by_counts > n_gene[0])
        & (obs.n_genes_by_counts < n_gene[1])
        & (obs.pct_counts_mt < mito)
        & (obs.total_counts > n_counts[0])
        & (obs.total_counts < n_counts[1])
    )
    adata4qc = adata[keep, :].copy()

    print(f"rawdata gets {adata.obs.shape[0]} cell , delete {adata.obs.shape[0] - adata4qc.obs.shape[0]} cells by qc. {adata4qc.obs.shape[0]} cells.")

    qc_pl(adata = adata4qc, mito=mito, n_gene=n_gene,
          n_counts=n_counts, od= f'{od}/after_qc')

    return adata4qc


def remove_doublelet(adata):
    """
    Remove predicted doublets from an AnnData object using scrublet.

    Parameters:
        adata (ad.AnnData): Input AnnData object.

    Return:
        adata_no_doublelets (ad.AnnData): New AnnData with doublets removed.
    """
    total_before = adata.shape[0]
    print(f'Original cell number: {total_before}')
    print('Begin of post doublets removal')
    # scrublet writes a boolean 'predicted_doublet' column into .obs.
    sc.external.pp.scrublet(adata, random_state=0)
    singlet_mask = adata.obs['predicted_doublet'] == False
    adata_no_doublelets = adata[singlet_mask, :].copy()
    total_after = adata_no_doublelets.shape[0]
    print(f'Cells retained after scrublet: {total_after}, {total_before-total_after} removed.')
    print(f'End of post doublets removal.')
    return adata_no_doublelets


def sort_df_by_col(df1:pd.DataFrame, df2:pd.DataFrame, key_col_in_df1:str, key_col_in_df2:str):
    """
    Reorder the rows of ``df1`` so that its ``key_col_in_df1`` values follow
    the order given by ``df2[key_col_in_df2]``; return the reordered frame.
    """
    desired_order = df2[key_col_in_df2]
    reordered = df1.set_index(key_col_in_df1).loc[desired_order]
    return reordered.reset_index()


def umap_tsne(adata: ad.AnnData, n_pcs=50,
              use_rep="scaled|original|X_pca", random_state=2022,
              umap=True, tsne=False, mde=False) -> None:
    """
    Compute 2-D embeddings (UMAP, t-SNE and/or MDE) for visualization.

    Results are written into the AnnData object in place:
    ``adata.obsm['X_umap']``, ``adata.obsm['X_tsne']`` and
    ``adata.obsm['X_mde']`` respectively.

    Parameters
    ----------
    adata : anndata.AnnData
        Single-cell dataset to embed.
    n_pcs : int, optional
        Number of principal components used for the neighbor graph (UMAP).
        Default 50.
    use_rep : str, optional
        Key in ``adata.obsm`` used as the input representation.
        Default "scaled|original|X_pca".
    random_state : int, optional
        Seed for reproducibility. Default 2022.
    umap, tsne, mde : bool, optional
        Which embeddings to compute. Defaults: umap=True, tsne=False, mde=False.

    Returns
    -------
    None
        All results are stored on the input AnnData object.

    Notes
    -----
    UMAP first builds a 15-neighbor graph with ``sc.pp.neighbors``; t-SNE runs
    directly on ``use_rep``; MDE is computed with ``ov.utils.mde``.

    Examples
    --------
    >>> umap_tsne(adata, umap=True, tsne=False, mde=False)
    >>> umap_tsne(adata, n_pcs=30, use_rep="X_pca",
    ...           umap=True, tsne=True, mde=True, random_state=42)

    See Also
    --------
    scanpy.pp.neighbors, scanpy.tl.umap, scanpy.tl.tsne, ov.utils.mde
    """
    if umap:
        print("Running UMAP ...")
        sc.pp.neighbors(adata, n_neighbors=15, n_pcs=n_pcs,
                        use_rep=use_rep, random_state=random_state)
        sc.tl.umap(adata, random_state=random_state)

    if tsne:
        print("Running t-SNE ...")
        sc.tl.tsne(adata, use_rep=use_rep, random_state=random_state)

    if mde:
        print("Running mde...")
        adata.obsm["X_mde"] = ov.utils.mde(adata.obsm[use_rep])

    
def normalize_scale_pca(adata: ad.AnnData, n_HVGs=2000, npcs=30,
                        batch_key='batch_key_col', do_umap_tsne=False, by_scanpy=False) -> ad.AnnData:
    """
    Normalize, scale, and run PCA on an AnnData object.

    Raw counts are first stashed in ``adata.layers['counts']``. Depending on
    ``by_scanpy``, either scanpy's normalize/log1p pipeline or omicverse's
    ``shiftlog|pearson`` preprocessing is used. In both branches the full
    (pre-subset) object is stored in ``adata.raw``, the matrix is subset to
    highly variable genes, scaled (max_value=10), and reduced with PCA.
    Optionally UMAP/t-SNE are computed on the PCA embedding.

    Parameters:
        adata (ad.AnnData) : The AnnData object to be processed (``.X`` is
            expected to hold raw counts).
        n_HVGs (int) : The number of highly variable genes to select.
        npcs (int) : The number of principal components to compute.
        batch_key (str) : Column in ``adata.obs`` with batch information.
        do_umap_tsne (bool) : If True, run UMAP/t-SNE on the PCA result. Default False.
        by_scanpy (bool) : If True, use scanpy's implementation; otherwise
            omicverse's. Default False.

    Returns:
        adata (ad.AnnData) : The processed AnnData (HVG-subset, scaled, with PCA).
    Example:
        adata = normalize_scale_pca(adata, n_HVGs=2000, npcs=50, batch_key='batch', do_umap_tsne=True, by_scanpy=False)
    Author: WYK
    Date: 2025-01
    """
    # Preserve the raw counts before any transformation.
    adata.layers['counts'] = adata.X.copy()
    
    if bool(by_scanpy):
        sc.pp.normalize_total(adata, target_sum=1e5)
        sc.pp.log1p(adata)
        adata.layers["log1p_norm"] = adata.X.copy()
        
        sc.pp.highly_variable_genes(
            adata,
            batch_key=batch_key,
            n_top_genes=n_HVGs,
            flavor = 'seurat_v3'
        )
        # Keep the full gene set in .raw (and counts in .uns) before subsetting to HVGs.
        adata.raw = adata.copy()
        adata.uns['layers_counts'] = adata.layers['counts'] 
        
        adata = adata[:, adata.var.highly_variable]
        sc.pp.scale(adata, max_value=10)
        sc.pp.pca(adata, n_comps=npcs, zero_center=False)
        if bool(do_umap_tsne):
            umap_tsne(adata, n_pcs=npcs, use_rep="X_pca")

    else:
        adata = ov.pp.preprocess(adata, mode='shiftlog|pearson', 
                                 n_HVGs=n_HVGs, organism = 'human',batch_key=batch_key)

        # Select highly variable genes, keeping the original data in .raw first.
        adata.raw = adata.copy()
        adata.uns['layers_counts'] = adata.layers['counts'] 
        
        adata = adata[:, adata.var['highly_variable_features'] == True]

        ov.pp.scale(adata, max_value=10)
        ov.pp.pca(adata, layer='scaled', n_pcs=npcs)
        if bool(do_umap_tsne):
            umap_tsne(adata, n_pcs=npcs, use_rep="scaled|original|X_pca")
            
    return adata


def ifelse(condition, true_value, false_value):
    """
    Vectorised conditional: return ``true_value``/``false_value`` for a single
    boolean, or a per-element list of choices for an iterable of booleans.

    Raises:
        ValueError: If ``condition`` is neither a bool nor an iterable.
    """
    if isinstance(condition, bool):
        return true_value if condition else false_value
    if hasattr(condition, '__iter__'):
        return [true_value if item else false_value for item in condition]
    raise ValueError("Invalid condition. Must be a boolean or an iterable.")


class my_logging:
    """Minimal logger that echoes timestamped messages to stdout and, when
    configured, appends them to a log file."""

    def __init__(self, name = 'my_log', log_file=None, time_format = "%Y-%m-%d %H:%M:%S"):
        """
        Create a logger instance.

        Parameters:
            name (str) : Name shown in every message. Default 'my_log'.
            log_file (str, optional) : File to append messages to; console-only
                when None. Default None.
            time_format (str) : strftime format for timestamps.
                Default "%Y-%m-%d %H:%M:%S".

        ## Attributes:
            - time_format (str): Timestamp format for messages.
            - msg (str): Most recent formatted message.
            - log_file (str): Target log file path (or None).
            - name (str): Logger name.
            - msgs (list): All messages produced so far.

        ## Example:
            logger = my_logging(name="my_logger", log_file="output/log.txt")
            logger.info("This is a log message.")
            # The message is printed and appended to 'output/log.txt'.

        Author: WYK
        Date: 2025-01
        """
        self.name = name
        self.log_file = log_file
        self.time_format = time_format
        self.msg = None
        self.msgs = []

    def info(self, msg):
        """
        Format *msg* with a timestamp, print it, remember it, and append it
        to the log file when one is configured.

        Parameters:
            msg (str) : The message to log.
        """
        import datetime
        stamp = datetime.datetime.now().strftime(self.time_format)
        self.msg = f"[{stamp}] - {self.name} - {msg}"
        print(self.msg)
        self.msgs.append(self.msg)
        self._write(log_file=self.log_file, msg=self.msg)

    def del_all_msgs(self):
        """Discard every stored message."""
        self.msgs = []

    def _ck_file_dir(self, path: str) -> None:
        """
        Create the parent directory of *path* if it does not exist.

        Args:
            path (str): File path whose parent directory is ensured.
        """
        parent = os.path.abspath(os.path.dirname(path))
        if not os.path.exists(parent):
            os.makedirs(parent, exist_ok=True)
        return path

    def _write(self, log_file: str, msg: str):
        # Append one line per message; no-op when no log file is configured.
        if log_file is None:
            return
        self._ck_file_dir(log_file)
        with open(log_file, 'a') as f:
            f.write(f"{msg}\n")

    def __repr__(self) -> str:
        str1 = f'<Class my_logging> the log of {self.name}, get {len(self.msgs)} msgs'
        if self.log_file is not None:
            str1 = f'{str1}, with {os.path.abspath(self.log_file)}'
        return str1


def sym_mapping(adata_ref: ad.AnnData,
                adata_query: ad.AnnData,
                tSNE_model_of_adata_ref = None,
                batch_key_col_in_query='batch_key_col',
                adata_ref_label='source',
                normalized_data: bool = True,
                reference_primary_basis = "X_pca_harmony",
                do_label_prediction = True) -> ad.AnnData:
    """
    Perform Symphony mapping to align query data (`adata_query`) with reference data (`adata_ref`).

    Optionally normalizes and batch-corrects the query first, then transfers
    the harmony embedding onto the reference, ingests UMAP coordinates,
    optionally predicts labels via kNN transfer, computes per-cell mapping
    confidence, and — when a fitted t-SNE model for the reference is supplied —
    maps t-SNE coordinates as well.

    Parameters:
        adata_ref (ad.AnnData) : reference adata
        adata_query (ad.AnnData) : query adata
        tSNE_model_of_adata_ref : fitted t-SNE model of the reference, or None.
        batch_key_col_in_query (str) : batch column in the query ``.obs``.
        adata_ref_label (str) : reference label column to transfer onto the query.
        normalized_data (bool) : whether to normalize/scale/batch-correct the query first.
        reference_primary_basis (str) : reference embedding used for mapping.
        do_label_prediction (bool) : whether to run kNN label transfer.

    return:
        adata_query (ad.AnnData) : the query AnnData with mapping results added.
    """

    logger = my_logging(name="sym_mapping")

    if bool(normalized_data):
        logger.info('标准化数据')
        adata_query = normalize_scale_pca(
            adata=adata_query, n_HVGs=3000, npcs=30, batch_key=batch_key_col_in_query, by_scanpy=True)
        logger.info('去批次')
        sp.pp.harmony_integrate(adata_query, key=batch_key_col_in_query,
                            verbose=True, max_iter_harmony=30)

    # Mapping Harmony coordinates
    sp.tl.map_embedding(adata_query=adata_query, adata_ref=adata_ref,
                        transferred_adjusted_basis="X_pca_harmony_justed",
                        transferred_primary_basis="X_pca_harmony_reference",
                        reference_primary_basis=reference_primary_basis)

    logger.info('Mapping UMAP coordinates')
    # Mapping UMAP coordinates
    sp.tl.ingest(adata_query=adata_query, adata_ref=adata_ref,embedding_method = 'umap',
                 use_rep="X_pca_harmony_justed")

    if bool(do_label_prediction):
        logger.info('Labels prediction')
        # Labels prediction
        sp.tl.transfer_labels_kNN(
            adata_query=adata_query,
            adata_ref=adata_ref,
            ref_labels=[adata_ref_label],
            ref_basis=reference_primary_basis,
            query_basis="X_pca_harmony_justed"
        )

    logger.info('Per cell mapping score')
    # Per cell mapping score
    sp.tl.per_cell_confidence(
        adata_query=adata_query,
        adata_ref=adata_ref,
        ref_basis_adjusted=reference_primary_basis,
        query_basis_adjusted="X_pca_harmony_justed",
        transferred_primary_basis="X_pca_harmony_reference",
        obs="symphony_per_cell_dist"
    )

    if tSNE_model_of_adata_ref is not None:
        # Only log/map t-SNE when a reference model is actually provided
        # (the log line previously fired even when no t-SNE was computed).
        logger.info('Mapping t-SNE coordinates')
        sp.tl.tsne(adata_query, use_rep="X_pca_harmony_justed", use_model=tSNE_model_of_adata_ref)

    # BUG FIX: these two lines were previously nested inside the t-SNE branch,
    # so the finish log and message cleanup only ran when a t-SNE model was
    # supplied. They belong to every invocation.
    logger.info('Mapping Finished')
    logger.del_all_msgs()

    return adata_query


import typing as tp
def sym_mapping_plt1(adata_ref: ad.AnnData,
                     adata_after_sym: ad.AnnData,
                     cohort_type: str = 'NSCLC',
                     od: str = "out/12.构建发育ref/0.发育数据",
                     key_col_for_map: str='source',
                     figsize: tuple = (5, 5),
                     legend_fontsize: tp.Union[float, None] = 11,
                     other_color_dict: tp.Optional[dict[str, list[str]]] = None) -> None:
    """
    Generate and save UMAP and t-SNE plots for reference and query datasets after Symphony mapping.

    Creates UMAP/t-SNE visualizations for the reference (`adata_ref`) and
    query (`adata_after_sym`) datasets, plus combined plots showing their
    alignment, and saves everything under the output directory.

    Args:
        - adata_ref (ad.AnnData) : The reference AnnData object.
        - adata_after_sym (ad.AnnData) : The query AnnData after Symphony mapping.
        - cohort_type (str) : The cohort being analyzed (e.g. 'NSCLC'). Default 'NSCLC'.
        - od (str) : Output directory for the plots.
        - key_col_for_map (str) : Key in both ``.obs`` used for cell-type coloring. Default 'source'.
        - figsize (tuple) : Figure size. Default (5, 5).
        - legend_fontsize (float or None) : Legend font size. Default 11.
        - other_color_dict (dict or None) : Extra color mappings merged over the defaults. Default None.

    ## Returns:
        None. The generated plots are saved to the output directory.

    ## Example:
        sym_mapping_plt1(adata_ref, adata_after_sym, cohort_type='NSCLC', od="output/plots", key_col_for_map='source')

    Author: WYK
    Date: 2025-01
    """
    # Default palette keyed by dataset/cohort label.
    # BUG FIX: the last hex code was '#19a9a9"' (stray trailing quote inside
    # the string), which matplotlib rejects as an invalid color.
    a = ["#215ea8", "#f768a0", "#41ab5d", "#d55f4d", "#d7859d",
         "#689e45", "#5a8ba8", "#92c1b6", "#dfc27c", "#80b0d2", '#19a9a9']
    names = ['HCC', 'NSCLC', 'CRC', 'PIE', 'PoIE',
             'Plac', 'emb', 'cancer', 'Ref', 'normal_rectum', "mice_bearing_tumor"]
    d = {name: value for name, value in zip(names, a)}

    if other_color_dict is not None:
        d.update(other_color_dict)

    print(f"color key:\n{d}\n")

    sc.pl.umap(
        adata_after_sym,
        color=["symphony_per_cell_dist", key_col_for_map],
        frameon=False,
        title=["Mapping score\n(less is better)",
               "Mapping cell type in query dataset"],
        vmax=20,
        cmap="RdBu_r", legend_fontsize=legend_fontsize
    )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.umap_sym_cell_score.png"),
               bbox_inches='tight', dpi=300)

    pl.rcParams['figure.figsize'] = figsize
    ov.utils.embedding(adata_after_sym,
                       basis='X_umap',
                       color=key_col_for_map,
                       palette=d,
                       frameon='small',
                       title=f"Query dataset {cohort_type}",
                       show=False
                       )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.umap_{cohort_type}.png"),
               bbox_inches='tight', dpi=300)

    ov.utils.embedding(
        adata_ref, basis='X_umap',
        color=key_col_for_map,
        palette=d,
        frameon='small',
        title="Reference dataset emb",
        show=False, legend_fontsize=legend_fontsize
    )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.umap_reference.png"),
               bbox_inches='tight', dpi=300)

    pl.rcParams['figure.figsize'] = figsize
    ov.utils.embedding(adata_after_sym,
                       basis='X_tsne',
                       color=key_col_for_map,
                       palette=d,
                       frameon='none',
                       title=f"Query dataset {cohort_type}",
                       show=False, legend_fontsize=legend_fontsize
                       )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.tsne_{cohort_type}.png"),
               bbox_inches='tight', dpi=300)

    ov.utils.embedding(
        adata_ref, basis='X_tsne',
        color=key_col_for_map,
        palette=d,
        frameon='none',
        title="Reference dataset emb",
        show=False, legend_fontsize=legend_fontsize
    )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.tsne_reference.png"),
               bbox_inches='tight', dpi=300)

    adatas = sc.concat([adata_ref, adata_after_sym], join='outer')
    i = adatas.obs['batch_key_col'].str.contains(cohort_type).to_list()
    adatas.obs['type'] = 'Ref'
    # BUG FIX: use .loc instead of chained assignment
    # (adatas.obs['type'][i] = ...), which raises SettingWithCopyWarning and
    # can silently not write under pandas copy-on-write.
    adatas.obs.loc[i, 'type'] = cohort_type

    # Combined UMAP / t-SNE plots of reference and query together.
    pl.rcParams['figure.figsize'] = figsize
    ov.utils.embedding(
        adatas,
        basis='X_umap',
        color="type",
        frameon='none',
        palette=d,
        title="Ref and cancer",
        show=False,
        legend_fontsize=legend_fontsize
    )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.umap_{cohort_type}_Ref_and_cancer.png"),
               bbox_inches='tight', dpi=300)

    ov.utils.embedding(
        adatas, basis='X_tsne',
        color="type",
        frameon='none',
        palette=d,
        legend_loc='none',
        title="Ref and cancer",
        show=False,
        legend_fontsize=legend_fontsize
    )
    pl.savefig(ck_file_dir(f"{od}/1.{cohort_type}/1.tsne_{cohort_type}_Ref_and_cancer.png"),
               bbox_inches='tight', dpi=300)


def get_dict_fromJSON(json_path:str):
    """
    ## Description:
    Load a JSON file and return its contents as a Python dictionary.

    Args:
        json_path (str) : The path to the JSON file to be loaded (must be an
            existing file with a '.json' suffix).

    ## Returns:
        dict: A dictionary containing the data from the JSON file.

    ## Raises:
        ValueError: If ``json_path`` is not an existing '.json' file.

    ## Example:
        json_path = "data/markers.json"
        marker_dict = get_dict_fromJSON(json_path)

    Author: WYK
    Date: 2025-01
    """
    import pathlib
    json_path = pathlib.Path(json_path)

    if not (json_path.is_file() and json_path.suffix == '.json'):
        # BUG FIX: the ValueError was previously constructed but never raised,
        # so invalid paths silently returned None.
        raise ValueError("`json_path` is not a json file.")

    import json
    with open(json_path, 'r') as f:
        return dict(json.load(f))

def ensure_gene_dict_in_adata(adata: ad.AnnData, martker_genes_dict: dict):
    """
    检查标记基因是否存在于AnnData对象中，并返回一个新的字典，其中包含存在于AnnData对象中的标记基因。

    Args:
        adata (ad.AnnData): AnnData对象，包含基因表达数据。
        martker_genes_dict (dict): 一个字典，其中键是细胞类型，值是标记基因列表。

    Returns:
        dict: 一个新的字典，其中键是细胞类型，值是存在于AnnData对象中的标记基因列表。
    """
    # 创建一个新的字典来存储存在于AnnData对象中的标记基因
    marker_genes_in_data = dict()
    
    # 遍历输入的标记基因字典
    for ct, markers in martker_genes_dict.items():
        # 创建一个列表来存储在AnnData对象中找到的标记基因
        markers_found = list()
        
        # 遍历每个细胞类型的标记基因列表
        for marker in markers:
            # 检查标记基因是否存在于AnnData对象的原始数据的基因索引中
            if marker in adata.raw.var.index:
                # 如果存在，将标记基因添加到找到的标记基因列表中
                markers_found.append(marker)
        
        # 将找到的标记基因列表添加到新的字典中，键为细胞类型
        marker_genes_in_data[ct] = markers_found

    # 返回包含存在于AnnData对象中的标记基因的新字典
    return marker_genes_in_data


def cls_in_anndata(adata: ad.AnnData,
                   n_neighbors=15,
                   n_pcs=30,
                   use_rep="X_pca_harmony",
                   res_list=(0.05, 0.1, 0.2, 0.5, 1.0, 1.2, 1.5, 2.0),
                   cls_col=None,
                   basis='X_umap', od='./') -> None:
    """
    Leiden-cluster an AnnData object at several resolutions and save one
    embedding figure colored by each clustering.

    Builds the kNN graph on ``use_rep``, runs Leiden at every resolution in
    ``res_list`` (stored as ``leiden_res_{res}`` in ``adata.obs``), and writes
    a multi-panel embedding plot to ``od``.

    -----
    Args:
        adata (ad.AnnData): The AnnData object to cluster (modified in place).
        n_neighbors (int): Neighbors for the kNN graph. Default is 15.
        n_pcs (int): Principal components for the kNN graph. Default is 30.
        use_rep (str): Representation in ``adata.obsm`` for the graph. Default "X_pca_harmony".
        res_list (sequence of float): Leiden resolutions to run.
            Default (0.05, 0.1, 0.2, 0.5, 1.0, 1.2, 1.5, 2.0).
        cls_col (list | None): Cluster color palette; when None, defaults to
            ``ov.palette()[4:]``.
        basis (str): Embedding in ``adata.obsm`` for the plot. Default 'X_umap'.
        od (str): Output directory for the figure. Default './'.

    ## Returns
        None. A figure '0.cls_{basis}.png' is saved to the output directory.

    Raises:
        ValueError: If ``use_rep`` or ``basis`` is missing from ``adata.obsm``.

    Author: WYK
    Date: 2025-01
    """
    if cls_col is None:
        # BUG FIX: resolved lazily. The old default `cls_col=ov.palette()[4:]`
        # was evaluated once at import time, baking a single shared list into
        # the signature (mutable-default anti-pattern).
        cls_col = ov.palette()[4:]

    if not all(i in adata.obsm.keys() for i in [use_rep, basis]):
        raise ValueError(
            f"Please check the `use_rep` and `basis` in adata.obsm.keys()")

    sc.pp.neighbors(adata,
                    n_neighbors=n_neighbors,
                    n_pcs=n_pcs,
                    use_rep=use_rep)

    import tqdm
    for res in tqdm.tqdm(res_list):
        sc.tl.leiden(adata, key_added=f"leiden_res_{res}", resolution=res)

    sc.set_figure_params(figsize=(4.5, 4.5))
    ov.utils.embedding(adata,
                       basis=basis,
                       color=[f"leiden_res_{i}" for i in res_list],
                       title=[f"Resolution: {i}" for i in res_list],
                       palette=cls_col,
                       ncols=3,
                       show=False,
                       frameon='small')
    pl.savefig(
        ck_file_dir(f"{od}/0.cls_{basis}.png"), bbox_inches='tight', dpi=300)


def rank_genes_groups_res_to_df(result: dict) -> pd.DataFrame:
    """
    ## Description:
    Convert the result of `sc.tl.rank_genes_groups` into a pandas DataFrame.

    This function takes the result object from `sc.tl.rank_genes_groups` and converts it into a structured DataFrame. 
    The DataFrame contains information about the differentially expressed genes (DEGs) for each group, including gene names, 
    scores, log2 fold changes, p-values, and adjusted p-values.

    ## Parameters:
        - result (dict-like): The result object returned by `sc.tl.rank_genes_groups`. It should contain the following keys:
            - 'names': A structured array containing gene names for each group.
            - 'scores': A structured array containing scores for each gene in each group.
            - 'pvals': A structured array containing p-values for each gene in each group.
            - 'pvals_adj': A structured array containing adjusted p-values for each gene in each group.
            - 'logfoldchanges': A structured array containing log2 fold changes for each gene in each group.

    ## Returns:
        - pd.DataFrame: A DataFrame with the following columns:
            - 'Group': The name of the group (cluster or condition).
            - 'Gene': The name of the gene.
            - 'Score': The score of the gene in the group.
            - 'log2FC': The log2 fold change of the gene in the group.
            - 'Pval': The p-value of the gene in the group.
            - 'Pval_adj': The adjusted p-value of the gene in the group.

    ## Example:
        sc.tl.rank_genes_groups(adata, groupby='leiden')\n
        df = rank_genes_groups_res_to_df(result = adata.uns['rank_genes_groups'])\n
        print(df.head())\n

    Author: WYK  
    Date: 2025-01
    """
    # tqdm is an optional progress-bar dependency; fall back to plain iteration.
    try:
        from tqdm import tqdm
    except ImportError:
        def tqdm(iterable, **kwargs):
            return iterable

    columns = ['Group', 'Gene', 'Score', 'log2FC', 'Pval', 'Pval_adj']
    group_names = result['names'].dtype.names  # one structured-array field per group
    frames = []
    for group in tqdm(group_names):
        # Build one frame per group from the whole column arrays at once,
        # instead of appending gene-by-gene in a Python-level loop.
        frames.append(pd.DataFrame({
            'Group': group,
            'Gene': result['names'][group],
            'Score': result['scores'][group],
            'log2FC': result['logfoldchanges'][group],
            'Pval': result['pvals'][group],
            'Pval_adj': result['pvals_adj'][group],
        }))
    if not frames:
        # No groups at all: still return an empty frame with the expected columns.
        return pd.DataFrame(columns=columns)
    return pd.concat(frames, ignore_index=True)[columns]


def marker_dot_plot(adata: ad.AnnData, od: str = None,
                    groupby='leiden_res_0.5',
                    use_rep="X_pca_harmony",
                    marker_json_path="/mnt/d/WSL/wsl_wyk_202508/wangyk/project/1.tumor_mimicry_shiyue/GDT240909001/out/17.各癌症数据分群注释/cancer_cell_type_classic_marker_gene.json") -> None:
    """
    ## Description:
    Draw a marker-gene dot plot for the clusters of an AnnData object and save it as a PNG.

    Marker genes are loaded from a JSON file (cell type -> gene list), filtered down to the
    genes actually present in `adata`, and shown per cluster with a dendrogram ordering.
    Expression is standard-scaled per gene so each gene spans the [0, 1] range.

    Parameters:
        adata (ad.AnnData) : The AnnData object containing the single-cell data.
        od (str) : The output directory where the dot plot PNG will be written.
        groupby (str) : Key in `adata.obs` used to group cells, typically a clustering
            result. Default is 'leiden_res_0.5'.
        use_rep (str) : Representation used to compute the dendrogram. Default is
            "X_pca_harmony".
        marker_json_path (str) : Path to the JSON file with marker genes per cell type.

    ## Returns:
        None. The plot is saved to `{od}/0.marker_gene_dot.png`.

    Author: WYK
    Date: 2025-01
    """
    markers_raw = get_dict_fromJSON(marker_json_path)
    markers_present = ensure_gene_dict_in_adata(adata, markers_raw)

    # Hierarchical ordering of the groups shown alongside the dot plot.
    sc.tl.dendrogram(adata, groupby=groupby, use_rep=use_rep, use_raw=True)

    dotplot_kwargs = dict(
        groupby=groupby,
        var_names=markers_present,
        dendrogram=True,
        # standard scale: normalize each gene to range from 0 to 1
        standard_scale="var",
    )
    sc.pl.dotplot(adata, **dotplot_kwargs)

    pl.savefig(
        ck_file_dir(f"{od}/0.marker_gene_dot.png"), bbox_inches='tight', dpi=300)



from typing import Dict, Union, List

# SECURITY: the API key consumed by `classify_cell_types_by_ai` must be supplied
# via the caller's environment, e.g. `export API_KEY_FOR_CALSSIFY=...`.
# A live key was previously hard-coded here; never commit secrets to source
# control — rotate any key that was exposed this way.


def classify_cell_types_by_ai(gene_dict: Dict[str, List[str]],
                              specie_name: str = 'human',
                              tissue: str = 'Hepatocellular Carcinoma',
                              model: str = 'deepseek-chat',
                              base_url: str = "https://api.deepseek.com/") -> Union[Dict, str]:
    """
    Classify cell types based on marker genes using AI models (OpenAI, DeepSeek or other).

    This function takes a dictionary of marker genes for clusters in a single-cell analysis and uses an AI model (either OpenAI or DeepSeek) to predict the possible cell types for each cluster. The function returns a dictionary where the keys are cluster numbers and the values are lists of possible cell types. The function also handles errors related to API key configuration and JSON parsing.

    Parameters:
        gene_dict (Dict[str, List[str]]) : A dictionary where keys are cluster numbers (as strings) and values are lists of marker genes associated with each cluster.
        specie_name (str) : The name of the species (e.g., 'human'). Default is 'human'.
        tissue (str, optional) : The type of tissue being analyzed (e.g., 'breast cancer'). Default is 'Hepatocellular Carcinoma'.
        model (str, optional) : The AI model to use for classification. Default is 'deepseek-chat'.
        base_url (str, optional) : The base URL to use for the API requests. Defaults to "https://api.deepseek.com/".

    ## Returns:
        - Union[Dict, str]:
            - If successful, returns a dictionary where keys are cluster numbers and values are lists of possible cell types.
            - If an error occurs, returns a string describing the error (e.g., JSON parsing error, missing API key, or other exceptions).

    ## Example:
        gene_dict = {
            "1": ["CD3E", "CD4"],
            "2": ["CD19", "CD20"]
        }\n
        result = classify_cell_types_by_ai(gene_dict, specie_name='human', tissue='Lymphoma', model='deepseek-chat')\n
        print(result)\n
        # Output might be: {"1": ["CD4+ T cells"], "2": ["B cells"]}

    ## Notes:
        - The function requires the environment variable `API_KEY_FOR_CALSSIFY` to be set.
        - The AI-generated results should be manually verified for accuracy.

    Author: WYK
    Date: 2025-01
    """
    import json

    gene_str = json.dumps(gene_dict, indent=4, ensure_ascii=False)

    print(f"根据 '{model}' 来辅助细胞类型判断，回答未必正确无误，请仔细甄别。")
    prompt = f"""
    The following is a dictionary of marker genes for clusters in a single-cell analysis:
    {gene_str}

    Identify cell types of {tissue} in {specie_name}, please determine the possible cell types for each cluster.

    Please strictly return the result in the following JSON format, where the keys are cluster numbers (string type) and the values are lists of possible cell types (list of strings):
    {{
        "1": "cell_type1, cell_type2",
        "2": "cell_type3",
        ...
    }}
    Do not include any additional explanatory text or code. If the cell type cannot be determined, set the value for the corresponding cluster to null. Provide the official English names for the cell types. Suggests that these clusters are not homogeneous and may contain more than one distinct cell type. Please provide as detailed cell type annotations as possible, such as distinguishing between subtypes.
    """

    try:
        api_key = os.environ.get('API_KEY_FOR_CALSSIFY')
        if not api_key:
            # Message fixed: it previously pointed at a differently-spelled
            # variable name and used call syntax on `os.environ`.
            raise ValueError(
                "No valid API key found. Please set the environment variable `os.environ['API_KEY_FOR_CALSSIFY']`.")

        # Imported lazily, after the key check, so a missing key yields the
        # clear ValueError message instead of an unrelated import failure.
        from openai import OpenAI

        client = OpenAI(api_key=api_key, base_url=base_url)
        response = client.chat.completions.create(
            model=model,  # BUG FIX: was hard-coded to 'deepseek-chat', ignoring the `model` parameter
            messages=[
                {"role": "system", "content": "You are a bioinformatics expert specializing in single-cell data analysis."},
                {"role": "user", "content": prompt}
            ]
        )
        result = response.choices[0].message.content

        import re
        match = re.search(r'\{.*\}', result, re.DOTALL)
        if match is None:
            # Previously this raised AttributeError on `.group()` and fell
            # through to the generic handler with a confusing message.
            return f"JSON 解析错误：no JSON object found in the model response\n原始返回结果：{result}"

        cell_type_dict = json.loads(match.group())
        return cell_type_dict

    except json.JSONDecodeError as e:
        return f"JSON 解析错误：{e}\n原始返回结果：{result}"  # detailed parse error
    except ValueError as e:
        return str(e)  # missing API key / configuration error
    except Exception as e:
        return f"发生未知错误：{e}"  # any other failure (network, SDK, ...)



# Common data-slicing methods in pandas:
# Select columns: df['col'] or df[['col1', 'col2']].
# Select rows: df.iloc[row_index] or df.loc[row_label].
# Conditional filtering: df[df['col'] > value] or df.query('condition').
# Positional slicing: df.iloc[row_range, col_range].
# Label slicing: df.loc[row_label_range, col_label_list].
# Multi-level index slicing: df.xs or df.loc.