# src/scripts/all_step1_mutation_features_and_ids.py
import pandas as pd
import numpy as np
from pathlib import Path
import json
import warnings
import pyfaidx
from tqdm import tqdm
import re # 导入正则表达式模块
import pyarrow.parquet as pq # 新增导入 pyarrow.parquet，尽管在此文件中不直接用于读写，但保持一致性
from typing import Optional, List # 确保这一行存在且正确导入 Optional

# --- Warning-filter setup ---
# Suppress the specific UserWarning about a neighbouring mutation being an
# indel or an unequal-length substitution. The message pattern below is
# matched against the warning text verbatim, so it must stay as emitted.
warnings.filterwarnings(
    "ignore",
    message="邻近突变 .* 是 Indel 或不等长替换。为保持固定长度，跳过此邻近突变。",
    category=UserWarning
)


# --- Configuration ---
# Input CSVs holding mutations already converted to GRCh38 coordinates.
CBIOPORTAL_INPUT_CSV_PATH = "/gpfs/flash/home/yzq/project/model/Mutabert/data/raw/cbioportal_data/cbioportal_mutations_grch38_converted.csv"
GENIE_INPUT_CSV_PATH = "/gpfs/flash/home/yzq/project/model/Mutabert/data/processed/genie_converted_mutation_data/genie_mutations_grch38_converted.csv"

# Reference-genome FASTA files, keyed by NCBI build name.
REFERENCE_GENOME_FASTAS = {
    "GRCh37": "/ai/data-container/yzq/database/genome/human/hg19/GRCh37.p13.genome.fa",
    "GRCh38": "/ai/data-container/yzq/database/genome/human/hg38/GRCh38.primary_assembly.genome.fa",
}

# Output directory for the combined data and the global ID mappings.
COMBINED_OUTPUT_DIR = "/gpfs/flash/home/yzq/project/model/SomaticMutaGPT/data/processed/combined_mutation_data_0909"
Path(COMBINED_OUTPUT_DIR).mkdir(parents=True, exist_ok=True)

# Variant classification -> exonic(1) / non-exonic(0) flag. Classifications
# missing from this map become NaN via Series.map and are handled downstream.
VARIANT_CLASSIFICATION_TO_EXON_FLAG = {
    'Frame_Shift_Del': 1, 'Frame_Shift_Ins': 1, 'In_Frame_Del': 1, 'In_Frame_Ins': 1,
    'Missense_Mutation': 1, 'Nonsense_Mutation': 1, 'Nonstop_Mutation': 1,
    'Splice_Site': 1, 'Translation_Start_Site': 1,
    'Silent': 1,
    'Intron': 0, 'IGR': 0, 'RNA': 0, '3\'UTR': 0, '5\'UTR': 0,
    'Targeted_Region': 0,
    'Unknown': 0
}

# Chromosome name -> numeric ID. Accepts both bare ('1', 'X') and
# 'chr'-prefixed names, plus numeric aliases for X/Y/MT (23/24/25).
CHROM_TO_NUMERIC_ID = {
    '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9,
    '10': 10, '11': 11, '12': 12, '13': 13, '14': 14, '15': 15, '16': 16,
    '17': 17, '18': 18, '19': 19, '20': 20, '21': 21, '22': 22,
    'X': 23, 'Y': 24, 'MT': 25,
    'chr1': 1, 'chr2': 2, 'chr3': 3, 'chr4': 4, 'chr5': 5, 'chr6': 6, 'chr7': 7, 'chr8': 8, 'chr9': 9,
    'chr10': 10, 'chr11': 11, 'chr12': 12, 'chr13': 13, 'chr14': 14, 'chr15': 15, 'chr16': 16,
    'chr17': 17, 'chr18': 18, 'chr19': 19, 'chr20': 20, 'chr21': 21, 'chr22': 22,
    'chrX': 23, 'chrY': 24, 'chrM': 25,
    'M': 25,
    '23': 23,
    '24': 24,
    '25': 25, 
}

# Normalised chromosome names that pass the standard-chromosome filter.
ALLOWED_NORMALIZED_CHROMS = set([str(i) for i in range(1, 23)] + ['X', 'Y', 'MT'])

# Genomic position binning parameters.
BP_BIN_SIZE = 5000
MAX_POS_BIN = 50000  # longest human chromosome ~249 Mb / 5000 = 49800; 50000 used as the cap

# Sentinel mutation-core strings; rows containing them are filtered out later.
INVALID_SNP_CORE = "INVALID_SNP"
INVALID_INDEL_CORE = "INVALID_INDEL"

# --- Helper functions ---
_loaded_ref_genomes = {}
def load_all_ref_genomes(fasta_paths_dict):
    """Pre-load every reference-genome FASTA listed in *fasta_paths_dict*.

    Results are memoised in the module-level ``_loaded_ref_genomes`` dict, so
    repeated calls return the cached handles instead of re-opening files.

    Raises:
        FileNotFoundError: if any configured FASTA path does not exist.
    """
    global _loaded_ref_genomes
    if _loaded_ref_genomes:
        # Already populated on a previous call — reuse the cache.
        return _loaded_ref_genomes
    print("正在加载所有指定的参考基因组...")
    for build, path in fasta_paths_dict.items():
        if not Path(path).exists():
            raise FileNotFoundError(f"参考基因组FASTA文件未找到，版本 '{build}': {path}")
        print(f"  正在加载 {build} 来自 {path}...")
        _loaded_ref_genomes[build] = pyfaidx.Fasta(path, as_raw=True)
    print("所有参考基因组加载完成。")
    return _loaded_ref_genomes

def _get_fasta_chrom_alias(input_chrom, ref_genome):
    """
    尝试从 pyfaidx 参考基因组中找到与输入染色体名称匹配的别名。
    考虑到常见的染色体命名约定（带'chr'或不带'chr'，以及性染色体和线粒体别名）。
    """
    input_chrom_str = str(input_chrom)
    input_chrom_upper = input_chrom_str.upper()

    aliases_to_try = [input_chrom_str]
    if input_chrom_upper.startswith('CHR'):
        aliases_to_try.append(input_chrom_str[3:]) 
    else:
        aliases_to_try.append(f'chr{input_chrom_str}') 

    if input_chrom_upper == '23' or input_chrom_upper == 'CHRX':
        aliases_to_try.extend(['X', 'chrX'])
    elif input_chrom_upper == '24' or input_chrom_upper == 'CHRY':
        aliases_to_try.extend(['Y', 'chrY'])

    if input_chrom_upper in ['M', 'MT', 'CHRM', 'CHRMT']:
        aliases_to_try.extend(['M', 'MT', 'chrM', 'chrMT'])

    final_aliases = []
    seen = set()
    for alias in aliases_to_try:
        if alias not in seen:
            final_aliases.append(alias)
            seen.add(alias)

    for alias in final_aliases:
        if alias in ref_genome:
            return alias

    return None

def get_mutation_core_string(row):
    """Build the core mutation token for one row, e.g. ``C>T``, ``delC``, ``insT``.

    Uses ``variantType`` plus ``referenceAllele``/``variantAllele`` from *row*.
    No-op substitutions (A>A) yield INVALID_SNP_CORE and malformed indels
    yield INVALID_INDEL_CORE so they can be filtered out downstream.
    """
    ref = str(row['referenceAllele']).upper()
    alt = str(row['variantAllele']).upper()
    var_type = str(row['variantType'])

    if var_type == 'SNP':
        # A>A style substitutions are not real mutations.
        return INVALID_SNP_CORE if ref == alt else f"{ref}>{alt}"

    if var_type == 'DEL':
        if '-' not in (ref, alt):
            removed = ref[len(alt):]   # e.g. REF=CG, ALT=C -> G
        elif alt == '-' and ref != '-':
            removed = ref              # e.g. REF=C, ALT=- -> C
        else:
            return INVALID_INDEL_CORE  # both '-' or otherwise malformed
        if removed and removed != 'N':
            return f"del{removed}"
        return INVALID_INDEL_CORE

    if var_type == 'INS':
        if '-' not in (ref, alt):
            added = alt[len(ref):]     # e.g. REF=C, ALT=CT -> T
        elif ref == '-' and alt != '-':
            added = alt                # e.g. REF=-, ALT=C -> C
        else:
            return INVALID_INDEL_CORE  # both '-' or otherwise malformed
        if added and added != 'N':
            return f"ins{added}"
        return INVALID_INDEL_CORE

    # Unhandled variant types keep the original REF>ALT form; invalid ones
    # are filtered later.
    return f"{ref}>{alt}"

def _normalize_chrom_for_filter(chrom_str):
    """
    将染色体名称标准化为简洁形式，用于判断是否为标准染色体。
    例如：'chr1' -> '1', 'chrX' -> 'X, 'chrM' -> 'MT'
    """
    s = str(chrom_str).replace('chr', '').upper()
    if s == 'M': 
        return 'MT'
    return s

# --- MODIFIED: get_3mer_context_for_all_types renamed and logic updated for 5-mer ---
def get_5mer_context_for_all_types(row, ref_genomes_dict, window=2):
    """Fetch the two reference bases flanking each side of a mutation site.

    Handles SNPs, insertions and deletions: for a '-' REF (pure insertion) the
    mutation occupies no reference bases, otherwise the downstream flank
    starts after len(REF) bases.

    Args:
        row: Row with 'referenceAllele', 'chromosome', 'startPosition'
            (1-based) and 'ncbiBuild'.
        ref_genomes_dict: Build name -> loaded reference genome.
        window: Accepted for API compatibility; the flank size is fixed at 2
            in the current implementation (the argument is unused).

    Returns:
        A 5-tuple (pre2, pre1, 'N', post1, post2). The middle element is a
        placeholder for the mutation core; 'N' marks any unresolvable base.
    """
    ref_allele = str(row['referenceAllele']).upper()
    chrom_original = str(row['chromosome'])
    pos = row['startPosition']  # 1-based start position
    build = str(row['ncbiBuild'])

    no_context = ('N', 'N', 'N', 'N', 'N')

    if build not in ref_genomes_dict:
        warnings.warn(f"参考基因组FASTA未加载，版本 '{build}'。跳过 {chrom_original}:{pos} 的5-mer上下文获取。")
        return no_context

    ref_genome = ref_genomes_dict[build]
    fasta_chrom = _get_fasta_chrom_alias(chrom_original, ref_genome)
    if fasta_chrom is None:
        warnings.warn(f"染色体 '{chrom_original}' 在版本 '{build}' 的FASTA中未找到（尝试常见别名后）。跳过 {chrom_original}:{pos} 的5-mer上下文获取。")
        return no_context

    try:
        seq = ref_genome[fasta_chrom]  # hoist the contig lookup out of the reads
        chrom_len = len(seq)
        start0 = pos - 1  # 0-based index of the mutation start in the FASTA

        def base_at(idx):
            # Bounds-checked single-base read; 'N' outside the contig.
            return seq[idx].upper() if 0 <= idx < chrom_len else 'N'

        # A '-' REF allele (pure insertion) spans zero reference bases.
        ref_span = 0 if ref_allele == '-' else len(ref_allele)

        return (
            base_at(start0 - 2),
            base_at(start0 - 1),
            'N',  # placeholder for the mutation core itself
            base_at(start0 + ref_span),
            base_at(start0 + ref_span + 1),
        )
    except Exception as e:
        warnings.warn(f"获取 {build} {chrom_original}:{pos} 的5-mer上下文时出错。错误: {e}。返回 'N','N','N','N','N'。")
        return no_context
# --- END MODIFIED ---

def apply_initial_preprocessing(df: pd.DataFrame, dataset_name: str) -> pd.DataFrame:
    """
    Apply the initial preprocessing steps to a single dataset: within-sample
    deduplication, genome-build normalisation, chromosome / variant-type
    filtering, and pentanucleotide (5-mer) context extraction.

    The raw string columns needed later for global ID generation
    (referenceAllele, variantAllele, variantType, ...) are preserved.

    Args:
        df: Raw mutation table. Must contain sampleId, chromosome,
            startPosition, referenceAllele, variantAllele and variantType;
            ncbiBuild is optional (defaults to 'GRCh38' when absent).
        dataset_name: Label used only in log output, e.g. "cBioPortal".

    Returns:
        The filtered DataFrame with an added 'pentanucleotide_context_str'
        column.

    NOTE(review): the initial drop_duplicates(inplace=True) also mutates the
    caller's DataFrame, since it runs before df is reassigned.
    """
    print(f"\n--- 正在处理数据集: {dataset_name} ---")
    print(f"原始 {dataset_name} 数据形状: {df.shape}")

    # --- Within-sample mutation deduplication ---
    dedup_cols = ['sampleId', 'chromosome', 'startPosition', 'referenceAllele', 'variantAllele', 'variantType']
    initial_rows_dedup = df.shape[0]
    df.drop_duplicates(subset=dedup_cols, inplace=True)
    removed_dedup_rows = initial_rows_dedup - df.shape[0]
    if removed_dedup_rows > 0:
        print(f"  已移除 {removed_dedup_rows} 行 {dataset_name} 中重复的样本内突变数据。")
    else:
        print(f"  {dataset_name} 未发现重复的样本内突变数据。")
    print(f"  去重后 {dataset_name} 数据形状: {df.shape}")

    # --- Normalise the ncbiBuild column (used only for context lookup, not
    # kept in the final output) ---
    if 'ncbiBuild' in df.columns:
        df['ncbiBuild'] = df['ncbiBuild'].replace({'37': 'GRCh37', '38': 'GRCh38'}).astype(str)
        initial_rows_nan_filter = df.shape[0]
        # astype(str) above turned missing builds into the literal 'nan'.
        df = df[df['ncbiBuild'] != 'nan'].copy()
        removed_nan_rows = initial_rows_nan_filter - df.shape[0]
        if removed_nan_rows > 0:
            print(f"  已移除 {removed_nan_rows} 行 {dataset_name} 中 'ncbiBuild' 为 'nan' 的突变数据。")
        else:
            print(f"  {dataset_name} 未发现 'ncbiBuild' 为 'nan' 的突变数据。")
    else:
        warnings.warn(f"数据集 {dataset_name} 缺少 'ncbiBuild' 列，假定所有突变均为 GRCh38。")
        df['ncbiBuild'] = 'GRCh38'

    # --- Filter out non-standard chromosomes (keep 1-22, X, Y, MT) ---
    print(f"  正在过滤 {dataset_name} 中的非标准染色体...")
    initial_rows_chrom_filter = df.shape[0]
    df['normalized_chrom_for_filter'] = df['chromosome'].astype(str).apply(_normalize_chrom_for_filter)
    df = df[df['normalized_chrom_for_filter'].isin(ALLOWED_NORMALIZED_CHROMS)].copy()
    df = df.drop(columns=['normalized_chrom_for_filter'])
    removed_chrom_rows = initial_rows_chrom_filter - df.shape[0]
    if removed_chrom_rows > 0:
        print(f"  已移除 {removed_chrom_rows} 行 {dataset_name} 中非标准染色体突变数据（保留：1-22, X, Y, MT）。")
    else:
        print(f"  {dataset_name} 未发现非标准染色体突变数据。")
    print(f"  过滤非标准染色体后 {dataset_name} 数据形状: {df.shape}")

    # --- MODIFIED: keep *exactly* the 5 desired mutation types and remove
    # rows whose alleles contain 'nan' or invalid characters ---
    print(f"  正在过滤 {dataset_name} 中非目标类型的突变 (只保留5种特定类型，并移除包含'nan'或无效字符的突变)...")
    initial_rows_mutation_type_filter = df.shape[0]

    # Pre-process raw referenceAllele / variantAllele: NaN becomes '-'.
    df['referenceAllele'] = df['referenceAllele'].fillna('-').astype(str)
    df['variantAllele'] = df['variantAllele'].fillna('-').astype(str)
    df['referenceAllele_str'] = df['referenceAllele'].str.upper()
    df['variantAllele_str'] = df['variantAllele'].str.upper()
    # Identify cases where variantType is 'SNP' but ALT is '-' (indicates deletion)
    misclassified_deletion_as_snp_mask = (df['variantType'] == 'SNP') & (df['variantAllele_str'] == '-')
    if misclassified_deletion_as_snp_mask.any():
        print(f"  正在修正 {misclassified_deletion_as_snp_mask.sum()} 行误分类为 SNP 的删除突变 (ALT='-')。")
        df.loc[misclassified_deletion_as_snp_mask, 'variantType'] = 'DEL'

    # Identify cases where variantType is 'SNP' but REF is '-' (indicates insertion)
    misclassified_insertion_as_snp_mask = (df['variantType'] == 'SNP') & (df['referenceAllele_str'] == '-')
    if misclassified_insertion_as_snp_mask.any():
        print(f"  正在修正 {misclassified_insertion_as_snp_mask.sum()} 行误分类为 SNP 的插入突变 (REF='-')。")
        df.loc[misclassified_insertion_as_snp_mask, 'variantType'] = 'INS'

    print(f"  正在过滤 {dataset_name} 中 REF/ALT 包含 'nan' 或无效字符的突变...")
    initial_rows_char_filter = df.shape[0]

    # Check for 'nan' substring (case-insensitive) in the pre-processed strings
    contains_nan_ref = df['referenceAllele_str'].str.contains('NAN', case=False, na=False)
    contains_nan_alt = df['variantAllele_str'].str.contains('NAN', case=False, na=False)

    # Check for characters other than A, T, C, G, - (e.g. '>', 'N', other symbols).
    # This pattern ensures only valid DNA bases or '-' are present.
    pattern_valid_bases = r'^[ATCG-]+$'
    is_valid_ref_bases = df['referenceAllele_str'].str.match(pattern_valid_bases, na=False)
    is_valid_alt_bases = df['variantAllele_str'].str.match(pattern_valid_bases, na=False)

    # Combine all invalid conditions for filtering
    invalid_char_mask = contains_nan_ref | contains_nan_alt | ~is_valid_ref_bases | ~is_valid_alt_bases

    df = df[~invalid_char_mask].copy()

    removed_char_rows = initial_rows_char_filter - df.shape[0]
    if removed_char_rows > 0:
        print(f"  已移除 {removed_char_rows} 行 {dataset_name} 中 REF/ALT 包含 'nan' 或无效字符的突变。")
    else:
        print(f"  {dataset_name} 未发现 REF/ALT 包含 'nan' 或无效字符的突变。")
    print(f"  过滤异常字符后 {dataset_name} 数据形状: {df.shape}")

    print(f"  正在应用5种特定突变类型过滤...")
    initial_rows_5_type_filter = df.shape[0]

    # 1. SNP: REF=C, ALT=T (REF len=1, ALT len=1)
    is_snp = (df['variantType'] == 'SNP') & \
             (df['referenceAllele_str'].str.len() == 1) & \
             (df['variantAllele_str'].str.len() == 1)

    # 2. Insertion (VCF style): REF=C, ALT=CT (REF len=1, ALT len=2, ALT starts with REF)
    is_vcf_insertion = (df['variantType'] == 'INS') & \
                       (df['referenceAllele_str'].str.len() == 1) & \
                       (df['variantAllele_str'].str.len() == 2) & \
                       (df['variantAllele_str'].str[0] == df['referenceAllele_str']) # Corrected logic

    # 3. Deletion (VCF style): REF=CG, ALT=C (REF len=2, ALT len=1, REF starts with ALT)
    is_vcf_deletion = (df['variantType'] == 'DEL') & \
                      (df['referenceAllele_str'].str.len() == 2) & \
                      (df['variantAllele_str'].str.len() == 1) & \
                      (df['referenceAllele_str'].str[0] == df['variantAllele_str']) # Corrected logic

    # 4. Insertion (non-standard): REF=-, ALT=C (REF is '-', ALT len=1)
    is_non_standard_insertion = (df['variantType'] == 'INS') & \
                                (df['referenceAllele_str'] == '-') & \
                                (df['variantAllele_str'].str.len() == 1)

    # 5. Deletion (non-standard): REF=C, ALT=- (REF len=1, ALT is '-')
    is_non_standard_deletion = (df['variantType'] == 'DEL') & \
                               (df['referenceAllele_str'].str.len() == 1) & \
                               (df['variantAllele_str'] == '-')

    df = df[is_snp | is_vcf_insertion | is_non_standard_insertion | is_vcf_deletion | is_non_standard_deletion].copy()

    removed_5_type_rows = initial_rows_5_type_filter - df.shape[0]
    if removed_5_type_rows > 0:
        print(f"  已移除 {removed_5_type_rows} 行 {dataset_name} 中不属于5种目标类型的突变。")
    else:
        print(f"  {dataset_name} 未发现需要移除的非目标突变类型。")
    print(f"  最终过滤后 {dataset_name} 数据形状: {df.shape}")
    df = df.drop(columns=['referenceAllele_str', 'variantAllele_str'])
    # --- END MODIFIED ---

    # --- MODIFIED: extract the pentanucleotide (5-mer) context ---
    print(f"  正在为 {dataset_name} 获取五核苷酸上下文...")
    ref_genomes = load_all_ref_genomes(REFERENCE_GENOME_FASTAS)
    tqdm.pandas(desc=f"获取 {dataset_name} 5-mer上下文")

    df[['pre_base_2', 'pre_base_1', 'center_base_placeholder', 'post_base_1', 'post_base_2']] = df.progress_apply(
        lambda row: get_5mer_context_for_all_types(row, ref_genomes, window=2), # window=2 for 5-mer context
        axis=1, result_type='expand'
    )
    df['mutation_core_str'] = df.apply(get_mutation_core_string, axis=1) # yields INVALID_* sentinels for A>A / malformed indels
    df['pentanucleotide_context_str'] = df.apply( # e.g. "AC[C>T]GA"
        lambda row: f"{row['pre_base_2']}{row['pre_base_1']}[{row['mutation_core_str']}]{row['post_base_1']}{row['post_base_2']}",
        axis=1
    )
    df = df.drop(columns=['pre_base_2', 'pre_base_1', 'center_base_placeholder', 'post_base_1', 'post_base_2', 'mutation_core_str'])

    # Drop rows whose context carries an invalid-mutation sentinel.
    print(f"  正在过滤 {dataset_name} 中无效的五核苷酸上下文 (包含 '{INVALID_SNP_CORE}', '{INVALID_INDEL_CORE}')...")
    initial_rows_invalid_context_filter = df.shape[0]

    invalid_context_mask = (
        df['pentanucleotide_context_str'].str.contains('UNKNOWN', case=False, na=False) |
        df['pentanucleotide_context_str'].str.contains(INVALID_SNP_CORE, case=False, na=False) |
        df['pentanucleotide_context_str'].str.contains(INVALID_INDEL_CORE, case=False, na=False)
    )
    df = df[~invalid_context_mask].copy()

    removed_invalid_context_rows = initial_rows_invalid_context_filter - df.shape[0]
    if removed_invalid_context_rows > 0:
        print(f"  已移除 {removed_invalid_context_rows} 行 {dataset_name} 中无效的五核苷酸上下文。")
    else:
        print(f"  {dataset_name} 未发现无效的五核苷酸上下文。")
    print(f"  过滤无效上下文后 {dataset_name} 数据形状: {df.shape}")

    print(f"--- {dataset_name} 初始预处理完成。---")
    return df

def prepare_mutation_features_and_ids(cbioportal_csv_path: str, genie_csv_path: str, ref_genome_paths_dict: dict, output_dir: str, clinical_df_path: Optional[str] = None):
    """
    Read the raw cBioPortal and GENIE mutation tables, preprocess each,
    merge them, and generate the global feature-ID mappings. Long sequence
    contexts are NOT generated here.

    Args:
        cbioportal_csv_path: Path to the cBioPortal mutations CSV.
        genie_csv_path: Path to the GENIE mutations CSV.
        ref_genome_paths_dict: Build name -> FASTA path (loaded lazily via
            load_all_ref_genomes inside apply_initial_preprocessing).
        output_dir: Directory receiving the parquet output and the *.json
            ID-mapping files.
        clinical_df_path: Optional parquet with a 'sequencing_method_str'
            column, used to build the sequencing-method ID mapping.
    """
    # 1. Load the raw data.
    df_cbioportal_raw = pd.read_csv(cbioportal_csv_path, low_memory=False)
    df_genie_raw = pd.read_csv(genie_csv_path, low_memory=False)

    # 2. Apply the per-dataset preprocessing.
    df_cbioportal_processed = apply_initial_preprocessing(df_cbioportal_raw, "cBioPortal")
    df_genie_processed = apply_initial_preprocessing(df_genie_raw, "GENIE")

    # 3. Concatenate the two datasets.
    print("\n--- 正在合并两个数据集 ---")
    df_combined = pd.concat([df_cbioportal_processed, df_genie_processed], ignore_index=True)
    print(f"合并后原始数据形状: {df_combined.shape}")

    # 4. Global dedup (after the merge, for cross-dataset uniqueness).
    print("正在进行全局去重...")
    dedup_cols_global = ['sampleId', 'chromosome', 'startPosition', 'referenceAllele', 'variantAllele', 'variantType']
    initial_rows_global_dedup = df_combined.shape[0]
    df_combined.drop_duplicates(subset=dedup_cols_global, inplace=True)
    removed_global_dedup_rows = initial_rows_global_dedup - df_combined.shape[0]
    if removed_global_dedup_rows > 0:
        print(f"已移除 {removed_global_dedup_rows} 行全局重复的突变数据。")
    else:
        print("未发现全局重复的突变数据。")
    print(f"全局去重后数据形状: {df_combined.shape}")

    # Filter out samples with too few mutations.
    MIN_MUTATIONS_PER_SAMPLE = 5
    print(f"\n正在过滤突变数量少于 {MIN_MUTATIONS_PER_SAMPLE} 的样本...")

    sample_mutation_counts = df_combined.groupby('sampleId').size().reset_index(name='mutation_count')

    samples_to_keep = sample_mutation_counts[sample_mutation_counts['mutation_count'] >= MIN_MUTATIONS_PER_SAMPLE]['sampleId']

    initial_samples_before_filter = len(df_combined['sampleId'].unique())
    # BUGFIX: snapshot the row count *after* global dedup. The old code
    # subtracted from initial_rows_global_dedup (the pre-dedup count), so the
    # reported number of mutations removed by this sample filter wrongly
    # included the rows already removed by deduplication.
    initial_rows_before_sample_filter = df_combined.shape[0]
    df_combined = df_combined[df_combined['sampleId'].isin(samples_to_keep)].copy()

    removed_samples = initial_samples_before_filter - len(df_combined['sampleId'].unique())
    removed_mutations = initial_rows_before_sample_filter - df_combined.shape[0]

    if removed_samples > 0:
        print(f"  已移除 {removed_samples} 个样本，因为其突变数量少于 {MIN_MUTATIONS_PER_SAMPLE} (共移除 {removed_mutations} 条突变记录)。")
    else:
        print(f"  未发现突变数量少于 {MIN_MUTATIONS_PER_SAMPLE} 的样本。")
    print(f"过滤低突变样本后数据形状: {df_combined.shape}")

    # 5. Generate global ID mappings and feature columns.
    print("\n--- 正在生成全局特征ID ---")

    # 5.1 Chromosome ID.
    print("  正在生成基因组位置ID...")
    df_combined['chromosome'] = df_combined['chromosome'].astype(str)
    df_combined['chromosome_id'] = df_combined['chromosome'].map(CHROM_TO_NUMERIC_ID)

    initial_rows_unmapped_chrom_filter = df_combined.shape[0]
    df_combined.dropna(subset=['chromosome_id'], inplace=True)
    removed_unmapped_chrom_rows = initial_rows_unmapped_chrom_filter - df_combined.shape[0]
    if removed_unmapped_chrom_rows > 0:
        print(f"  已移除 {removed_unmapped_chrom_rows} 行无法映射到标准数字ID的染色体数据。")
    else:
        print("  未发现无法映射的染色体数据。")
    df_combined['chromosome_id'] = df_combined['chromosome_id'].astype(int)

    # Genomic position bin ID (integer bin index, clipped at MAX_POS_BIN).
    df_combined['pos_bin_id'] = (df_combined['startPosition'] // BP_BIN_SIZE).clip(upper=MAX_POS_BIN)
    print(f"  已生成基因组位置分箱ID，分箱大小 {BP_BIN_SIZE}，最大分箱ID {MAX_POS_BIN}。")


    # 5.2 Pentanucleotide-context ID.
    print("  正在生成五核苷酸上下文ID...")
    unique_pentanucleotide_contexts = df_combined['pentanucleotide_context_str'].unique()
    # Real context IDs start at 1; 0 is reserved for PAD.
    pentanucleotide_context_to_id = {val: i + 1 for i, val in enumerate(unique_pentanucleotide_contexts)}
    with open(Path(output_dir) / "pentanucleotide_context_to_id.json", "w") as f:
        json.dump(pentanucleotide_context_to_id, f, indent=4)
    df_combined['pentanucleotide_context_id'] = df_combined['pentanucleotide_context_str'].map(pentanucleotide_context_to_id)
    print(f"  已生成 {len(unique_pentanucleotide_contexts)} 种独特的五核苷酸上下文特征。")

    # 5.3 Gene ID (kept; its presence/absence is also folded into exon_strand_id).
    print("  正在生成基因ID...")
    df_combined['entrezGeneId'] = pd.to_numeric(df_combined['entrezGeneId'], errors='coerce')
    unique_gene_ids = df_combined['entrezGeneId'].unique()
    gene_to_id = {int(val): i for i, val in enumerate(unique_gene_ids) if pd.notna(val)}
    with open(Path(output_dir) / "gene_to_id.json", "w") as f:
        json.dump(gene_to_id, f, indent=4)
    df_combined['gene_id'] = df_combined['entrezGeneId'].map(gene_to_id).fillna(-1).astype(int)

    df_combined['is_gene_annotated_flag'] = (df_combined['gene_id'] != -1).astype(int)

    # 5.4 Combined exon-flag / strand / gene-annotation feature.
    print("  正在生成结合外显子标志、链方向和基因注释标志的特征 ...")
    df_combined['in_exon_flag_temp'] = df_combined['variantClassification'].map(VARIANT_CLASSIFICATION_TO_EXON_FLAG)

    # astype(str) turns missing strands into 'nan'; restore them to NaN.
    df_combined['strand'] = df_combined['strand'].astype(str).replace({'nan': np.nan})

    def combine_exon_strand_gene_optimized(row):
        # Compose "<ExonStatus>_<Strand>_<GeneAnnotatedStatus>" for one row.
        exon_flag = row['in_exon_flag_temp']
        if pd.isna(exon_flag):
            exon_status = "MissingExonInfo"
        elif exon_flag == 1:
            exon_status = "Exon"
        else:
            exon_status = "Intron"

        strand_val = str(row['strand']) if pd.notna(row['strand']) else "Unknown"

        gene_annotated_status = "GeneAnnotated" if row['is_gene_annotated_flag'] == 1 else "NoGeneAnnotated"

        return f"{exon_status}_{strand_val}_{gene_annotated_status}"

    df_combined['exon_strand_str'] = df_combined.apply(combine_exon_strand_gene_optimized, axis=1)

    unique_exon_strand_combos = df_combined['exon_strand_str'].unique().tolist()

    exon_strand_to_id = {}  # initialise before use

    # The fully-missing combination always receives PAD ID 0.
    pad_combo = "MissingExonInfo_Unknown_NoGeneAnnotated"
    if pad_combo in unique_exon_strand_combos:
        exon_strand_to_id[pad_combo] = 0
        unique_exon_strand_combos.remove(pad_combo)
    else:
        warnings.warn(f"数据中未发现组合 '{pad_combo}'。将其强制映射为 ID 0。")
        exon_strand_to_id[pad_combo] = 0

    current_id = 1
    for val in sorted(unique_exon_strand_combos):
        exon_strand_to_id[val] = current_id
        current_id += 1

    with open(Path(output_dir) / "exon_strand_to_id.json", "w") as f:
        json.dump(exon_strand_to_id, f, indent=4)
    df_combined['exon_strand_id'] = df_combined['exon_strand_str'].map(exon_strand_to_id).fillna(0).astype(int)

    df_combined = df_combined.drop(columns=['in_exon_flag_temp', 'strand', 'is_gene_annotated_flag', 'exon_strand_str'])

    # 5.5 Sequencing-method ID mapping (from the clinical table, if provided).
    print("  正在生成测序方法ID...")
    # 'Unknown' is always present and mapped to 0; extra methods (if any)
    # come from the raw clinical data.
    sequencing_method_to_id = {'Unknown': 0}
    if clinical_df_path:
        df_clinical_full = pd.read_parquet(clinical_df_path)
        col_sequencing_method_str = 'sequencing_method_str'
        if col_sequencing_method_str in df_clinical_full.columns:
            unique_sequencing_methods = df_clinical_full[col_sequencing_method_str].dropna().unique().tolist()
            current_id = 1
            for method in sorted(unique_sequencing_methods):
                if method not in sequencing_method_to_id:
                    sequencing_method_to_id[method] = current_id
                    current_id += 1
            print(f"  已生成 {len(sequencing_method_to_id)} 种独特的测序方法特征。")
        else:
            warnings.warn(f"临床数据文件 '{clinical_df_path}' 缺少 '{col_sequencing_method_str}' 列。无法生成测序方法ID映射。将创建仅包含'Unknown'的默认映射。")
    else:
        warnings.warn("未提供 clinical_df_path，无法生成测序方法ID映射。将创建仅包含'Unknown'的默认映射。")
    # Write the mapping once here instead of duplicating the dump in every
    # branch (behavior unchanged: all three original branches wrote this file).
    with open(Path(output_dir) / "sequencing_method_str_to_id.json", "w") as f:
        json.dump(sequencing_method_to_id, f, indent=4)

    print("--- 全局特征ID生成完成。---")

    # 6. Final column selection and save.
    print("\n--- 整理最终数据并保存 ---")
    df_combined = df_combined.sort_values(by=['sampleId', 'chromosome_id', 'startPosition'])

    # Columns kept in the final parquet output (missing ones are skipped).
    processed_cols = [
        'sampleId', 'chromosome_id',
        'exon_strand_id',
        'pentanucleotide_context_id',
        'pentanucleotide_context_str',
        'studyId',
        'chromosome',
        'startPosition',
        'referenceAllele',
        'variantAllele',
        'variantType',
        'pos_bin_id',
    ]
    processed_cols = [col for col in processed_cols if col in df_combined.columns]
    df_processed = df_combined[processed_cols]

    # Save the intermediate file (no long sequence context).
    output_path_base_features = Path(output_dir) / "combined_processed_mutations_base_features.parquet"
    print(f"正在保存所有处理后的基础特征数据到 {output_path_base_features} ({df_processed.shape[0]} 行)...")
    df_processed.to_parquet(output_path_base_features, index=False)
    print("基础特征数据保存完成。")

    # Persist the position-bin vocabulary size (+1 because bin IDs start at 0).
    num_pos_bins = MAX_POS_BIN + 1
    with open(Path(output_dir) / "num_pos_bins.json", "w") as f:
        json.dump({"num_pos_bins": num_pos_bins}, f, indent=4)
    print(f"  已保存 num_pos_bins ({num_pos_bins}) 到 num_pos_bins.json。")

    print("\n所有基础特征预处理和ID生成任务完成。")

if __name__ == '__main__':
    # Warm the reference-genome cache up front so missing-FASTA errors
    # surface before any CSV parsing starts.
    load_all_ref_genomes(REFERENCE_GENOME_FASTAS)

    # IMPORTANT: You need to replace this with the actual path to your clinical data parquet file.
    # This file should contain a column with raw sequencing method strings, e.g., 'sequencing_method_str'.
    CLINICAL_DF_PATH = "/gpfs/flash/home/yzq/project/model/MutaGPT/data/processed/combined_processed_clinical_data_0824/combined_processed_clinical_samples.parquet"

    prepare_mutation_features_and_ids(
        CBIOPORTAL_INPUT_CSV_PATH,
        GENIE_INPUT_CSV_PATH,
        REFERENCE_GENOME_FASTAS,
        COMBINED_OUTPUT_DIR,
        clinical_df_path=CLINICAL_DF_PATH  # Pass the clinical data path
    )