"""临床数据合并和预处理模块。

该模块用于处理和合并来自不同数据源（cBioPortal和GENIE）的临床数据，
包括数据标准化、分类变量映射和最终数据集生成。

主要功能：
- 加载和标准化临床数据
- 分类变量到全局ID的映射
- 多数据源临床数据合并
- 生成词汇表和映射文件
"""

import pandas as pd
import numpy as np
from pathlib import Path
import json
import warnings
from tqdm import tqdm
from typing import Dict, Optional, Tuple, List

# --- 配置参数 ---
# 输入临床数据CSV文件路径
CBIOPORTAL_INPUT_CLINICAL_CSV_PATH = "/gpfs/flash/home/yzq/project/model/Mutabert/data/raw/cbioportal_data/cbioportal_all_clinical_sample_data.csv"
GENIE_INPUT_CLINICAL_CSV_PATH = "/gpfs/flash/home/yzq/project/model/Mutabert/data/raw/Release_16.1_public/data_clinical_sample.txt"

# 处理后的临床数据（parquet文件）的输出目录，这个目录将存放最终的 combined_processed_clinical_samples.parquet
CLINICAL_DATA_OUTPUT_DIR = "/gpfs/flash/home/yzq/project/model/SomaticMutaGPT/data/processed/combined_processed_clinical_data"
Path(CLINICAL_DATA_OUTPUT_DIR).mkdir(parents=True, exist_ok=True)

# 所有ID映射文件（JSON）的输出目录,放在突变的预处理目录下
VOCAB_OUTPUT_DIR = "/gpfs/flash/home/yzq/project/model/SomaticMutaGPT/data/processed/combined_mutation_data_0909"
Path(VOCAB_OUTPUT_DIR).mkdir(parents=True, exist_ok=True) # 确保目录存在

# 癌症类型映射文件的路径 (现在从 VOCAB_OUTPUT_DIR 读取/写入)
CANCER_TYPE_MAPPING_FILE = Path(VOCAB_OUTPUT_DIR) / "cancer_type_mapping.json"

# cBioPortal 测序方法分类结果文件的路径
VOCAB_OUTPUT_DIR2 = "/gpfs/flash/home/yzq/project/model/SomaticMutaGPT/src/scripts"
CBIO_SEQUENCING_METHODS_FILE = Path(VOCAB_OUTPUT_DIR2) / "cbioportal_study_sequencing_methods_verified_scrape.txt3"

# GENIE临床数据列名映射
GENIE_CLINICAL_COLUMN_MAPPING = {
    'SAMPLE_ID': 'sampleId',
    'PATIENT_ID': 'studyId',
    'CANCER_TYPE': 'clin_CANCER_TYPE',
    'CANCER_TYPE_DETAILED': 'clin_CANCER_TYPE_DETAILED',
    'ONCOTREE_CODE': 'clin_ONCOTREE_CODE',
    'AGE_AT_SEQ_REPORT': 'clin_AGE_AT_SEQ_REPORTED_YEARS',
    'SEQ_ASSAY_ID': 'clin_SEQ_ASSAY_ID',
}

# 定义所有数据集中期望的最终临床特征列（标准化后的名称）
STANDARDIZED_CLINICAL_FEATURES = [
    'sampleId',
    'studyId',
    'clin_CANCER_TYPE',
    'clin_CANCER_TYPE_DETAILED',
    'clin_TUMOR_PURITY',
    'clin_ONCOTREE_CODE',
    'clin_AGE_AT_SEQ_REPORTED_YEARS',
    'clin_GENE_PANEL',
    'clin_SEQ_ASSAY_ID',
]

# 定义未分类标签
UNCLASSIFIED_LABEL = "Unclassified_Cancer"

# --- 辅助函数 ---
# (以下函数保持不变，因为它们只使用上面定义的配置变量)
# ... [map_categorical_to_global_id, load_and_standardize_clinical_data, load_cbioportal_sequencing_methods, combine_and_preprocess_clinical_data, main 保持不变]
# 为了简洁，这里只显示修改的配置部分。实际脚本中，请确保所有函数都包含。

def map_categorical_to_global_id(df: pd.DataFrame, column_name: str, vocab_output_dir: str,
                                fill_value: str = 'Unknown', id_start_from: int = 0,
                                custom_vocab: Optional[List[str]] = None) -> Tuple[pd.DataFrame, Dict]:
    """
    Map a categorical column of ``df`` to integer IDs and persist the global
    mapping as JSON (used to generate global IDs on the combined dataset).

    The column is mutated in place: missing values are filled with
    ``fill_value``, the column is cast to ``str``, and a new
    ``{column_name}_id`` column is added.

    Args:
        df: Input frame (modified in place and also returned).
        column_name: Name of the categorical column to map.
        vocab_output_dir: Directory where the ``{column_name}_to_id.json``
            mapping file is written.
        fill_value: Replacement for missing values; placed at ID 0 when
            ``id_start_from`` is 0, otherwise appended to the vocabulary.
        id_start_from: First integer ID to assign.
        custom_vocab: Optional fixed vocabulary; when given, IDs follow its
            order instead of the order of appearance in the data.

    Returns:
        Tuple of the (mutated) DataFrame and the value->ID dict.

    Raises:
        ValueError: When the input arguments are invalid.
        RuntimeError: When the mapping process fails.
    """
    try:
        if df.empty:
            raise ValueError("输入数据框为空")
        if column_name not in df.columns:
            raise ValueError(f"列 '{column_name}' 在数据框中不存在")

        vocab_path = Path(vocab_output_dir)
        vocab_path.mkdir(parents=True, exist_ok=True)  # idempotent

        original_col = df[column_name]
        original_na_count = original_col.isna().sum()

        df[column_name] = original_col.fillna(fill_value).astype(str)
        if original_na_count > 0:
            print(f"已将 {original_na_count} 个缺失值填充为 '{fill_value}'")

        if custom_vocab:
            unique_values = list(custom_vocab)  # copy so the caller's list is untouched
            # Ensure fill_value is present: at the front when IDs start at 0
            # (so it becomes the Unknown/PAD ID), otherwise appended.
            if id_start_from == 0 and fill_value not in unique_values:
                unique_values.insert(0, fill_value)
            elif id_start_from != 0 and fill_value not in unique_values:
                unique_values.append(fill_value)
            # Drop duplicates while preserving order.
            unique_values = list(dict.fromkeys(unique_values))
        else:
            unique_values = df[column_name].unique().tolist()
            if not unique_values:
                raise ValueError(f"列 '{column_name}' 没有有效值")

            if id_start_from == 0 and fill_value not in unique_values:
                unique_values.insert(0, fill_value)
            elif id_start_from != 0 and fill_value not in unique_values:
                unique_values.append(fill_value)

        value_to_id = {val: i + id_start_from for i, val in enumerate(unique_values)}

        map_file_path = vocab_path / f"{column_name}_to_id.json"
        try:
            with open(map_file_path, "w", encoding='utf-8') as f:
                json.dump(value_to_id, f, indent=4, ensure_ascii=False)
            print(f"已为 '{column_name}' 生成 {len(value_to_id)} 个全局ID，映射文件保存至 {map_file_path}")
        except Exception as e:
            raise RuntimeError(f"保存映射文件失败: {e}") from e

        id_col = f'{column_name}_id'
        df[id_col] = df[column_name].map(value_to_id)

        if df[id_col].isna().any():
            unmapped_values = df[df[id_col].isna()][column_name].unique().tolist()
            warnings.warn(f"映射过程中出现未映射的值: {unmapped_values}。这些值将被填充为 {id_start_from} (Unknown/PAD ID)。")
            # Bug fix: the previous `df[id_col].fillna(..., inplace=True)` was
            # chained assignment on an intermediate Series — deprecated in
            # pandas 2.x and a no-op under copy-on-write, so unmapped values
            # were never actually filled. Reassign the column instead, and
            # cast back to int64 since map() produced floats due to the NaNs.
            df[id_col] = df[id_col].fillna(id_start_from).astype('int64')

        return df, value_to_id

    except (ValueError, RuntimeError):
        raise
    except Exception as e:
        raise RuntimeError(f"映射分类列 '{column_name}' 时发生未知错误: {e}") from e

def load_and_standardize_clinical_data(input_path: str, dataset_type: str, col_mapping: dict = None) -> pd.DataFrame:
    """
    Load a clinical table and perform initial standardization: optional column
    renaming, selection of the common standardized columns, and per-source
    sample de-duplication. A 'source_dataset' column records the origin.

    Args:
        input_path: Path to the input file (CSV for cBioPortal, tab-separated
            with a 4-line header offset for GENIE).
        dataset_type: Either "cbioportal" or "genie".
        col_mapping: Optional source->standard column rename map (GENIE only).

    Returns:
        A DataFrame restricted to STANDARDIZED_CLINICAL_FEATURES plus
        'source_dataset'; missing columns are filled with NaN.

    Raises:
        FileNotFoundError: When the input file does not exist.
        ValueError: When the dataset type is unknown or the data is invalid.
        RuntimeError: When loading or processing fails.
    """
    print(f"\n--- 正在加载和标准化 {dataset_type} 临床数据 ---")

    try:
        source_file = Path(input_path)
        if not source_file.exists():
            raise FileNotFoundError(f"输入文件不存在: {source_file}")

        if dataset_type == "genie":
            try:
                # GENIE ships a 4-line comment header before the real header row.
                raw = pd.read_csv(input_path, sep='\t', low_memory=False, header=4)
                if col_mapping:
                    applicable = {src: dst for src, dst in col_mapping.items() if src in raw.columns}
                    raw.rename(columns=applicable, inplace=True)
                    print(f"  {dataset_type} 数据已根据映射重命名列。")
            except Exception as e:
                raise RuntimeError(f"加载GENIE数据失败: {e}") from e
        elif dataset_type == "cbioportal":
            try:
                raw = pd.read_csv(input_path, low_memory=False)
            except Exception as e:
                raise RuntimeError(f"加载cBioPortal数据失败: {e}") from e
        else:
            raise ValueError(f"未知数据集类型: {dataset_type}")

        if raw.empty:
            raise ValueError(f"{dataset_type} 数据为空")

        print(f"  原始 {dataset_type} 数据形状: {raw.shape}")

    except (FileNotFoundError, ValueError, RuntimeError):
        raise
    except Exception as e:
        raise RuntimeError(f"加载和标准化 {dataset_type} 数据时发生未知错误: {e}") from e

    raw['source_dataset'] = dataset_type

    # Project onto the standardized schema, NaN-filling anything absent.
    standardized = pd.DataFrame()
    for feature in STANDARDIZED_CLINICAL_FEATURES + ['source_dataset']:
        if feature not in raw.columns:
            standardized[feature] = np.nan
            warnings.warn(f"  警告: {dataset_type} 数据中缺少列 '{feature}'，已填充NaN。", UserWarning)
        else:
            standardized[feature] = raw[feature]

    # Per-source de-duplication on sampleId (keeps first occurrence).
    dedup_cols = ['sampleId']
    if not all(c in standardized.columns for c in dedup_cols):
        warnings.warn(f"  警告: {dataset_type} 数据中缺少去重所需的列 '{dedup_cols}'。跳过去重步骤。", UserWarning)
    else:
        rows_before = standardized.shape[0]
        standardized.drop_duplicates(subset=dedup_cols, inplace=True)
        dropped = rows_before - standardized.shape[0]
        if dropped > 0:
            print(f"  已移除 {dropped} 行 {dataset_type} 中重复的临床样本数据。")
        else:
            print(f"  {dataset_type} 未发现重复的临床样本数据。")

    print(f"  标准化和去重后 {dataset_type} 数据形状: {standardized.shape}")
    return standardized

def load_cbioportal_sequencing_methods(file_path: Path) -> Dict[str, str]:
    """
    Load the cBioPortal studyId -> sequencing-method map from ``file_path``.

    Each line has the form ``<studyId>: <classification>``. The classification
    is reduced to one of WGS/WES/Targeted/Unknown; 'Assay' is normalized to
    'Targeted', and complex strings (e.g. 'WES: Keyword=Targeted, Scrape=WES')
    are resolved by the first matching keyword in WGS > WES > Targeted order.
    Returns an empty map (with a warning) when the file is missing.
    """
    mapping: Dict[str, str] = {}
    if not file_path.exists():
        warnings.warn(f"警告: 未找到 cBioPortal 测序方法分类文件: {file_path}。将使用默认逻辑处理 cBioPortal 测序方法。")
        return mapping

    print(f"正在加载 cBioPortal 测序方法分类文件: {file_path}")
    recognized = ["WGS", "WES", "Targeted"]  # keyword priority order
    with open(file_path, 'r', encoding='utf-8') as handle:
        for raw_line in handle:
            entry = raw_line.strip()
            if not entry:
                continue
            pieces = entry.split(': ', 1)
            if len(pieces) != 2:
                continue  # malformed line: no 'studyId: classification' shape
            study_id, classification_raw = pieces

            # Strip any parenthesized suffix, keeping the bare classification.
            candidate = classification_raw.split(' (')[0]

            if candidate in recognized or candidate == "Unknown":
                mapping[study_id] = candidate
                continue
            if candidate == "Assay":  # normalize Assay to Targeted
                mapping[study_id] = "Targeted"
                continue

            # Complex string: take the highest-priority keyword it contains.
            lowered = candidate.lower()
            resolved = next((m for m in recognized if m.lower() in lowered), None)
            if resolved is None:
                warnings.warn(f"发现未知或复杂分类 '{classification_raw}' for '{study_id}'，将其视为 'Unknown'。")
                mapping[study_id] = "Unknown"
            else:
                # Resolved from a non-canonical format: informational only.
                print(f"  解析复杂分类 '{classification_raw}' for '{study_id}' 为 '{resolved}'。")
                mapping[study_id] = resolved
    print(f"已加载 {len(mapping)} 个 cBioPortal study 的测序方法。")
    return mapping

def combine_and_preprocess_clinical_data(cbioportal_csv_path: str, genie_csv_path: str, clinical_data_output_dir: str, vocab_output_dir: str, cbioportal_sequencing_methods_file: Path):
    """
    Load and merge cBioPortal and GENIE clinical data, then apply global
    preprocessing and ID mapping, writing the result to a parquet file.

    Args:
        cbioportal_csv_path: Path to the cBioPortal clinical CSV.
        genie_csv_path: Path to the GENIE clinical TSV.
        clinical_data_output_dir: Output directory for the final clinical parquet file.
        vocab_output_dir: Output directory for all ID-mapping JSON files.
        cbioportal_sequencing_methods_file: File containing the cBioPortal
            sequencing-method classification results.

    Raises:
        FileNotFoundError: When an input or mapping file does not exist.
        ValueError: When an input argument is invalid.
        RuntimeError: When data processing fails.
    """
    try:
        # 1. Ensure both output directories exist.
        clinical_output_path = Path(clinical_data_output_dir)
        vocab_output_path = Path(vocab_output_dir)

        clinical_output_path.mkdir(parents=True, exist_ok=True)
        vocab_output_path.mkdir(parents=True, exist_ok=True)

        # 2. Load and standardize both sources.
        print("开始加载数据集...")
        try:
            df_cbioportal = load_and_standardize_clinical_data(cbioportal_csv_path, "cbioportal")
            df_genie = load_and_standardize_clinical_data(genie_csv_path, "genie", GENIE_CLINICAL_COLUMN_MAPPING)
        except Exception as e:
            raise RuntimeError(f"数据加载失败: {e}") from e
    except (FileNotFoundError, ValueError, RuntimeError):
        raise
    except Exception as e:
        raise RuntimeError(f"处理临床数据时发生未知错误: {e}") from e

    # 3. Concatenate the two standardized frames.
    print("\n--- 正在合并两个标准化后的临床数据集 ---")
    df_combined = pd.concat([df_cbioportal, df_genie], ignore_index=True)
    print(f"合并后原始数据形状: {df_combined.shape}")

    # Global sample-level dedup across sources (keeps first occurrence).
    print("正在进行全局样本去重...")
    initial_rows_global_dedup = df_combined.shape[0]
    df_combined.drop_duplicates(subset=['sampleId'], inplace=True)
    removed_global_dedup_rows = initial_rows_global_dedup - df_combined.shape[0]
    if removed_global_dedup_rows > 0:
        print(f"已移除 {removed_global_dedup_rows} 行全局重复的临床样本数据。")
    else:
        print("未发现全局重复的临床样本数据。")
    print(f"全局去重后数据形状: {df_combined.shape}")

    # 4. Derive the sequencing-method feature.
    print("\n--- 正在生成测序方法特征 ---")
    cbioportal_sequencing_map = load_cbioportal_sequencing_methods(cbioportal_sequencing_methods_file)

    # Vectorized replacement for the previous row-wise DataFrame.apply
    # (identical semantics, far faster on large frames):
    #   - cBioPortal rows: look up studyId in the classification map,
    #     falling back to 'Unknown' when the study is absent;
    #   - GENIE rows: all targeted sequencing -> 'Targeted';
    #   - any other source: 'Unknown'.
    cbio_methods = df_combined['studyId'].map(cbioportal_sequencing_map).fillna('Unknown')
    df_combined['sequencing_method_str'] = np.select(
        [df_combined['source_dataset'] == 'cbioportal',
         df_combined['source_dataset'] == 'genie'],
        [cbio_methods, 'Targeted'],
        default='Unknown',
    )

    # Fixed vocabulary so 'Unknown' is always ID 0; after classification only
    # these four categories can occur.
    custom_sequencing_vocab = ['Unknown', 'WGS', 'WES', 'Targeted']

    df_combined, _ = map_categorical_to_global_id(df_combined, 'sequencing_method_str', vocab_output_dir, fill_value='Unknown', id_start_from=0, custom_vocab=custom_sequencing_vocab)
    print("测序方法分布 :")
    print(df_combined['sequencing_method_str'].value_counts())

    # 5. Load the manual cancer-type mapping (from vocab_output_dir).
    local_cancer_type_mapping_file = Path(vocab_output_dir) / "cancer_type_mapping.json"
    if not local_cancer_type_mapping_file.exists():
        raise FileNotFoundError(f"癌症类型映射文件未找到: {local_cancer_type_mapping_file}")
    with open(local_cancer_type_mapping_file, "r") as f:
        manual_cancer_type_map = json.load(f)

    # 6. Apply the manual cancer-type regrouping.
    print("\n正在根据手动映射处理 'clin_CANCER_TYPE'...")
    if 'clin_CANCER_TYPE' in df_combined.columns:
        df_combined['clin_CANCER_TYPE'] = df_combined['clin_CANCER_TYPE'].fillna('Unknown').astype(str)
        df_combined['clin_CANCER_TYPE_manual_mapped'] = df_combined['clin_CANCER_TYPE'].apply(
            lambda x: manual_cancer_type_map.get(x, UNCLASSIFIED_LABEL)
        )

        unmapped_original_types = set(df_combined['clin_CANCER_TYPE'].unique()) - set(manual_cancer_type_map.keys())
        if unmapped_original_types:
            print(f"注意: 以下原始癌症类型未在手动映射表中明确定义，已自动归类为 '{UNCLASSIFIED_LABEL}':")
            for i, val in enumerate(list(unmapped_original_types)):
                if i < 20:
                    print(f"  - {val}")
                else:
                    # Bug fix: the summary line below used to be printed once
                    # for EVERY type beyond the 20th; emit it once and stop.
                    print(f"  ...等 {len(unmapped_original_types) - 20} 更多未映射类型。")
                    break
        print("手动归类后的癌症类型分布 (前20):")
        print(df_combined['clin_CANCER_TYPE_manual_mapped'].value_counts().head(20))
    else:
        warnings.warn("警告: 合并数据中缺少 'clin_CANCER_TYPE' 列，无法进行手动归类。", UserWarning)

    # 7. Generate global ID mappings and feature columns.
    print("\n--- 正在生成全局特征ID ---")

    # 7.1 Raw clin_CANCER_TYPE.
    if 'clin_CANCER_TYPE' in df_combined.columns:
        df_combined, _ = map_categorical_to_global_id(df_combined, 'clin_CANCER_TYPE', vocab_output_dir, fill_value='Unknown')

    # 7.2 Manually regrouped clin_CANCER_TYPE_manual_mapped.
    if 'clin_CANCER_TYPE_manual_mapped' in df_combined.columns:
        df_combined, _ = map_categorical_to_global_id(df_combined, 'clin_CANCER_TYPE_manual_mapped', vocab_output_dir, fill_value=UNCLASSIFIED_LABEL)

    # 7.3 Remaining categorical features.
    categorical_cols_for_id_mapping = [
        'clin_CANCER_TYPE_DETAILED', 'clin_ONCOTREE_CODE'
    ]
    categorical_cols_for_id_mapping = [col for col in categorical_cols_for_id_mapping if col in df_combined.columns]

    print("\n正在处理其他分类特征并生成全局ID...")
    for col in tqdm(categorical_cols_for_id_mapping, desc="处理分类特征"):
        df_combined, _ = map_categorical_to_global_id(df_combined, col, vocab_output_dir)

    # 7.4 Numeric features: coerce to numeric (invalid -> NaN), store float32.
    numerical_cols = [
        'clin_AGE_AT_SEQ_REPORTED_YEARS', 'clin_TUMOR_PURITY'
    ]
    numerical_cols = [col for col in numerical_cols if col in df_combined.columns]

    print("\n正在处理数值特征...")
    for col in tqdm(numerical_cols, desc="处理数值特征"):
        df_combined[col] = pd.to_numeric(df_combined[col], errors='coerce')
        if df_combined[col].isnull().any():
            print(f"  列 '{col}' 包含NaN值，将保留为浮点数。")
        else:
            print(f"  列 '{col}' 没有缺失值。")
        df_combined[col] = df_combined[col].astype(np.float32)

    print("--- 全局特征ID生成完成。---")

    # 8. Assemble the final column set and persist to parquet.
    print("\n--- 整理最终数据并保存 ---")
    final_cols = ['sampleId', 'studyId', 'sequencing_method_str', 'sequencing_method_str_id']

    if 'clin_CANCER_TYPE' in df_combined.columns:
        final_cols.extend(['clin_CANCER_TYPE', 'clin_CANCER_TYPE_id'])
    if 'clin_CANCER_TYPE_manual_mapped' in df_combined.columns:
        final_cols.extend(['clin_CANCER_TYPE_manual_mapped', 'clin_CANCER_TYPE_manual_mapped_id'])

    final_cols.extend([f'{col}_id' for col in categorical_cols_for_id_mapping])
    final_cols.extend(numerical_cols)

    # Keep only columns that actually exist after the optional steps above.
    final_cols = [col for col in final_cols if col in df_combined.columns]
    df_final = df_combined[final_cols].copy()

    output_path_full = clinical_output_path / "combined_processed_clinical_samples.parquet"
    print(f"正在保存所有处理后的合并临床数据到 {output_path_full} ({df_final.shape[0]} 行)...")
    df_final.to_parquet(output_path_full, index=False)
    print("所有数据保存完成。")

    print("\n所有临床数据预处理和合并任务完成。")
if __name__ == '__main__':
    # Entry point: run the full merge/preprocess pipeline using the
    # module-level path configuration defined at the top of the file.
    combine_and_preprocess_clinical_data(
        CBIOPORTAL_INPUT_CLINICAL_CSV_PATH,
        GENIE_INPUT_CLINICAL_CSV_PATH,
        CLINICAL_DATA_OUTPUT_DIR,
        VOCAB_OUTPUT_DIR,
        cbioportal_sequencing_methods_file=CBIO_SEQUENCING_METHODS_FILE
    )