# src/scripts/all_step2_generate_long_sequences_for_evo2.py
import pandas as pd
import numpy as np
from pathlib import Path
import json
import warnings
import pyfaidx
from tqdm import tqdm
import re  # regular expressions module
import pyarrow.parquet as pq  # used to inspect parquet schemas without loading data

# --- Warning filter setup ---
# Suppress the UserWarning emitted for neighboring indel / unequal-length
# substitutions (raised inside get_composite_long_sequence_contexts); such
# neighbors are skipped on purpose to keep composite sequences fixed-length.
# The message pattern must stay byte-identical to the warning text it matches.
warnings.filterwarnings(
    "ignore",
    message="邻近突变 .* 是 Indel 或不等长替换。为保持固定长度，跳过此邻近突变。",
    category=UserWarning
)

# --- Configuration (kept in sync with prepare_mutation_features_and_ids.py) ---
# Reference genome FASTA paths, keyed by NCBI build name.
REFERENCE_GENOME_FASTAS = {
    "GRCh37": "/ai/data-container/yzq/database/genome/human/hg19/GRCh37.p13.genome.fa",
    "GRCh38": "/ai/data-container/yzq/database/genome/human/hg38/GRCh38.primary_assembly.genome.fa",
}

# Output directory for combined/processed data; created eagerly at import time.
COMBINED_OUTPUT_DIR = "/gpfs/flash/home/yzq/project/model/MutaGPT/data/processed/combined_mutation_data_0824"
Path(COMBINED_OUTPUT_DIR).mkdir(parents=True, exist_ok=True)

# Flanking bases on each side of a mutation; the full context length is
# 2 * LONG_SEQUENCE_WINDOW_SIZE + 1.
LONG_SEQUENCE_WINDOW_SIZE = 500 
# Mutations whose variant allele exceeds this length yield all-'N' sequences.
MAX_VARIANT_ALLELE_LENGTH = 300

# --- Helper functions (kept in sync with prepare_mutation_features_and_ids.py) ---
# Module-level cache of opened pyfaidx.Fasta handles, keyed by genome build.
_loaded_ref_genomes = {}

def load_all_ref_genomes(fasta_paths_dict):
    """Open every configured reference genome once and return the cache.

    On the first call each FASTA in ``fasta_paths_dict`` is opened with
    pyfaidx (raising FileNotFoundError for missing paths); later calls
    return the already-populated module-level cache untouched.
    """
    global _loaded_ref_genomes
    if _loaded_ref_genomes:
        return _loaded_ref_genomes
    print("正在加载所有指定的参考基因组...")
    for genome_build, fasta_path in fasta_paths_dict.items():
        if not Path(fasta_path).exists():
            raise FileNotFoundError(f"参考基因组FASTA文件未找到，版本 '{genome_build}': {fasta_path}")
        print(f"  正在加载 {genome_build} 来自 {fasta_path}...")
        _loaded_ref_genomes[genome_build] = pyfaidx.Fasta(fasta_path, as_raw=True)
    print("所有参考基因组加载完成。")
    return _loaded_ref_genomes

def _get_fasta_chrom_alias(input_chrom, ref_genome):
    input_chrom_str = str(input_chrom)
    input_chrom_upper = input_chrom_str.upper()
    aliases_to_try = [input_chrom_str]
    if input_chrom_upper.startswith('CHR'):
        aliases_to_try.append(input_chrom_str[3:]) 
    else:
        aliases_to_try.append(f'chr{input_chrom_str}') 
    if input_chrom_upper == '23' or input_chrom_upper == 'CHRX':
        aliases_to_try.extend(['X', 'chrX'])
    elif input_chrom_upper == '24' or input_chrom_upper == 'CHRY':
        aliases_to_try.extend(['Y', 'chrY'])
    if input_chrom_upper in ['M', 'MT', 'CHRM', 'CHRMT']:
        aliases_to_try.extend(['M', 'MT', 'chrM', 'chrMT'])
    final_aliases = []
    seen = set()
    for alias in aliases_to_try:
        if alias not in seen:
            final_aliases.append(alias)
            seen.add(alias)
    for alias in final_aliases:
        if alias in ref_genome:
            return alias
    return None

def get_composite_long_sequence_contexts(
    current_mutation_row: pd.Series,
    all_chrom_mutations_df: pd.DataFrame, 
    ref_genomes_dict: dict,
    window_size: int,
    max_variant_allele_length: int 
) -> tuple[str, str]:
    """Build the reference and composite mutated sequence context for one mutation.

    Extracts a ``2 * window_size + 1`` base window centred on the mutation from
    the reference genome, applies the mutation's variant allele, then overlays
    any neighboring same-length substitutions (from the same sample/chromosome
    DataFrame) that fall inside the window. Positions outside the contig are
    padded with 'N'. Per the original note, this is only called for
    pre-filtered single-base mutations.

    Parameters
    ----------
    current_mutation_row : row with 'chromosome', 'startPosition' (1-based),
        'referenceAllele', 'variantAllele' and 'ncbiBuild'.
    all_chrom_mutations_df : all mutations of the same sample/chromosome,
        used to find neighbors within the window.
    ref_genomes_dict : mapping of build name -> pyfaidx.Fasta handle.
    window_size : number of flanking bases on each side of the mutation.
    max_variant_allele_length : variant alleles longer than this yield all-'N'.

    Returns
    -------
    tuple[str, str]
        (reference_sequence, mutated_sequence), each of length
        ``2 * window_size + 1``; all-'N' strings on any failure.
    """
    chrom_original = str(current_mutation_row['chromosome'])
    pos = current_mutation_row['startPosition']  # 1-based genomic position
    ref_allele_raw = str(current_mutation_row['referenceAllele']).upper()
    
    alt_allele_raw_orig = current_mutation_row['variantAllele']
    if pd.isna(alt_allele_raw_orig):
        alt_allele_raw = '-'  # NaN variant allele treated as the empty-allele marker
    else:
        alt_allele_raw = str(alt_allele_raw_orig).upper()

    build = str(current_mutation_row['ncbiBuild'])

    # '-' denotes an empty allele (deletion/insertion notation), i.e. length 0.
    ref_allele_len = len(ref_allele_raw) if ref_allele_raw != '-' else 0
    alt_allele_len = len(alt_allele_raw) if alt_allele_raw != '-' else 0

    if alt_allele_len > max_variant_allele_length:
        warnings.warn(f"突变 {chrom_original}:{pos} 的 variantAllele 长度 ({alt_allele_len}) 超过最大限制 ({max_variant_allele_length})。返回填充的 'N' 序列。")
        return 'N' * (2 * window_size + 1), 'N' * (2 * window_size + 1)

    alt_allele_for_seq = alt_allele_raw if alt_allele_raw != '-' else ''

    full_seq_len = 2 * window_size + 1
    default_n_seq = 'N' * full_seq_len

    if build not in ref_genomes_dict:
        warnings.warn(f"参考基因组FASTA未加载，版本 '{build}'。跳过 {chrom_original}:{pos} 的上下文获取。")
        return default_n_seq, default_n_seq

    ref_genome = ref_genomes_dict[build]
    fasta_chrom = _get_fasta_chrom_alias(chrom_original, ref_genome)

    if fasta_chrom is None: 
        warnings.warn(f"染色体 '{chrom_original}' 在版本 '{build}' 的FASTA中未找到（尝试常见别名后）。跳过 {chrom_original}:{pos} 的上下文获取。")
        return default_n_seq, default_n_seq

    try:
        p0 = pos - 1  # 0-based position of the mutation

        # Window [p0 - window_size, p0 + window_size], half-open end, clipped to the contig.
        ref_window_start_p0 = p0 - window_size
        ref_window_end_p0 = p0 + window_size + 1 

        chrom_len = len(ref_genome[fasta_chrom])
        
        actual_extract_start = max(0, ref_window_start_p0)
        actual_extract_end = min(chrom_len, ref_window_end_p0)

        raw_ref_context_seq = ref_genome[fasta_chrom][actual_extract_start:actual_extract_end].upper()

        # Pad with 'N' where the window runs past either contig boundary.
        padded_ref_seq_list = ['N'] * full_seq_len
        padding_left_ref = max(0, -ref_window_start_p0)
        for i, base in enumerate(raw_ref_context_seq):
            if padding_left_ref + i < len(padded_ref_seq_list):
                padded_ref_seq_list[padding_left_ref + i] = base
        ref_seq_str = "".join(padded_ref_seq_list)

        # Verify the declared reference allele against the FASTA sequence.
        fasta_ref_segment_start_p0 = p0
        fasta_ref_segment_end_p0 = p0 + ref_allele_len

        if fasta_ref_segment_start_p0 < 0 or fasta_ref_segment_end_p0 > chrom_len:
             return default_n_seq, default_n_seq
        
        actual_fasta_ref_segment = ref_genome[fasta_chrom][fasta_ref_segment_start_p0:fasta_ref_segment_end_p0].upper()

        if ref_allele_raw != '-' and actual_fasta_ref_segment != ref_allele_raw:
            return default_n_seq, default_n_seq
        
        # BUGFIX: genomic position x maps to padded index (x - ref_window_start_p0).
        # The previous formula also added padding_left_ref, double-counting the
        # left 'N' padding and shifting the mutation site to the right whenever
        # the window was clipped at the start of the contig (pos <= window_size).
        # The mutation always sits at the window centre (index == window_size).
        current_mut_idx_in_padded = p0 - ref_window_start_p0

        left_flank_seq = ref_seq_str[:current_mut_idx_in_padded]
        
        right_flank_needed_len = full_seq_len - len(left_flank_seq) - alt_allele_len
        
        if ref_allele_raw == '-': 
            # Insertion: resume after the base at p0.
            # NOTE(review): this drops the base at p0 itself from the composite
            # sequence — confirm it matches the upstream coordinate convention.
            right_flank_start_p0 = p0 + 1
        else: 
            right_flank_start_p0 = p0 + ref_allele_len

        raw_right_flank_seq = ref_genome[fasta_chrom][right_flank_start_p0 : right_flank_start_p0 + right_flank_needed_len].upper()
        
        # Pad the right flank with 'N' if it extends past the contig end.
        padded_right_flank_list = ['N'] * right_flank_needed_len
        for i, base in enumerate(raw_right_flank_seq):
            if i < len(padded_right_flank_list):
                padded_right_flank_list[i] = base
        padded_right_flank_seq = "".join(padded_right_flank_list)

        mut_seq_str_current = left_flank_seq + alt_allele_for_seq + padded_right_flank_seq
        
        if len(mut_seq_str_current) != full_seq_len:
            warnings.warn(f"内部错误：当前突变 {chrom_original}:{pos} 复合序列长度不正确。预期 {full_seq_len}，实际 {len(mut_seq_str_current)}。返回填充的 'N' 序列。")
            return default_n_seq, default_n_seq

        mutated_seq_list = list(mut_seq_str_current)

        # Overlay other mutations of this sample/chromosome falling inside the window.
        neighboring_mutations = all_chrom_mutations_df[
            (all_chrom_mutations_df['startPosition'] >= (pos - window_size)) &
            (all_chrom_mutations_df['startPosition'] <= (pos + window_size)) &
            (all_chrom_mutations_df['startPosition'] != pos) 
        ].sort_values(by='startPosition') 

        for _, neighbor_row in neighboring_mutations.iterrows():
            neighbor_pos = neighbor_row['startPosition']
            neighbor_ref_allele_raw = str(neighbor_row['referenceAllele']).upper() 
            
            neighbor_alt_allele_raw_orig = neighbor_row['variantAllele'] 
            if pd.isna(neighbor_alt_allele_raw_orig):
                neighbor_alt_allele_raw = '-' 
            else:
                neighbor_alt_allele_raw = str(neighbor_alt_allele_raw_orig).upper()

            neighbor_ref_allele_len = len(neighbor_ref_allele_raw) if neighbor_ref_allele_raw != '-' else 0
            neighbor_alt_allele_len = len(neighbor_alt_allele_raw) if neighbor_alt_allele_raw != '-' else 0
            neighbor_alt_allele_for_seq = neighbor_alt_allele_raw if neighbor_alt_allele_raw != '-' else ''

            if neighbor_alt_allele_len > max_variant_allele_length:
                continue 

            neighbor_p0 = neighbor_pos - 1

            neighbor_fasta_ref_segment_start_p0 = neighbor_p0
            neighbor_fasta_ref_segment_end_p0 = neighbor_p0 + neighbor_ref_allele_len

            if neighbor_fasta_ref_segment_start_p0 < 0 or neighbor_fasta_ref_segment_end_p0 > chrom_len:
                continue
            
            actual_neighbor_fasta_ref_segment = ref_genome[fasta_chrom][neighbor_fasta_ref_segment_start_p0:neighbor_fasta_ref_segment_end_p0].upper()

            # Skip neighbors whose declared reference allele disagrees with the FASTA.
            if neighbor_ref_allele_raw != '-' and actual_neighbor_fasta_ref_segment != neighbor_ref_allele_raw:
                continue
            
            # Only same-length substitutions can be applied without changing
            # the fixed composite-sequence length.
            if neighbor_ref_allele_len == neighbor_alt_allele_len: 
                # BUGFIX: same index mapping as above — no padding_left_ref term.
                neighbor_idx_in_mutated_list = neighbor_p0 - ref_window_start_p0
                if 0 <= neighbor_idx_in_mutated_list < len(mutated_seq_list) and \
                   (neighbor_idx_in_mutated_list + neighbor_ref_allele_len) <= len(mutated_seq_list):
                    for k in range(neighbor_ref_allele_len):
                        mutated_seq_list[neighbor_idx_in_mutated_list + k] = neighbor_alt_allele_for_seq[k]
                else:
                    warnings.warn(f"邻近突变 {chrom_original}:{neighbor_pos} 替换区域超出当前复合序列边界。跳过此邻近突变。")
            else:
                # Suppressed globally by the warnings filter at module top.
                warnings.warn(f"邻近突变 {chrom_original}:{neighbor_pos} 是 Indel 或不等长替换。为保持固定长度，跳过此邻近突变。")
                
        mut_seq_str = "".join(mutated_seq_list)

        return ref_seq_str, mut_seq_str

    except Exception as e:
        warnings.warn(f"获取 {build} {chrom_original} (解析为 {fasta_chrom}):{pos} 的长序列上下文时出错。错误: {e}。返回填充的 'N' 序列。")
        return default_n_seq, default_n_seq


def generate_long_sequences_for_evo2(input_parquet_path: str, output_dir: str, ref_genome_paths_dict: dict):
    """Load base mutation features, generate or resume long sequence contexts,
    and save the final parquet file.

    Parameters
    ----------
    input_parquet_path : parquet file with the base mutation features.
    output_dir : directory receiving ``combined_processed_mutations_grch38.parquet``.
    ref_genome_paths_dict : mapping of genome build -> FASTA path.
    """
    print(f"\n--- 正在加载基础特征数据: {input_parquet_path} ---")
    df_combined = pd.read_parquet(input_parquet_path)
    print(f"加载的基础特征数据形状: {df_combined.shape}")

    # Long-sequence generation depends on 'ncbiBuild'; normalize it first.
    if 'ncbiBuild' not in df_combined.columns:
        warnings.warn("输入数据缺少 'ncbiBuild' 列，假定所有突变均为 GRCh38。")
        df_combined['ncbiBuild'] = 'GRCh38'
    else:
        df_combined['ncbiBuild'] = df_combined['ncbiBuild'].replace({'37': 'GRCh37', '38': 'GRCh38'}).astype(str)
        # Drop rows whose build is missing (stringified NaN).
        df_combined = df_combined[df_combined['ncbiBuild'] != 'nan'].copy()

    dedup_cols_global = ['sampleId', 'chromosome', 'startPosition', 'referenceAllele', 'variantAllele', 'variantType']
    required_long_seq_cols = ['reference_long_sequence', 'mutated_long_sequence']
    output_path_full = Path(output_dir) / "combined_processed_mutations_grch38.parquet"

    # BUGFIX: ensure the long-sequence columns exist (object dtype) before any
    # .update()/isnull() below. The base-features input typically lacks them,
    # and DataFrame.update() silently ignores columns missing from the target,
    # which previously led to a KeyError on 'reference_long_sequence'.
    for col in required_long_seq_cols:
        if col not in df_combined.columns:
            df_combined[col] = None

    # Flag rows whose long sequences still need to be generated.
    df_combined['_needs_long_seq_generation'] = True 

    if output_path_full.exists():
        try:
            parquet_schema = pq.read_schema(output_path_full)
            existing_cols = set(parquet_schema.names)

            if all(col in existing_cols for col in required_long_seq_cols):
                print(f"\n检测到最终文件 {output_path_full} 已存在并包含所有长序列上下文列。尝试加载现有长序列数据。")
                
                cols_to_load = dedup_cols_global + required_long_seq_cols
                df_existing_long_seq = pd.read_parquet(output_path_full, columns=cols_to_load)
                
                # Align on the dedup key and copy over existing sequences.
                df_combined = df_combined.set_index(dedup_cols_global)
                df_existing_long_seq = df_existing_long_seq.set_index(dedup_cols_global)
                
                df_combined.update(df_existing_long_seq[required_long_seq_cols])
                df_combined = df_combined.reset_index()
                
                df_combined['_needs_long_seq_generation'] = df_combined['reference_long_sequence'].isnull() | \
                                                             df_combined['mutated_long_sequence'].isnull()
                
                if df_combined['_needs_long_seq_generation'].sum() == 0:
                    print("所有长序列上下文已成功加载且完整。")
                else:
                    warnings.warn(f"警告：部分突变 ({df_combined['_needs_long_seq_generation'].sum()} 条) 在现有长序列文件中未找到对应数据或为NaN，将尝试重新生成。")
            else:
                print(f"\n最终文件 {output_path_full} 存在，但缺少长序列上下文列。将重新生成所有长序列。")
        except Exception as e:
            print(f"\n读取最终文件 {output_path_full} 的元数据或数据时出错 ({e})。将重新生成所有长序列上下文。")
            # BUGFIX: a failure between set_index() and reset_index() used to
            # leave df_combined indexed by the dedup columns, breaking the
            # column accesses below. Restore a flat index before continuing.
            if df_combined.index.names != [None]:
                df_combined = df_combined.reset_index()
    else:
        print(f"\n最终文件 {output_path_full} 不存在。将生成所有长序列上下文。")

    # Only compute rows that still need sequences.
    df_to_process_for_long_seq = df_combined[df_combined['_needs_long_seq_generation']].copy()
    
    if not df_to_process_for_long_seq.empty:
        print(f"  正在为 {len(df_to_process_for_long_seq)} 条突变获取长序列上下文 (窗口大小: {LONG_SEQUENCE_WINDOW_SIZE})，并整合窗口内其他突变...")
        ref_genomes = load_all_ref_genomes(ref_genome_paths_dict) 
        
        all_long_seq_results = []

        grouped_by_sample = df_to_process_for_long_seq.groupby('sampleId')
        
        for sample_id, df_sample_chunk in tqdm(grouped_by_sample, desc="处理样本以获取复合长序列"):
            grouped_by_chrom_in_sample = df_sample_chunk.groupby('chromosome')
            for chrom_original, df_chrom_sample_chunk in grouped_by_chrom_in_sample:
                # Neighbor lookup needs ALL mutations of this sample/chromosome,
                # so pull them from the full df_combined, not just the chunk.
                full_df_chrom_sample = df_combined[
                    (df_combined['sampleId'] == sample_id) & 
                    (df_combined['chromosome'] == chrom_original)
                ]
                for _, current_mutation_row in df_chrom_sample_chunk.iterrows():
                    ref_seq, mut_seq = get_composite_long_sequence_contexts(
                        current_mutation_row,
                        full_df_chrom_sample,
                        ref_genomes,
                        LONG_SEQUENCE_WINDOW_SIZE,
                        MAX_VARIANT_ALLELE_LENGTH 
                    )
                    # BUGFIX: include every dedup key — 'variantType' was missing,
                    # so set_index(dedup_cols_global) below raised KeyError on the
                    # results frame whenever new sequences were generated.
                    all_long_seq_results.append({
                        'sampleId': current_mutation_row['sampleId'],
                        'chromosome': current_mutation_row['chromosome'],
                        'startPosition': current_mutation_row['startPosition'],
                        'referenceAllele': current_mutation_row['referenceAllele'],
                        'variantAllele': current_mutation_row['variantAllele'],
                        'variantType': current_mutation_row['variantType'],
                        'reference_long_sequence': ref_seq,
                        'mutated_long_sequence': mut_seq
                    })
            
        df_new_long_seq_results = pd.DataFrame(all_long_seq_results)
        
        # Merge the newly generated sequences back into df_combined.
        df_combined = df_combined.set_index(dedup_cols_global)
        df_new_long_seq_results = df_new_long_seq_results.set_index(dedup_cols_global)
        df_combined.update(df_new_long_seq_results[required_long_seq_cols])
        df_combined = df_combined.reset_index()

        print(f"  已生成 {len(df_new_long_seq_results)} 条新的长序列上下文。")
    else:
        print("  所有长序列上下文已存在，无需重新生成。")

    # Drop the temporary bookkeeping column.
    df_combined = df_combined.drop(columns=['_needs_long_seq_generation'])

    # Remove rows whose contexts could not be generated (all-'N' placeholders).
    initial_rows_long_seq_filter = df_combined.shape[0]
    full_n_sequence = 'N' * (2 * LONG_SEQUENCE_WINDOW_SIZE + 1)
    df_combined = df_combined[~((df_combined['reference_long_sequence'] == full_n_sequence) | (df_combined['mutated_long_sequence'] == full_n_sequence))].copy()
    removed_long_seq_rows = initial_rows_long_seq_filter - df_combined.shape[0]
    if removed_long_seq_rows > 0:
        print(f"  已移除 {removed_long_seq_rows} 行无法生成有效长序列上下文的突变数据。")
    else:
        print(f"  所有突变都成功生成了长序列上下文。")
    print(f"  生成长序列上下文后数据形状: {df_combined.shape}")

    # 6. Arrange and persist the final DataFrame.
    print("\n--- 整理最终数据并保存 ---")
    # NOTE(review): assumes 'chromosome_id' exists in the base features — confirm upstream.
    df_combined = df_combined.sort_values(by=['sampleId', 'chromosome_id', 'startPosition'])

    # Columns to keep in the final output (only those actually present survive the filter).
    processed_cols = [
        'sampleId', 'chromosome_id', 
        'exon_strand_id', 
        'pentanucleotide_context_id',
        'pentanucleotide_context_str',
        'reference_long_sequence',   
        'mutated_long_sequence',     
        'studyId',
        'chromosome', 
        'startPosition', 
        'referenceAllele', 
        'variantAllele', 
        'variantType', 
        'pos_bin_id', 
    ]
    processed_cols = [col for col in processed_cols if col in df_combined.columns]
    df_processed = df_combined[processed_cols]

    print(f"正在保存所有处理后的合并数据到 {output_path_full} ({df_processed.shape[0]} 行)...")
    df_processed.to_parquet(output_path_full, index=False)
    print("所有数据保存完成。")

    print("\n所有长序列生成任务完成。")


if __name__ == '__main__':
    # Warm the module-level reference-genome cache up front.
    load_all_ref_genomes(REFERENCE_GENOME_FASTAS)

    # Intermediate file produced by the previous pipeline step.
    input_path = Path(COMBINED_OUTPUT_DIR) / "combined_processed_mutations_base_features.parquet"

    generate_long_sequences_for_evo2(
        input_parquet_path=input_path,
        output_dir=COMBINED_OUTPUT_DIR,
        ref_genome_paths_dict=REFERENCE_GENOME_FASTAS,
    )