import fire
import numpy as np
import pandas as pd
import json
import warnings
import hashlib
from pathlib import Path
import math
from typing import Optional, List, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader, Sampler, Dataset, DistributedSampler
import torchmetrics
import torchmetrics.classification as tm_cls

import pytorch_lightning as pl
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import TQDMProgressBar, EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import CSVLogger
from pytorch_lightning.strategies import DDPStrategy

# Learning-rate schedulers used by configure_optimizers
from torch.optim.lr_scheduler import CosineAnnealingLR, ReduceLROnPlateau, LambdaLR

from light.core.logger import ConsoleLogger
from light.core.random import worker_init_fn_seed

from muta.data.mutation_data import MutationPretrainDataset, MutationClassificationDataset
from muta.model.mutation_performer import PerformerMutationLM, PerformerMutationClassifier
from muta.model.performer import Performer, default

# Allow TF32 matmuls on Ampere+ GPUs: trades a little float32 precision
# for substantially faster training.
torch.set_float32_matmul_precision('medium') 

def hash_string_to_int(s):
    """Hash a string to an integer via MD5, for deterministic data splitting."""
    digest = hashlib.md5(s.encode()).hexdigest()
    return int(digest, 16)

class LengthBasedBatchSampler(Sampler):
    """
    Batch sampler that buckets samples by length (per-sample mutation count)
    so each batch contains samples of similar length, minimizing padding.

    Wraps either a plain Dataset or a DistributedSampler. In the DDP case the
    rank-local indices produced by the DistributedSampler are bucketed, and
    all ranks agree on a common number of batches (the minimum across ranks)
    via an all_gather, so no rank runs out of batches before the others.

    The underlying dataset must expose `sample_ids` (index -> sample id) and
    `sample_id_to_mutation_count` (sample id -> length).
    """
    def __init__(self, data_source: Union[Dataset, DistributedSampler], batch_size: int,
                 bucket_boundaries: Optional[List[int]] = None,
                 drop_last: bool = False, shuffle: bool = True):
        self.batch_size = batch_size
        self.drop_last = drop_last
        self.shuffle = shuffle

        # Upper edges of the length buckets; a sample goes into the first
        # bucket whose boundary is >= its length.
        _resolved_bucket_boundaries = default(bucket_boundaries, [10, 25, 50, 100, 200, 512, 1024, 2048, 4096, 8192, 16384, 25629])
        self.bucket_boundaries = sorted(_resolved_bucket_boundaries)

        self.is_distributed_sampler = isinstance(data_source, DistributedSampler)

        if self.is_distributed_sampler:
            # Materialize this rank's shard of indices for the current epoch.
            self.original_dataset = data_source.dataset
            self.rank_specific_indices = list(data_source)
            self.ds_drop_last = data_source.drop_last
        else:
            self.original_dataset = data_source
            self.rank_specific_indices = list(range(len(data_source)))
            self.ds_drop_last = False

        if not hasattr(self.original_dataset, 'sample_id_to_mutation_count') or \
           not hasattr(self.original_dataset, 'sample_ids'):
            raise AttributeError("The dataset must have 'sample_id_to_mutation_count' and 'sample_ids' attributes for LengthBasedBatchSampler.")

        # Length (mutation count) for each rank-local sample index.
        self.sample_lengths = [
            self.original_dataset.sample_id_to_mutation_count[self.original_dataset.sample_ids[idx]]
            for idx in self.rank_specific_indices
        ]

        self.indexed_sample_lengths = [(self.rank_specific_indices[i], length)
                                       for i, length in enumerate(self.sample_lengths)]

        # float('inf') is the catch-all bucket for samples longer than the
        # largest configured boundary.
        self.buckets = {boundary: [] for boundary in self.bucket_boundaries + [float('inf')]}

        for original_idx, length in self.indexed_sample_lengths:
            assigned = False
            for boundary in self.bucket_boundaries:
                if length <= boundary:
                    self.buckets[boundary].append(original_idx)
                    assigned = True
                    break
            if not assigned:
                self.buckets[float('inf')].append(original_idx)

        # Keep only non-empty buckets.
        self.buckets = {k: v for k, v in self.buckets.items() if v}

        self._precalculated_batches = self._generate_all_batches_for_rank()
        self._local_num_batches = len(self._precalculated_batches)

        world_size = torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1
        if torch.distributed.is_initialized():
            # Agree on a common batch count (minimum across ranks) so every
            # rank yields the same number of batches and DDP does not hang
            # waiting for a rank that ran out early.
            current_rank = torch.distributed.get_rank()
            # NOTE(review): f'cuda:{current_rank}' assumes the global rank maps
            # 1:1 to a local CUDA device (single-node setup) — confirm for
            # multi-node training, where local device index != global rank.
            local_num_batches_tensor = torch.tensor(self._local_num_batches, dtype=torch.int64, device=f'cuda:{current_rank}')
            gathered_num_batches = [torch.zeros_like(local_num_batches_tensor) for _ in range(world_size)]
            torch.distributed.all_gather(gathered_num_batches, local_num_batches_tensor)

            self._consistent_total_batches = min([t.item() for t in gathered_num_batches])
        else:
            self._consistent_total_batches = self._local_num_batches

    def _generate_all_batches_for_rank(self):
        """Chunk every bucket into batches of `batch_size` (honoring drop_last)."""
        all_batches = []
        bucket_keys = list(self.buckets.keys())

        for boundary in bucket_keys:
            bucket = self.buckets[boundary]

            for i in range(0, len(bucket), self.batch_size):
                batch = bucket[i:i + self.batch_size]
                if self.drop_last and len(batch) < self.batch_size:
                    continue
                all_batches.append(batch)

        return all_batches

    def __iter__(self):
        """Yield (optionally shuffled) batches, truncated to the agreed count."""
        batches_to_yield = list(self._precalculated_batches)
        if self.shuffle:
            # NOTE(review): uses NumPy's global RNG, so batch order is only
            # reproducible if np.random is seeded externally; also, batches
            # were fixed in __init__ and are NOT re-bucketed per epoch.
            np.random.shuffle(batches_to_yield)

        batches_to_yield = batches_to_yield[:self._consistent_total_batches]

        return iter(batches_to_yield)

    def __len__(self):
        # Number of batches this sampler will actually yield (rank-consistent).
        return self._consistent_total_batches

def collate_fn_for_mutation_data(batch: List[Tuple[dict, torch.Tensor]]):
    """
    Collate a list of (inputs_dict, label) pairs into one padded batch.

    Every 1-D tensor is right-padded with 0 to the longest sequence in the
    batch. 2-D tensors are padded along the sequence dim (dim 0) with 0.0,
    except 'genomic_attention_mask_base', which is padded on both dims with
    -inf so padded positions stay masked under softmax.

    Args:
        batch: list of (inputs, label) tuples; all inputs dicts are expected
            to share the same keys (keys are taken from the first sample).

    Returns:
        (padded_inputs, labels): dict of stacked/padded tensors of shape
        (B, max_len, ...) and the stacked labels. An empty batch yields
        ({}, empty tensor) with a warning.
    """
    if len(batch) == 0:
        warnings.warn("collate_fn received an empty batch!")
        return {}, torch.tensor([])

    inputs_list = [item[0] for item in batch]
    labels_list = [item[1] for item in batch]

    def _seq_len(inputs: dict) -> int:
        # Prefer the attention mask to determine a sample's sequence length;
        # fall back to the context ids when the mask is absent or scalar.
        if 'attention_mask' in inputs and inputs['attention_mask'].dim() > 0:
            return inputs['attention_mask'].shape[0]
        if 'pentanucleotide_context_ids' in inputs and inputs['pentanucleotide_context_ids'].dim() > 0:
            return inputs['pentanucleotide_context_ids'].shape[0]
        return 0

    max_batch_seq_len = max(_seq_len(inputs) for inputs in inputs_list)

    if max_batch_seq_len == 0:
        warnings.warn("collate_fn: 批次中所有样本的有效序列长度为0。将 max_batch_seq_len 设为 1。")
        max_batch_seq_len = 1

    padded_inputs = {}
    for key in inputs_list[0].keys():
        tensors_to_pad = []
        for inputs in inputs_list:
            tensor = inputs[key]
            current_seq_len = tensor.shape[0] if tensor.dim() > 0 else 0

            if tensor.dim() == 1:
                pad_len = max_batch_seq_len - current_seq_len
                padded_tensor = F.pad(tensor, (0, pad_len), 'constant', 0)
            elif tensor.dim() == 2:
                pad_h = max_batch_seq_len - tensor.shape[0]
                if key == 'genomic_attention_mask_base':
                    # Square attention mask: pad both dims with -inf so the
                    # new positions are fully masked. (Key currently unused by
                    # the dataset, kept for forward compatibility.)
                    pad_w = max_batch_seq_len - tensor.shape[1]
                    padded_tensor = F.pad(tensor, (0, pad_w, 0, pad_h), 'constant', float('-inf'))
                else:
                    # (seq_len, feature_dim) tensors such as
                    # 'ref_evo2_embeddings': pad the sequence dim only.
                    padded_tensor = F.pad(tensor, (0, 0, 0, pad_h), 'constant', 0.0)
            else:
                warnings.warn(f"collate_fn: 遇到未知维度的张量，键: {key}, 维度: {tensor.dim()}。未填充。")
                padded_tensor = tensor
            tensors_to_pad.append(padded_tensor)

        padded_inputs[key] = torch.stack(tensors_to_pad) if tensors_to_pad else torch.tensor([])

    labels = torch.stack(labels_list)

    return padded_inputs, labels


class MutationDataModule(pl.LightningDataModule):
    """
    PyTorch Lightning DataModule for DNA mutation data.

    Responsibilities:
      * load the mutation-level parquet table and merge the per-sample
        sequencing method from the clinical table;
      * split samples deterministically 80/10/10 by MD5-hash order of
        sampleId (stable across runs and machines);
      * build pretrain or classification datasets and serve them through
        length-bucketed, DDP-aware dataloaders.
    """
    def __init__(self, df_path, data_root, data_args=None,
                 task='pretrain',
                 col_group='dataset',
                 batch_size=32, num_workers=0, pin_memory=True, shuffle=True,
                 clinical_df_path=None,
                 col_label='clin_CANCER_TYPE_manual_mapped_id',
                 col_sequencing_method='sequencing_method_str_id',
                 col_exon_strand='exon_strand_id',
                 bucket_batch_size: int = 32,
                 bucket_boundaries: Optional[List[int]] = None,
                 stochastic_chrom_shuffling: bool = False,
                 random_unknown_strand_prob: float = 0.01,
                 evo2_embeddings_dir: Optional[str] = None,
                 evo2_embedding_layer: Optional[Union[str, List[str]]] = None,
                 max_seq_len: int = 512,  # forwarded to the datasets
                 ):
        """
        Args:
            df_path: parquet file with mutation records (must contain 'sampleId').
            data_root: root directory handed to the dataset classes.
            data_args: extra keyword arguments forwarded to the dataset class
                (None means no extras; avoids a shared mutable {} default).
            task: 'pretrain' or 'classify'.
            batch_size: accepted for interface compatibility but unused —
                batching is governed entirely by `bucket_batch_size`.
            clinical_df_path: parquet file with per-sample clinical columns;
                required (supplies the sequencing-method column and, for
                classification, the label column).
            bucket_batch_size: number of samples per length-bucketed batch.
            bucket_boundaries: upper edges of the length buckets (default
                covers 10..25629 mutations).
            evo2_embedding_layer: a layer name or list of names; passed
                through unchanged, the datasets handle both forms.
        """
        super().__init__()
        self.df_path = df_path
        self.clinical_df_path = clinical_df_path
        self.col_label = col_label
        self.col_sequencing_method = col_sequencing_method
        self.col_exon_strand = col_exon_strand
        self.data_root = Path(data_root)
        self.data_args = {} if data_args is None else data_args
        self.task = task
        self.col_group = col_group
        self.num_workers = num_workers
        self.shuffle = shuffle
        self.pin_memory = pin_memory
        self.bucket_batch_size = bucket_batch_size
        self.bucket_boundaries = default(bucket_boundaries, [10, 25, 50, 100, 200, 512, 1024, 2048, 4096, 8192, 16384, 25629])
        self.stochastic_chrom_shuffling = stochastic_chrom_shuffling
        self.random_unknown_strand_prob = random_unknown_strand_prob
        self.evo2_embeddings_dir = evo2_embeddings_dir
        self.evo2_embedding_layer = evo2_embedding_layer
        self.max_seq_len = max_seq_len

    def prepare_data(self):
        # Data files are expected to already exist on disk; nothing to fetch.
        pass

    def setup(self, stage=None):
        """Load and merge tables, split samples deterministically, build datasets."""
        df_mutations = pd.read_parquet(self.df_path)

        if self.clinical_df_path is None:
            raise ValueError("必须提供 clinical_df_path 以获取测序方法信息。")

        df_clinical_full = pd.read_parquet(self.clinical_df_path)
        if 'sampleId' not in df_clinical_full.columns or self.col_sequencing_method not in df_clinical_full.columns:
            raise ValueError(f"临床数据文件 '{self.clinical_df_path}' 必须包含 'sampleId' 和 '{self.col_sequencing_method}' 列。")

        # One sequencing-method row per sample; left join keeps all mutations.
        df_clinical_for_merge = df_clinical_full[['sampleId', self.col_sequencing_method]].drop_duplicates(subset=['sampleId'])

        df_mutations = pd.merge(df_mutations, df_clinical_for_merge, on='sampleId', how='left')

        nan_sequencing_methods = df_mutations[self.col_sequencing_method].isnull().sum()
        if nan_sequencing_methods > 0:
            warnings.warn(f"在合并突变数据和临床数据后，有 {nan_sequencing_methods} 个突变记录的 '{self.col_sequencing_method}' 为 NaN。这些将被填充为 ID 0。")
            # Assign back rather than fillna(inplace=True) on a column
            # selection: the inplace form on an intermediate is deprecated in
            # pandas and may silently fail to modify the parent frame.
            df_mutations[self.col_sequencing_method] = df_mutations[self.col_sequencing_method].fillna(0)
        df_mutations[self.col_sequencing_method] = df_mutations[self.col_sequencing_method].astype(int)

        # Deterministic split: order samples by the MD5 hash of their id so
        # the 80/10/10 partition is stable across runs and machines.
        unique_sample_ids = df_mutations['sampleId'].unique()

        hashed_sample_ids = sorted([(hash_string_to_int(str(s_id)), s_id) for s_id in unique_sample_ids])
        sorted_sample_ids = [s_id for _, s_id in hashed_sample_ids]

        train_ratio = 0.8
        valid_ratio = 0.1

        num_samples = len(sorted_sample_ids)
        num_train = int(num_samples * train_ratio)
        num_valid = int(num_samples * valid_ratio)

        train_sample_ids = sorted_sample_ids[:num_train]
        valid_sample_ids = sorted_sample_ids[num_train : num_train + num_valid]
        test_sample_ids = sorted_sample_ids[num_train + num_valid :]

        print(f"数据划分（确定性哈希）：训练集 {len(train_sample_ids)} 样本 ({train_ratio*100}%)，验证集 {len(valid_sample_ids)} 样本 ({valid_ratio*100}%)，测试集 {len(test_sample_ids)} 样本 ({(1-train_ratio-valid_ratio)*100}%).")

        dataset_common_args = {
            "data_root": self.data_root,
            "col_sequencing_method": self.col_sequencing_method,
            "col_exon_strand": self.col_exon_strand,
            "stochastic_chrom_shuffling": self.stochastic_chrom_shuffling,
            "random_unknown_strand_prob": self.random_unknown_strand_prob,
            "evo2_embeddings_dir": self.evo2_embeddings_dir,
            "evo2_embedding_layer": self.evo2_embedding_layer,  # str or list of layer names
            "max_seq_len": self.max_seq_len,
            **self.data_args
        }

        if self.task == 'pretrain':
            DatasetCls = MutationPretrainDataset
            df_train = df_mutations[df_mutations['sampleId'].isin(train_sample_ids)].copy()
            df_valid = df_mutations[df_mutations['sampleId'].isin(valid_sample_ids)].copy()
            df_test = df_mutations[df_mutations['sampleId'].isin(test_sample_ids)].copy()

            # training=True for every split: the pretrain masking must also be
            # applied when computing validation/test loss.
            self.ds_train = DatasetCls(df_train, training=True, **dataset_common_args)
            self.ds_valid = DatasetCls(df_valid, training=True, **dataset_common_args)
            self.ds_test = DatasetCls(df_test, training=True, **dataset_common_args)

        elif self.task == 'classify':
            DatasetCls = MutationClassificationDataset
            # Inner join: samples without a clinical label row are dropped.
            df_merged_for_classify = pd.merge(df_mutations, df_clinical_full[['sampleId', self.col_label]],
                                              on='sampleId', how='inner')

            initial_rows = df_merged_for_classify.shape[0]
            df_merged_for_classify.dropna(subset=[self.col_label], inplace=True)
            if df_merged_for_classify.shape[0] < initial_rows:
                warnings.warn(f"分类任务：移除了 {initial_rows - df_merged_for_classify.shape[0]} 行因缺失标签而无法使用的样本。")

            df_train = df_merged_for_classify[df_merged_for_classify['sampleId'].isin(train_sample_ids)].copy()
            df_valid = df_merged_for_classify[df_merged_for_classify['sampleId'].isin(valid_sample_ids)].copy()
            df_test = df_merged_for_classify[df_merged_for_classify['sampleId'].isin(test_sample_ids)].copy()

            print(f"分类任务数据划分（实际）：训练集 {len(df_train['sampleId'].unique())} 样本，验证集 {len(df_valid['sampleId'].unique())} 样本，测试集 {len(df_test['sampleId'].unique())} 样本。")

            self.ds_train = DatasetCls(df_train, training=True, col_label=self.col_label, **dataset_common_args)
            self.ds_valid = DatasetCls(df_valid, training=False, col_label=self.col_label, **dataset_common_args)
            self.ds_test = DatasetCls(df_test, training=False, col_label=self.col_label, **dataset_common_args)

        else:
            raise NotImplementedError(f'Not implemented task: {self.task}')

    def _make_dataloader(self, dataset, shuffle: bool, drop_last: bool):
        """Build one DDP-aware, length-bucketed DataLoader over `dataset`.

        Shared by train/val/test loaders; only `shuffle` and `drop_last`
        differ between them.
        """
        if torch.distributed.is_initialized():
            # Under DDP each rank gets its own shard; LengthBasedBatchSampler
            # then buckets the rank-local indices.
            data_source = DistributedSampler(dataset, shuffle=shuffle, drop_last=drop_last)
        else:
            data_source = dataset

        batch_sampler = LengthBasedBatchSampler(
            data_source,
            batch_size=self.bucket_batch_size,
            bucket_boundaries=self.bucket_boundaries,
            drop_last=drop_last,
            shuffle=shuffle
        )
        return DataLoader(dataset, num_workers=self.num_workers,
                          pin_memory=self.pin_memory, worker_init_fn=worker_init_fn_seed,
                          batch_sampler=batch_sampler, collate_fn=collate_fn_for_mutation_data)

    def train_dataloader(self):
        return self._make_dataloader(self.ds_train, shuffle=self.shuffle, drop_last=True)

    def val_dataloader(self):
        return self._make_dataloader(self.ds_valid, shuffle=False, drop_last=False)

    def test_dataloader(self):
        return self._make_dataloader(self.ds_test, shuffle=False, drop_last=False)

    def teardown(self, stage=None):
        # No resources to release.
        pass


def get_model(task='pretrain', model_name='performer', num_classes=1, model_path=None, model_args=None):
    """
    Build (or load) a mutation model for the given task.

    Args:
        task: 'pretrain' (masked LM) or 'classify'.
        model_name: kept for interface compatibility; not used for dispatch.
        num_classes: kept for interface compatibility; the classifier head
            size must be supplied via model_args['num_classes'].
        model_path: path to a torch-saved model. For 'classify', only the
            encoder weights are transferred (lm_head / cls_token skipped);
            otherwise the saved model is returned as-is.
        model_args: overrides for the default model dimensions plus all
            required vocabulary-size parameters.

    Returns:
        A PerformerMutationLM or PerformerMutationClassifier instance.

    Raises:
        ValueError: a required size parameter is missing from model_args.
        NotImplementedError: unknown task.
    """
    if model_args is None:  # avoid the shared-mutable-default pitfall
        model_args = {}

    default_model_dims = dict(
        dim=64,
        depth=3,
        heads=4,
        dim_head=16,
    )
    final_model_params = {**default_model_dims, **model_args}

    if model_path is None:  # create a new model from scratch
        if task == 'pretrain':
            if final_model_params.get('num_pentanucleotide_contexts') is None:
                raise ValueError("num_pentanucleotide_contexts must be provided in model_args for pretrain task.")
            # LM-head output size is num_pentanucleotide_contexts + 2
            # (the two extra ids are PAD_ID and MASK_ID).
            model = PerformerMutationLM(**final_model_params)
        elif task == 'classify':
            if 'num_classes' not in final_model_params:
                raise ValueError("num_classes must be provided in model_args for classification task.")
            model = PerformerMutationClassifier(**final_model_params)
        else:
            raise NotImplementedError(f'The task is not implemented: {task}')
    else:  # load from model_path (fine-tuning or testing)
        # SECURITY: torch.load with weights_only=False unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        if task == 'classify':
            print(f"Loading pre-trained LM encoder weights from {model_path} for classification fine-tuning.")
            pretrain_lm_model = torch.load(model_path, weights_only=False)

            if 'num_classes' not in final_model_params:
                raise ValueError("num_classes must be provided in model_args for classification task when loading pre-trained model.")
            model = PerformerMutationClassifier(**final_model_params)

            model_state_dict = model.state_dict()
            pretrained_state_dict = pretrain_lm_model.state_dict()

            # Transfer encoder weights only: drop lm_head / cls_token entries
            # and anything the classifier does not have.
            pretrained_dict = {k: v for k, v in pretrained_state_dict.items()
                               if k in model_state_dict and 'lm_head' not in k and 'cls_token' not in k}

            model_state_dict.update(pretrained_dict)

            model.load_state_dict(model_state_dict, strict=False)
            print("Successfully loaded pre-trained encoder weights into classifier for fine-tuning (strict=False).")
        else:  # task is pretrain, or loading a full classifier model
            model = torch.load(model_path, weights_only=False)
            if task == 'pretrain':
                num_pentanucleotide_contexts = final_model_params.get('num_pentanucleotide_contexts')
                if num_pentanucleotide_contexts is None:
                    raise ValueError("num_pentanucleotide_contexts must be provided in model_args for pretrain task when loading model.")
                # Sanity-check the loaded LM head against the expected vocab.
                expected_lm_head_dim = num_pentanucleotide_contexts + 2
                if hasattr(model, 'lm_head') and model.lm_head.out_features != expected_lm_head_dim:
                    warnings.warn(f"加载的预训练模型 lm_head 维度 ({model.lm_head.out_features}) 与预期 ({expected_lm_head_dim}) 不匹配。这可能导致问题。")

    return model


class MutationPretrainModelModule(pl.LightningModule):
    """
    LightningModule for the mutation pre-training task.

    The wrapped model returns a dict containing per-position 'logits'; the
    loss is token-level cross-entropy against the label sequence, with
    `ignore_index` (PAD) positions excluded. Validation reports mean loss and
    perplexity; the test epoch additionally computes token accuracy and
    macro/weighted F1 over non-PAD positions.
    """
    def __init__(self, model_name, model, num_classes=None, output_dir=None,
                 lr=0.001, model_args=None, data_args=None,
                 ignore_index=0, # PAD_ID is usually 0
                 weight_decay: float = 0.0,
                 lr_scheduler_type: str = 'none', # 'none' | 'cosine' | 'plateau'
                 warmup_epochs: int = 0,
                 n_epoch: int = 10, # total epochs; used to size the scheduler
                 ):
        super().__init__()
        self.save_hyperparameters(ignore=['model'])
        if model_args is None: model_args = {}
        if data_args is None: data_args = {}
        self.lr = lr
        self.num_classes = num_classes
        self.model_name = model_name
        self.model_args = model_args
        self.model = model
        self.IGNORE_INDEX = ignore_index
        self.data_args = data_args
        self.weight_decay = weight_decay
        self.lr_scheduler_type = lr_scheduler_type
        self.warmup_epochs = warmup_epochs
        self.n_epoch = n_epoch

        self.output_dir = output_dir
        if self.output_dir is not None:
            # Predictions (if written) go under <output_dir>/pred.
            self.output_dir = Path(self.output_dir) / 'pred'
            self.output_dir.mkdir(parents=True, exist_ok=True)

        self.train_loss = None
        self.validation_step_outputs = []
        self.test_step_outputs = []

        # Metric class count = LM vocabulary size: pentanucleotide contexts
        # plus two special ids (PAD_ID, MASK_ID).
        if 'num_pentanucleotide_contexts' in model_args:
            actual_num_classes = model_args['num_pentanucleotide_contexts'] + 2
        else:
            actual_num_classes = self.num_classes # Fallback, but should be provided

        self.metrics = nn.ModuleDict({
            'accuracy': tm_cls.Accuracy(task="multiclass", num_classes=actual_num_classes, ignore_index=self.IGNORE_INDEX),
            'f1_macro': tm_cls.F1Score(task="multiclass", num_classes=actual_num_classes, average='macro', ignore_index=self.IGNORE_INDEX),
            'f1_weighted': tm_cls.F1Score(task="multiclass", num_classes=actual_num_classes, average='weighted', ignore_index=self.IGNORE_INDEX),
        })


    def get_progress_bar_dict(self):
        # Drop the noisy 'v_num' entry from the progress bar.
        # NOTE(review): this hook was renamed/removed in newer
        # pytorch_lightning releases — confirm it is still invoked by the
        # installed version.
        tqdm_dict = super().get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict

    def configure_optimizers(self):
        """AdamW, optionally with linear warmup plus a cosine/plateau schedule."""
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)

        if self.lr_scheduler_type == 'none':
            return optimizer

        # Linear warmup factor: epoch/warmup_epochs during warmup, then 1.0.
        def warmup_lambda(epoch):
            if epoch < self.warmup_epochs:
                return float(epoch) / float(max(1, self.warmup_epochs))
            return 1.0 # After warmup, factor is 1.0

        warmup_scheduler = LambdaLR(optimizer, lr_lambda=warmup_lambda)

        if self.lr_scheduler_type == 'cosine':
            # Cosine annealing sized to the post-warmup epochs.
            # T_max should be total epochs - warmup_epochs for the cosine part;
            # clamp to >= 1 to avoid a zero/negative T_max.
            # NOTE(review): when warmup_epochs > 0 BOTH schedulers are
            # returned and Lightning steps both every epoch, so the cosine
            # decay runs concurrently with the warmup rather than after it —
            # confirm this is intended (SequentialLR would chain them).
            T_max_for_cosine = max(1, self.n_epoch - self.warmup_epochs)
            main_scheduler = CosineAnnealingLR(optimizer, T_max=T_max_for_cosine, eta_min=self.lr * 0.1) # Min LR is 10% of initial
            scheduler_config = {
                'scheduler': main_scheduler,
                'interval': 'epoch',
                'frequency': 1,
                'monitor': 'val_loss', # CosineAnnealingLR doesn't strictly need monitor, but PL often expects one for 'epoch' interval
                'name': 'CosineAnnealingLR'
            }
            if self.warmup_epochs > 0:
                return [optimizer], [
                    {'scheduler': warmup_scheduler, 'interval': 'epoch', 'frequency': 1, 'name': 'WarmupLR'},
                    scheduler_config
                ]
            else:
                return [optimizer], [scheduler_config]

        elif self.lr_scheduler_type == 'plateau':
            main_scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3, verbose=True)
            scheduler_config = {
                'scheduler': main_scheduler,
                'interval': 'epoch',
                'frequency': 1,
                'monitor': 'val_loss', # ReduceLROnPlateau requires monitor
                'name': 'ReduceLROnPlateau'
            }
            if self.warmup_epochs > 0:
                return [optimizer], [
                    {'scheduler': warmup_scheduler, 'interval': 'epoch', 'frequency': 1, 'name': 'WarmupLR'},
                    scheduler_config
                ]
            else:
                return [optimizer], [scheduler_config]

        # Fallback if scheduler type is not recognized but not 'none'
        warnings.warn(f"Unknown lr_scheduler_type: {self.lr_scheduler_type}. No scheduler will be used beyond warmup.")
        if self.warmup_epochs > 0:
            return [optimizer], [{'scheduler': warmup_scheduler, 'interval': 'epoch', 'frequency': 1, 'name': 'WarmupLR'}]
        return optimizer


    def forward(self, x):
        # Delegate to the wrapped model (returns a dict with 'logits', ...).
        return self.model(x)

    def on_train_start(self):
        """Log the run's hyperparameters (plus all model args) once."""
        log_hyperparams = {
            "model_name": self.model_name,
            "num_classes": self.num_classes,
            "data_args": self.data_args,
            "lr": self.lr,
            "weight_decay": self.weight_decay,
            "lr_scheduler_type": self.lr_scheduler_type,
            "warmup_epochs": self.warmup_epochs,
            "n_epoch": self.n_epoch,
        }
        log_hyperparams.update(self.model_args)
        self.logger.log_hyperparams(log_hyperparams)

    def training_step(self, batch, batch_idx):
        """Token-level cross-entropy over (B*T,) flattened logits/labels."""
        x, y = batch
        model_outputs = self.model(x)
        logits = model_outputs['logits']
        # sample_embedding = model_outputs['sample_embedding'] # Not directly used for loss in pretrain

        loss = F.cross_entropy(logits.flatten(0, 1), y.flatten(0, 1), ignore_index=self.IGNORE_INDEX)

        self.train_loss = loss.detach()
        self.log("train_loss", self.train_loss, prog_bar=True, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        """Same loss as training; collects per-step losses for the epoch mean."""
        x, y = batch
        model_outputs = self.model(x)
        logits = model_outputs['logits']
        # sample_embedding = model_outputs['sample_embedding'] # Not directly used for loss in pretrain

        loss = F.cross_entropy(logits.flatten(0, 1), y.flatten(0, 1), ignore_index=self.IGNORE_INDEX)
        if self.train_loss is None: # Handle case where validation runs before any training step
            self.train_loss = torch.tensor(0.0, device=loss.device)

        self.log("val_loss", loss, prog_bar=True, sync_dist=True)
        self.validation_step_outputs.append(loss)
        return loss

    def test_step(self, batch, batch_idx):
        """Compute loss and per-position argmax predictions for the test epoch."""
        x, y = batch
        model_outputs = self.model(x)
        logits = model_outputs['logits']

        loss = F.cross_entropy(logits.flatten(0, 1), y.flatten(0, 1), ignore_index=self.IGNORE_INDEX)
        preds = torch.argmax(logits, dim=-1)

        self.test_step_outputs.append({'loss': loss, 'preds': preds, 'labels': y})
        return {'loss': loss, 'preds': preds, 'labels': y}

    def on_validation_epoch_end(self):
        """Log mean validation loss and its perplexity (exp of the mean loss)."""
        if self.validation_step_outputs:
            avg_val_loss = torch.stack(self.validation_step_outputs).mean()
            # NOTE(review): "val_loss" is also logged per-step in
            # validation_step; confirm the duplicate key is intended.
            self.log("val_loss", avg_val_loss, sync_dist=True)
            val_perplexity = torch.exp(avg_val_loss)
            self.log("val_perplexity", val_perplexity, sync_dist=True)
        self.validation_step_outputs.clear()

    def on_test_epoch_end(self):
        """Aggregate test loss/perplexity and token metrics over non-PAD positions."""
        if self.test_step_outputs:
            all_losses = torch.stack([x['loss'] for x in self.test_step_outputs])
            all_preds = torch.cat([x['preds'].flatten() for x in self.test_step_outputs], dim=0)
            all_labels = torch.cat([x['labels'].flatten() for x in self.test_step_outputs], dim=0)

            # Filter out ignored (PAD) indices before computing metrics
            valid_indices = (all_labels != self.IGNORE_INDEX)
            filtered_preds = all_preds[valid_indices]
            filtered_labels = all_labels[valid_indices]

            avg_test_loss = all_losses.mean()
            self.log("test_loss", avg_test_loss, sync_dist=True)

            test_perplexity = torch.exp(avg_test_loss)
            self.log("test_perplexity", test_perplexity, sync_dist=True)

            # Update metrics once with all collected data
            # (num_classes was fixed at metric construction in __init__)
            if filtered_preds.numel() > 0: # Only compute if there are valid predictions
                self.metrics['accuracy'].update(filtered_preds, filtered_labels)
                self.metrics['f1_macro'].update(filtered_preds, filtered_labels)
                self.metrics['f1_weighted'].update(filtered_preds, filtered_labels)

                test_acc = self.metrics['accuracy'].compute()
                test_f1_macro = self.metrics['f1_macro'].compute()
                test_f1_weighted = self.metrics['f1_weighted'].compute()

                self.log("test_acc", test_acc, sync_dist=True)
                self.log("test_f1_macro", test_f1_macro, sync_dist=True)
                self.log("test_f1_weighted", test_f1_weighted, sync_dist=True)

                # Reset so a subsequent test run starts from clean state.
                self.metrics['accuracy'].reset()
                self.metrics['f1_macro'].reset()
                self.metrics['f1_weighted'].reset()
            else:
                warnings.warn("Test set has no valid (non-PAD) tokens for metric computation.")

        self.test_step_outputs.clear()

class MutationClassificationModelModule(pl.LightningModule):
    """LightningModule for the mutation classification task.

    Wraps a classifier model, optimizes cross-entropy loss with AdamW
    (optionally with linear warmup and cosine/plateau LR scheduling),
    tracks multiclass accuracy / macro-F1 / AUROC, and dumps per-rank
    test predictions as CSV under ``output_dir / 'pred'``.
    """
    def __init__(self, model_name, model, num_classes=None, output_dir=None,
                 lr=0.001, model_args=None, data_args=None,
                 ignore_index=-100,  # classification usually does not need an ignore_index
                 weight_decay: float = 0.0,
                 lr_scheduler_type: str = 'none',  # 'none' | 'cosine' | 'plateau'
                 warmup_epochs: int = 0,
                 n_epoch: int = 10,  # total epochs; used to size the cosine schedule
                 ):
        super().__init__()
        self.save_hyperparameters(ignore=['model'])
        if model_args is None: model_args = {}
        if data_args is None: data_args = {}
        self.lr = lr
        # Fall back to the model's own class count when not given explicitly.
        self.num_classes = num_classes if num_classes is not None else model.num_classes
        self.model_name = model_name
        self.model_args = model_args
        self.model = model
        self.IGNORE_INDEX = ignore_index
        self.data_args = data_args
        self.weight_decay = weight_decay
        self.lr_scheduler_type = lr_scheduler_type
        self.warmup_epochs = warmup_epochs
        self.n_epoch = n_epoch

        # Predictions are written to <output_dir>/pred; skipped entirely when None.
        self.output_dir = output_dir
        if self.output_dir is not None:
            self.output_dir = Path(self.output_dir) / 'pred'
            self.output_dir.mkdir(parents=True, exist_ok=True)

        self.train_loss = None
        self.validation_step_outputs = []
        self.test_step_outputs = []
        metrics = {
            'accuracy': torchmetrics.classification.Accuracy(task="multiclass", num_classes=self.num_classes),
            'f1_macro': torchmetrics.classification.F1Score(task="multiclass", num_classes=self.num_classes, average='macro'),
            'auroc': torchmetrics.classification.AUROC(task="multiclass", num_classes=self.num_classes),
        }
        # ModuleDict so metric state follows the module between devices.
        self.metrics = nn.ModuleDict(metrics)

    def get_progress_bar_dict(self):
        # NOTE(review): legacy hook (removed in recent PyTorch Lightning versions);
        # kept for older-PL compatibility. Hides the noisy 'v_num' entry.
        tqdm_dict = super().get_progress_bar_dict()
        tqdm_dict.pop('v_num', None)
        return tqdm_dict

    def configure_optimizers(self):
        """Build AdamW plus an optional warmup-aware LR scheduler.

        Returns the bare optimizer for ``lr_scheduler_type='none'``.
        For 'cosine', warmup and cosine decay are chained with
        ``SequentialLR`` so exactly one scheduler drives the LR at any
        epoch.  (Previously the warmup LambdaLR and the main scheduler
        were both returned and both stepped every epoch; after warmup
        the LambdaLR kept recomputing lr = base_lr * 1.0, clobbering
        the decayed/reduced LR.)
        """
        optimizer = torch.optim.AdamW(self.parameters(), lr=self.lr, weight_decay=self.weight_decay)

        if self.lr_scheduler_type == 'none':
            return optimizer

        def warmup_lambda(epoch):
            # Linear warmup factor in [0, 1) during warmup; 1.0 afterwards.
            if epoch < self.warmup_epochs:
                return float(epoch) / float(max(1, self.warmup_epochs))
            return 1.0

        warmup_scheduler = LambdaLR(optimizer, lr_lambda=warmup_lambda)

        if self.lr_scheduler_type == 'cosine':
            # The cosine phase only covers the epochs after warmup; clamp
            # T_max to >= 1 so CosineAnnealingLR never gets a degenerate span.
            T_max_for_cosine = max(1, self.n_epoch - self.warmup_epochs)
            main_scheduler = CosineAnnealingLR(optimizer, T_max=T_max_for_cosine, eta_min=self.lr * 0.1)  # floor at 10% of initial LR
            if self.warmup_epochs > 0:
                scheduler = torch.optim.lr_scheduler.SequentialLR(
                    optimizer,
                    schedulers=[warmup_scheduler, main_scheduler],
                    milestones=[self.warmup_epochs],
                )
            else:
                scheduler = main_scheduler
            return [optimizer], [{
                'scheduler': scheduler,
                'interval': 'epoch',
                'frequency': 1,
                'name': 'CosineAnnealingLR',
            }]

        elif self.lr_scheduler_type == 'plateau':
            main_scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=3, verbose=True)
            if self.warmup_epochs > 0:
                # A concurrent warmup LambdaLR always recomputes the LR from
                # base_lr and would keep overriding the LR that
                # ReduceLROnPlateau lowers, so warmup is not supported here.
                warnings.warn("warmup_epochs is ignored with lr_scheduler_type='plateau': "
                              "a concurrent warmup scheduler would override the plateau-reduced LR.")
            return [optimizer], [{
                'scheduler': main_scheduler,
                'interval': 'epoch',
                'frequency': 1,
                'monitor': 'val_loss',  # ReduceLROnPlateau requires a monitored metric
                'name': 'ReduceLROnPlateau',
            }]

        # Unknown scheduler type: warn and fall back to warmup-only (or nothing).
        warnings.warn(f"Unknown lr_scheduler_type: {self.lr_scheduler_type}. No scheduler will be used beyond warmup.")
        if self.warmup_epochs > 0:
            return [optimizer], [{'scheduler': warmup_scheduler, 'interval': 'epoch', 'frequency': 1, 'name': 'WarmupLR'}]
        return optimizer

    def forward(self, x):
        """Return classification logits for input batch ``x``.

        Unwraps the model's dict output when present; otherwise returns
        the raw output in case the model's forward does not return a dict.
        """
        model_outputs = self.model(x)
        if isinstance(model_outputs, dict) and 'logits' in model_outputs:
            return model_outputs['logits']
        return model_outputs

    def on_train_start(self):
        # Record the run configuration once at the start of training.
        log_hyperparams = {
            "model_name": self.model_name,
            "num_classes": self.num_classes,
            "data_args": self.data_args,
            "lr": self.lr,
            "weight_decay": self.weight_decay,
            "lr_scheduler_type": self.lr_scheduler_type,
            "warmup_epochs": self.warmup_epochs,
            "n_epoch": self.n_epoch,
        }
        log_hyperparams.update(self.model_args)
        self.logger.log_hyperparams(log_hyperparams)

    def training_step(self, batch, batch_idx):
        x, y = batch
        model_outputs = self.model(x)
        logits = model_outputs['logits']  # model is expected to return a dict with 'logits'
        loss = F.cross_entropy(logits, y)
        self.train_loss = loss.detach()
        self.log("train_loss", self.train_loss, prog_bar=True, sync_dist=True)
        return loss

    def validation_step(self, batch, batch_idx):
        x, y = batch
        model_outputs = self.model(x)
        logits = model_outputs['logits']
        loss = F.cross_entropy(logits, y)

        preds = torch.argmax(logits, dim=-1)

        # Metrics accumulate incrementally; computed/reset at epoch end.
        self.metrics['accuracy'].update(preds, y)
        self.metrics['f1_macro'].update(preds, y)
        self.metrics['auroc'].update(logits, y)

        # Only the loss is consumed in on_validation_epoch_end; preds/labels/
        # logits are already folded into the metrics, so don't retain them.
        self.validation_step_outputs.append({'loss': loss})

    def on_validation_epoch_end(self):
        if self.validation_step_outputs:
            all_losses = torch.stack([x['loss'] for x in self.validation_step_outputs])

            avg_val_loss = all_losses.mean()
            self.log("val_loss", avg_val_loss, prog_bar=True, sync_dist=True)

            val_acc = self.metrics['accuracy'].compute()
            val_f1 = self.metrics['f1_macro'].compute()
            val_auroc = self.metrics['auroc'].compute()

            self.log("val_acc", val_acc, prog_bar=True, sync_dist=True)
            self.log("val_f1", val_f1, prog_bar=False, sync_dist=True)
            self.log("val_auroc", val_auroc, prog_bar=False, sync_dist=True)

            self.metrics['accuracy'].reset()
            self.metrics['f1_macro'].reset()
            self.metrics['auroc'].reset()
        self.validation_step_outputs.clear()

    def test_step(self, batch, batch_idx):
        x, y = batch
        model_outputs = self.model(x)
        logits = model_outputs['logits']
        self.test_step_outputs.append({'pred_logits': logits, 'true_label': y})
        return {'pred_logits': logits, 'true_label': y}

    def on_test_epoch_end(self):
        """Compute test metrics over all collected logits and dump per-rank predictions."""
        collected_outputs = self.test_step_outputs

        if collected_outputs:
            preds_logits = torch.cat([x['pred_logits'] for x in collected_outputs], dim=0)
            true_labels = torch.cat([x['true_label'] for x in collected_outputs], dim=0)

            test_preds = torch.argmax(preds_logits, dim=-1)
            # Functional metric calls update internal state as a side effect;
            # reset afterwards so a later test run does not inherit this state.
            test_acc = self.metrics['accuracy'](test_preds, true_labels)
            test_f1 = self.metrics['f1_macro'](test_preds, true_labels)
            test_auroc = self.metrics['auroc'](preds_logits, true_labels)

            self.log("test_acc", test_acc, sync_dist=True)
            self.log("test_f1", test_f1, sync_dist=True)
            self.log("test_auroc", test_auroc, sync_dist=True)

            self.metrics['accuracy'].reset()
            self.metrics['f1_macro'].reset()
            self.metrics['auroc'].reset()

            # output_dir defaults to None; previously this path crashed with a
            # TypeError on `None / filename`. Skip the CSV dump in that case.
            if self.output_dir is not None:
                global_rank = self.global_rank
                output_path = self.output_dir / f'test_pred.{global_rank}.csv'

                df_pred = pd.DataFrame({
                    'true_label': true_labels.cpu().numpy(),
                    'pred_label': test_preds.cpu().numpy(),
                })
                probs = torch.softmax(preds_logits, dim=-1)
                for i in range(probs.size(-1)):
                    df_pred[f'prob_class_{i}'] = probs[:, i].cpu().numpy()

                df_pred.to_csv(output_path, index=False)
        self.test_step_outputs.clear()

class MutationLightRunner(object):
    """Fire CLI entry point wiring data modules, models and PyTorch Lightning
    trainers for mutation pretraining / classification (``train`` and ``test``)."""

    def __init__(self, valid_ratio=0.2, gpus=0,
                 batch_size=8, num_workers=0, 
                 pin_memory=True,
                 not_find_unused=False,
                 static_graph=False,
                 fp16=False, accelerator='cpu',
                 monitor_name='val_loss', monitor_mode='min',
                 ):
        super(MutationLightRunner, self).__init__()

        # gpus > 0 selects CUDA with that many devices; otherwise a single CPU device.
        if gpus > 0:
            self._accelerator = 'cuda'
            self._devices = gpus
        else:
            self._accelerator = 'cpu'
            self._devices = 1 

        self._num_workers = num_workers
        self._valid_ratio = valid_ratio
        self._pin_memory = pin_memory
        self._not_find_unused = not_find_unused
        self._static_graph = static_graph
        self._fp16 = fp16
        self._monitor_name = monitor_name
        self._monitor_mode = monitor_mode

        # NOTE(review): _accelerator is only ever 'cuda' or 'cpu' above, so the
        # 'mps' branch is currently unreachable; kept for future accelerators.
        if self._accelerator == 'mps':
            self._strategy = None
        else:
            self._strategy = DDPStrategy(static_graph=self._static_graph, find_unused_parameters=True)

    def train(self, task, df_path, data_root, output_dir, *, col_group='dataset', debug=False,
              model_name='performer',
              shuffle=True, lr=0.001, n_epoch=10, patience=10,
              model_args=None, data_args=None, module_args=None, data_config=None,
              model_checkpoint=None,
              pretrain_model_path=None,
              clinical_df_path=None,
              col_label='clin_CANCER_TYPE_manual_mapped_id',
              col_sequencing_method='sequencing_method_str_id',
              col_exon_strand='exon_strand_id', 
              gradient_clip_val=0.0,
              limit_train_batches=1.0,
              limit_val_batches=1.0,
              bucket_batch_size: int = 32,
              bucket_boundaries: Optional[List[int]] = None,
              stochastic_chrom_shuffling: bool = False,
              random_unknown_strand_prob: float = 0.01,
              evo2_embeddings_dir: Optional[str] = None,
              # evo2_embedding_layer may be a single layer name or a list of names
              evo2_embedding_layer: Optional[Union[str, List[str]]] = None, 
              resume_from_checkpoint_path: Optional[str] = None,
              auto_resume_from_last_checkpoint: bool = False,
              use_absolute_genomic_pos_emb: bool = True,
              max_genomic_position: int = 300_000_000,
              max_seq_len: int = 512,
              weight_decay: float = 0.0,
              lr_scheduler_type: str = 'none',
              warmup_epochs: int = 0,
              ):
        """Train a model for ``task`` ('pretrain' or 'classify'), then run the
        test loop with the best checkpoint, merge per-rank prediction CSVs,
        and export the model + config on rank 0.
        """
        # Previously these were mutable default arguments ({}), and data_args
        # was mutated in place below, corrupting the shared default / the
        # caller's dict across invocations. Copy defensively instead.
        model_args = dict(model_args or {})
        data_args = dict(data_args or {})
        module_args = dict(module_args or {})

        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        # A config file provides defaults; explicitly passed data_args win.
        if data_config is not None:
            data_args_default = json.loads(Path(data_config).read_text())
            data_args_default.update(data_args)
            data_args = data_args_default
        
        data_args['stochastic_chrom_shuffling'] = stochastic_chrom_shuffling
        data_args['random_unknown_strand_prob'] = random_unknown_strand_prob
        data_args['max_seq_len'] = max_seq_len

        data_module = MutationDataModule(df_path, data_root=data_root, task=task, data_args=data_args,
                                      col_group=col_group,
                                      num_workers=self._num_workers,
                                      pin_memory=self._pin_memory, shuffle=shuffle,
                                      clinical_df_path=clinical_df_path,
                                      col_label=col_label,
                                      col_sequencing_method=col_sequencing_method,
                                      col_exon_strand=col_exon_strand, 
                                      bucket_batch_size=bucket_batch_size,
                                      bucket_boundaries=bucket_boundaries,
                                      stochastic_chrom_shuffling=stochastic_chrom_shuffling,
                                      random_unknown_strand_prob=random_unknown_strand_prob,
                                      evo2_embeddings_dir=evo2_embeddings_dir,
                                      evo2_embedding_layer=evo2_embedding_layer,
                                      max_seq_len=max_seq_len,
                                      )
        data_module.setup()

        # Vocabulary / embedding sizes derived from the training dataset.
        vocab_sizes = {
            'num_pentanucleotide_contexts': data_module.ds_train.num_pentanucleotide_contexts, 
            'num_chromosome_ids': data_module.ds_train.num_chromosome_ids,
            'num_exon_strand_ids': data_module.ds_train.num_exon_strand_ids,
            'num_sequencing_methods': data_module.ds_train.num_sequencing_methods,
            'evo2_embedding_dim': data_module.ds_train.evo2_embedding_dim,  # combined total dim across selected layers
            'num_relative_chrom_positions': int(data_module.ds_train.max_relative_chrom_pos_seen) + 1,  # from observed maximum
        }

        if task == 'pretrain':
            num_classes_for_model = vocab_sizes['num_pentanucleotide_contexts'] + 2 
            ModelModule = MutationPretrainModelModule
        elif task == 'classify':
            num_classes_for_model = data_module.ds_train.num_classes
            ModelModule = MutationClassificationModelModule
        else:
            # BUG FIX: this previously read `self.task`, which does not exist
            # on the runner and raised AttributeError instead of the intended error.
            raise NotImplementedError(f'Not implemented task: {task}')

        final_model_args = model_args.copy()
        final_model_args.update(vocab_sizes)
        if task == 'classify':
            final_model_args['num_classes'] = num_classes_for_model

        final_model_args['use_absolute_genomic_pos_emb'] = use_absolute_genomic_pos_emb
        final_model_args['max_genomic_position'] = max_genomic_position

        model = get_model(task=task, model_name=model_name, num_classes=num_classes_for_model,
                          model_path=pretrain_model_path,
                          model_args=final_model_args)

        module_args_final = dict(model_name=model_name, model=model, lr=lr,
                           output_dir=output_dir, model_args=final_model_args, data_args=data_args,
                           num_classes=num_classes_for_model,
                           weight_decay=weight_decay,
                           lr_scheduler_type=lr_scheduler_type,
                           warmup_epochs=warmup_epochs,
                           n_epoch=n_epoch,
                           )
        # BUG FIX: the `module_args` parameter was accepted but silently
        # ignored; caller-supplied overrides now take effect.
        module_args_final.update(module_args)

        # Resolve the checkpoint to resume from (explicit path wins over auto-resume).
        actual_resume_path = None
        if resume_from_checkpoint_path:
            actual_resume_path = Path(resume_from_checkpoint_path)
            if not actual_resume_path.exists():
                raise FileNotFoundError(f"指定的检查点文件未找到: {actual_resume_path}")
            print(f"将从指定的检查点恢复训练: {actual_resume_path}")
        elif auto_resume_from_last_checkpoint:
            log_dir_base = output_dir / 'log'
            if log_dir_base.exists():
                version_dirs = sorted(log_dir_base.glob('version_*'))
                if version_dirs:
                    latest_version_dir = version_dirs[-1]
                    last_checkpoint_path = latest_version_dir / 'checkpoint' / 'last.ckpt'
                    if last_checkpoint_path.exists():
                        actual_resume_path = last_checkpoint_path
                        print(f"检测到最新检查点，将从 {actual_resume_path} 恢复训练。")
                    else:
                        print(f"在最新版本目录 {latest_version_dir} 中未找到 'last.ckpt'。将从头开始训练。")
                else:
                    print(f"在 {log_dir_base} 中未找到任何版本目录。将从头开始训练。")
            else:
                print(f"日志目录 {log_dir_base} 不存在。将从头开始训练。")

        if model_checkpoint is None:
            model_module = ModelModule(**module_args_final)
        else:
            warnings.warn("`model_checkpoint` parameter is for loading a model's weights, not for resuming trainer state. If you want to resume training, use `resume_from_checkpoint_path` or `auto_resume_from_last_checkpoint`.")
            model_module = ModelModule.load_from_checkpoint(model_checkpoint, **module_args_final)

        log_dir = output_dir / 'log'
        logger_csv = CSVLogger(str(log_dir))
        version_dir = Path(logger_csv.log_dir)

        trainer_args = {}
        if self._fp16:
            trainer_args.update(dict(precision=16))
        else:
            trainer_args.update(dict(precision=32))

        checkpoint_callback = ModelCheckpoint(
            dirpath=(version_dir / 'checkpoint'),
            filename='{epoch}-{val_loss:.3f}',
            monitor=self._monitor_name,
            mode=self._monitor_mode,
            save_last=True,  # always keep the last checkpoint for auto-resume
            save_top_k=3,    # keep the 3 best models by the monitored metric
            verbose=True
        )

        trainer = Trainer(
            accelerator=self._accelerator,
            devices=self._devices,
            max_epochs=n_epoch,
            logger=[
                ConsoleLogger(),
                logger_csv,
            ],
            callbacks=[
                EarlyStopping(monitor=self._monitor_name, mode=self._monitor_mode, patience=patience),
                checkpoint_callback,
                TQDMProgressBar(refresh_rate=1),
            ],
            strategy=self._strategy,

            gradient_clip_val=gradient_clip_val,
            limit_train_batches=limit_train_batches,
            limit_val_batches=limit_val_batches,
            **trainer_args,
        )
        trainer.fit(model_module, datamodule=data_module, ckpt_path=actual_resume_path)
        if torch.distributed.is_initialized():
            torch.distributed.barrier()

        # Test with the best checkpoint when available; fall back to the resume
        # checkpoint, then to the in-memory (last) model state.
        if trainer.checkpoint_callback is not None and trainer.checkpoint_callback.best_model_path:
            print(f"加载最佳模型进行测试: {trainer.checkpoint_callback.best_model_path}")
            model_module = ModelModule.load_from_checkpoint(trainer.checkpoint_callback.best_model_path, **module_args_final)
        elif actual_resume_path:
             print(f"未找到新的最佳模型。如果从检查点恢复，将加载最后一次的检查点 ({actual_resume_path}) 进行测试。")
             model_module = ModelModule.load_from_checkpoint(actual_resume_path, **module_args_final)
        else:
            warnings.warn("No best model checkpoint found. Using the last trained model for testing.")

        dl_test = data_module.test_dataloader()

        trainer.test(model_module, dataloaders=dl_test)

        if torch.distributed.is_initialized():
            torch.distributed.barrier()

        # Rank 0 merges the per-rank prediction CSVs written by the module.
        if trainer.global_rank == 0:
            paths = sorted((output_dir / 'pred').glob('*.csv'))
            dfs_pred = [pd.read_csv(path) for path in paths]
            if len(dfs_pred) > 0:
                df_pred_final = pd.concat(dfs_pred)
                df_pred_final.to_csv(output_dir / 'test_pred.csv', index=False)

        # Rank 0 exports the model weights plus a JSON config for later `test` runs.
        if trainer.global_rank == 0:
            model = model_module.model
            model_config = final_model_args.copy()
            model_config['task'] = task
            model_config['use_absolute_genomic_pos_emb'] = use_absolute_genomic_pos_emb
            model_config['max_genomic_position'] = max_genomic_position
            model_config['evo2_embedding_dim'] = vocab_sizes['evo2_embedding_dim']
            model_config['num_pentanucleotide_contexts'] = vocab_sizes['num_pentanucleotide_contexts'] 
            model_config['num_relative_chrom_positions'] = vocab_sizes['num_relative_chrom_positions']

            (output_dir / 'model_config.json').write_text(json.dumps(model_config, indent=2))
            torch.save(model.state_dict(), str(output_dir / 'state_dict.zip'))
            torch.save(model, str(output_dir / 'model.pt'))
            torch.save(model.state_dict(), str(version_dir / 'state_dict.zip'))
            torch.save(model, str(version_dir / 'model.pt'))

        if torch.distributed.is_initialized():
            torch.distributed.barrier()

    def test(self, task, df_path, data_root, output_dir, model_path, col_group='dataset', debug=False,
             data_args=None,
             clinical_df_path=None,
             col_label='clin_CANCER_TYPE_manual_mapped_id',
             col_sequencing_method='sequencing_method_str_id',
             col_exon_strand='exon_strand_id',
             col_id=None, split_i=None, split_n=None, split_salt='',
             bucket_batch_size: int = 32,
             bucket_boundaries: Optional[List[int]] = None,
             stochastic_chrom_shuffling: bool = False,
             random_unknown_strand_prob: float = 0.01,
             evo2_embeddings_dir: Optional[str] = None,
             # evo2_embedding_layer may be a single layer name or a list of names
             evo2_embedding_layer: Optional[Union[str, List[str]]] = None, 
             use_absolute_genomic_pos_emb: bool = True,
             max_genomic_position: int = 300_000_000,
             max_seq_len: int = 512,
             ):
        """Run the test loop of a previously exported model (``model_path``),
        reconstructing its configuration from the sibling ``model_config.json``
        when present, and merge per-rank prediction CSVs on rank 0.
        """
        # Previously a mutable default argument ({}); copy defensively.
        data_args = dict(data_args or {})

        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

        # Defaults used when model_config.json is missing.
        model_config_path = Path(model_path).parent / 'model_config.json'
        task_from_model = task
        num_classes_from_model = 1
        model_args_from_model = {}
        evo2_embedding_dim_from_model = 0
        use_absolute_genomic_pos_emb_from_model = use_absolute_genomic_pos_emb
        max_genomic_position_from_model = max_genomic_position
        num_pentanucleotide_contexts_from_model = 0 
        num_relative_chrom_positions_from_model = max_seq_len + 1  # overridden by model config when available

        if model_config_path.exists():
            model_config = json.loads(model_config_path.read_text())
            task_from_model = model_config.get('task', task)
            num_classes_from_model = model_config.get('num_classes', 1)
            evo2_embedding_dim_from_model = model_config.get('evo2_embedding_dim', 0)
            use_absolute_genomic_pos_emb_from_model = model_config.get('use_absolute_genomic_pos_emb', use_absolute_genomic_pos_emb)
            max_genomic_position_from_model = model_config.get('max_genomic_position', max_genomic_position)
            num_pentanucleotide_contexts_from_model = model_config.get('num_pentanucleotide_contexts', 0) 
            num_relative_chrom_positions_from_model = model_config.get('num_relative_chrom_positions', max_seq_len + 1)

            model_args_from_model = {k: v for k, v in model_config.items() if k not in ['task', 'num_classes']}
            if task_from_model == 'classify':
                model_args_from_model['num_classes'] = num_classes_from_model
        else:
            warnings.warn("model_config.json not found. Using default/dummy model args for testing. This might lead to errors if vocab sizes are mismatched.")
            model_args_from_model = {} 
        
        model_args_from_model['evo2_embedding_dim'] = evo2_embedding_dim_from_model
        model_args_from_model['use_absolute_genomic_pos_emb'] = use_absolute_genomic_pos_emb_from_model
        model_args_from_model['max_genomic_position'] = max_genomic_position_from_model
        model_args_from_model['num_pentanucleotide_contexts'] = num_pentanucleotide_contexts_from_model 
        model_args_from_model['num_relative_chrom_positions'] = num_relative_chrom_positions_from_model

        model = get_model(task=task_from_model, model_path=model_path, num_classes=num_classes_from_model, model_args=model_args_from_model)
        
        if task_from_model == 'pretrain':
            module_args_num_classes = model.lm_head.out_features
            ModelModule = MutationPretrainModelModule
        elif task_from_model == 'classify':
            module_args_num_classes = model.num_classes
            ModelModule = MutationClassificationModelModule
        else:
            raise NotImplementedError(f'Not implemented task: {task_from_model}')

        module_args = dict(model_name=model_path, model=model,
                           output_dir=output_dir, model_args=model_args_from_model, data_args=data_args,
                           num_classes=module_args_num_classes,
                           weight_decay=0.0,         # no weight decay needed for inference
                           lr_scheduler_type='none', # no scheduler needed for inference
                           warmup_epochs=0,
                           n_epoch=0,
                           )
        # BUG FIX: model_module was previously never constructed, so
        # trainer.test(model_module, ...) below raised NameError.
        model_module = ModelModule(**module_args)

        data_args_final = data_args.copy()
        data_args_final['stochastic_chrom_shuffling'] = stochastic_chrom_shuffling
        data_args_final['random_unknown_strand_prob'] = random_unknown_strand_prob
        data_args_final['max_seq_len'] = max_seq_len

        data_module = MutationDataModule(df_path, data_root=data_root, task=task_from_model, data_args=data_args_final,
                                      col_group=col_group,
                                      num_workers=self._num_workers,
                                      pin_memory=self._pin_memory, shuffle=False,
                                      clinical_df_path=clinical_df_path,
                                      col_label=col_label,
                                      col_sequencing_method=col_sequencing_method,
                                      col_exon_strand=col_exon_strand,
                                      bucket_batch_size=bucket_batch_size,
                                      bucket_boundaries=bucket_boundaries,
                                      stochastic_chrom_shuffling=stochastic_chrom_shuffling,
                                      random_unknown_strand_prob=random_unknown_strand_prob,
                                      evo2_embeddings_dir=evo2_embeddings_dir,
                                      evo2_embedding_layer=evo2_embedding_layer,
                                      max_seq_len=max_seq_len,
                                      )

        log_dir = output_dir / 'log'
        logger_csv = CSVLogger(str(log_dir))
        trainer = Trainer(
            accelerator=self._accelerator,
            devices=self._devices,
            max_epochs=0,
            logger=[
                logger_csv,
            ],
            callbacks=[
                TQDMProgressBar(refresh_rate=1),
            ],
            strategy=self._strategy,
        )

        data_module.setup()
        dl_test = data_module.test_dataloader()
        trainer.test(model_module, dataloaders=dl_test)

        if torch.distributed.is_initialized():
            torch.distributed.barrier()
        # Rank 0 merges the per-rank prediction CSVs written by the module.
        if trainer.global_rank == 0:
            paths = sorted((output_dir / 'pred').glob('*.csv'))
            dfs_pred = [pd.read_csv(path) for path in paths]
            if len(dfs_pred) > 0:
                df_pred_final = pd.concat(dfs_pred)
                df_pred_final.to_csv(output_dir / 'test_pred.csv', index=False)


if __name__ == '__main__':
    # Expose MutationLightRunner's public methods (train / test) as a Fire CLI.
    fire.Fire(MutationLightRunner)