import diskcache
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import json
import warnings
import torch
from tqdm import tqdm # 导入 tqdm 用于显示进度
import math
from pathlib import Path
from typing import Optional, List, Union # 导入 Union

def read_df_path(df_path):
    """Load a mutation table from either an in-memory DataFrame or a parquet file.

    A DataFrame argument is defensively copied so the caller's object is never
    mutated; any other argument is treated as a filesystem path to a parquet
    file.

    Raises:
        FileNotFoundError: if a path is given and no file exists there.
    """
    if isinstance(df_path, pd.DataFrame):
        return df_path.copy()
    path = Path(df_path)
    if not path.exists():
        raise FileNotFoundError(f"数据文件未找到: {path}")
    return pd.read_parquet(path)

class MutationPretrainDataset(Dataset):
    """PyTorch Dataset over per-sample DNA mutation sequences.

    One item corresponds to one sample id. All mutations belonging to that
    sample are encoded as fixed-length (``max_seq_len``) integer ID sequences
    (pentanucleotide context, chromosome, genomic position, per-chromosome
    rank, exon/strand, sequencing method) plus an attention mask, optionally
    augmented with pre-computed Evo-2 reference-sequence embeddings. In
    training mode ``__getitem__`` additionally applies BERT-style masking for
    the MLM pretraining objective.
    """

    def __init__(self, df_path, data_root,
                 col_sample_id='sampleId',
                 mask_ratio=0.15,
                 max_seq_len=512,
                 training=True,
                 cache_dir=None,
                 col_sequencing_method='sequencing_method_str_id',
                 col_exon_strand='exon_strand_id',
                 stochastic_chrom_shuffling: bool = False,
                 random_unknown_strand_prob: float = 0.01,
                 evo2_embeddings_dir: Optional[str] = None,
                 evo2_embedding_layer: Optional[Union[str, List[str]]] = None,
                 ):
        """
        Args:
            df_path: mutation table as a DataFrame, or a path to a parquet file.
            data_root: directory holding the vocabulary JSON files.
            col_sample_id: column identifying the sample each mutation belongs to.
            mask_ratio: base fraction of non-pad tokens to corrupt during training.
            max_seq_len: fixed output sequence length (crop/pad target).
            training: enables masking and stochastic augmentations.
            cache_dir: optional diskcache directory for per-sample feature dicts.
            col_sequencing_method: column carrying the sequencing-method ID.
            col_exon_strand: column carrying the combined exon/strand ID.
            stochastic_chrom_shuffling: shuffle chromosome block order per
                sample (training mode only).
            random_unknown_strand_prob: per-token probability of resetting the
                exon/strand ID to PAD during training (strand dropout).
            evo2_embeddings_dir: directory with Evo-2 embedding .npy files and
                the mutation-to-row index map.
            evo2_embedding_layer: layer name(s) whose embeddings are
                concatenated along the feature dimension.
        """
        super().__init__()
        self.col_sample_id = col_sample_id
        self.training = training
        self.mask_ratio = mask_ratio
        self.max_seq_len = max_seq_len
        self.col_sequencing_method = col_sequencing_method
        self.col_exon_strand = col_exon_strand
        self.stochastic_chrom_shuffling = stochastic_chrom_shuffling
        self.random_unknown_strand_prob = random_unknown_strand_prob

        self.df_all_mutations = read_df_path(df_path)

        self.sample_ids = self.df_all_mutations[self.col_sample_id].unique().tolist()

        self.sample_id_to_mutation_count = self.df_all_mutations.groupby(self.col_sample_id).size().to_dict()

        self.output_dir = Path(data_root)

        # Pre-compute the largest per-(sample, chromosome) mutation rank seen
        # in the data so downstream embedding tables can be sized to fit it.
        print("Pre-calculating max_relative_chrom_pos_id for dataset...")
        df_temp_sorted = self.df_all_mutations.sort_values(by=[self.col_sample_id, 'chromosome_id', 'startPosition']).reset_index(drop=True)
        df_temp_sorted['relative_chrom_pos_id_temp'] = df_temp_sorted.groupby([self.col_sample_id, 'chromosome_id'], sort=False).cumcount()

        if not df_temp_sorted.empty:
            self.max_relative_chrom_pos_seen = df_temp_sorted['relative_chrom_pos_id_temp'].max()
        else:
            self.max_relative_chrom_pos_seen = 0

        print(f"Dataset's max_relative_chrom_pos_id: {self.max_relative_chrom_pos_seen}")

        self.load_vocabularies()

        # Sequencing-method categories are: Unknown, WGS, WES, Targeted.
        # Missing vocabulary entries fall back to -1 ("never matches").
        self.unknown_method_id = self.sequencing_method_to_id.get('Unknown', 0)
        self.wgs_id = self.sequencing_method_to_id.get('WGS', -1)
        self.wes_id = self.sequencing_method_to_id.get('WES', -1)
        self.targeted_id = self.sequencing_method_to_id.get('Targeted', -1)

        self.cache_dir = cache_dir
        if self.cache_dir is None:
            self.diskcache = None
        else:
            # eviction_policy='none' keeps every computed sample forever.
            self.diskcache = diskcache.Cache(directory=self.cache_dir, eviction_policy='none')

        self.evo2_embeddings_dir = Path(evo2_embeddings_dir) if evo2_embeddings_dir else None
        # Normalize the layer argument into a (possibly empty) list of names.
        self.evo2_embedding_layers = [evo2_embedding_layer] if isinstance(evo2_embedding_layer, str) else (evo2_embedding_layer if evo2_embedding_layer is not None else [])

        self.ref_evo2_embeddings_list = []
        self.mutation_embedding_index_map = None
        self.evo2_embedding_dim = 0

        if self.evo2_embeddings_dir and self.evo2_embedding_layers:
            self._load_evo2_embeddings()
        elif self.evo2_embeddings_dir and not self.evo2_embedding_layers:
            warnings.warn("提供了 evo2_embeddings_dir 但未指定 evo2_embedding_layer(s)。将不加载 Evo-2 嵌入。")
        elif not self.evo2_embeddings_dir and self.evo2_embedding_layers:
            warnings.warn("指定了 evo2_embedding_layer(s) 但未提供 evo2_embeddings_dir。将不加载 Evo-2 嵌入。")

    def _load_evo2_embeddings(self):
        """Load the mutation-to-row index map and one mmap'd embedding matrix
        per requested layer; total feature width goes to ``evo2_embedding_dim``.

        Raises:
            FileNotFoundError: if the index map or any layer's .npy is missing.
        """
        print(f"正在加载 Evo-2 嵌入层(s) '{self.evo2_embedding_layers}'...")

        index_map_path = self.evo2_embeddings_dir / "mutation_embedding_index_map.json"
        if not index_map_path.exists():
            raise FileNotFoundError(f"Evo-2 嵌入索引映射文件未找到: {index_map_path}")
        with open(index_map_path, "r") as f:
            self.mutation_embedding_index_map = json.load(f)

        total_dim = 0
        for layer_name in self.evo2_embedding_layers:
            # Layer names like "blocks.24.mlp" become filesystem-safe.
            safe_layer_name = layer_name.replace('.', '_').replace('/', '_')
            ref_embeddings_path = self.evo2_embeddings_dir / f"ref_seq_embeddings_{safe_layer_name}.npy"

            if not ref_embeddings_path.exists():
                raise FileNotFoundError(f"Evo-2 参考序列嵌入文件未找到: {ref_embeddings_path}")

            # mmap keeps the (potentially huge) matrices out of resident memory.
            ref_emb = np.load(ref_embeddings_path, mmap_mode='r')
            self.ref_evo2_embeddings_list.append(ref_emb)
            total_dim += ref_emb.shape[1]

        self.evo2_embedding_dim = total_dim
        print(f"Evo-2 嵌入加载成功。总维度: {self.evo2_embedding_dim} (来自 {len(self.evo2_embedding_layers)} 层)")

    def load_vocabularies(self):
        """Load ID vocabularies from ``output_dir``.

        The pentanucleotide vocabulary is required; the exon/strand and
        sequencing-method maps fall back to single-entry defaults (with a
        warning) when their JSON files are missing.
        """
        with open(self.output_dir / "pentanucleotide_context_to_id.json", "r") as f:
            self.pentanucleotide_context_to_id = json.load(f)

        self.num_pentanucleotide_contexts = len(self.pentanucleotide_context_to_id)

        self.PAD_ID = 0
        # [MASK] sits one past the last real context id (real ids are 1..N).
        self.MASK_ID = self.num_pentanucleotide_contexts + 1
        self.num_chromosome_ids = 25 + 1  # chromosomes 1-22, X, Y, M + PAD_ID (0)

        try:
            with open(self.output_dir / "exon_strand_to_id.json", "r") as f:
                self.exon_strand_to_id = json.load(f)
            self.num_exon_strand_ids = len(self.exon_strand_to_id)
        except FileNotFoundError:
            warnings.warn(f"未找到组合外显子/链映射文件 'exon_strand_to_id.json'。将 num_exon_strand_ids 设置为 1 (例如，所有都是未知/默认)。")
            self.exon_strand_to_id = {'Unknown_Unknown_NoGeneAnnotated': 0}
            self.num_exon_strand_ids = 1

        try:
            with open(self.output_dir / "sequencing_method_str_to_id.json", "r") as f:
                self.sequencing_method_to_id = json.load(f)
            self.num_sequencing_methods = len(self.sequencing_method_to_id)
        except FileNotFoundError:
            warnings.warn(f"未找到测序方法映射文件 'sequencing_method_str_to_id.json'。将 num_sequencing_methods 设置为 1 (例如，所有都是未知/默认)。")
            self.sequencing_method_to_id = {'Unknown': 0}
            self.num_sequencing_methods = 1

    def __len__(self):
        return len(self.sample_ids)

    def _empty_padded_inputs(self):
        """Return a fully padded, zero-attention feature dict of length
        ``max_seq_len`` — the fallback shape for missing/empty samples."""
        return {
            'pentanucleotide_context_ids': np.full(self.max_seq_len, self.PAD_ID, dtype=np.int64),
            'chromosome_ids': np.full(self.max_seq_len, self.PAD_ID, dtype=np.int64),
            'original_positions': np.full(self.max_seq_len, self.PAD_ID, dtype=np.int64),
            'relative_chrom_pos_ids': np.full(self.max_seq_len, self.PAD_ID, dtype=np.int64),
            'exon_strand_ids': np.full(self.max_seq_len, self.PAD_ID, dtype=np.int64),
            'sequencing_method_ids': np.full(self.max_seq_len, self.PAD_ID, dtype=np.int64),
            'ref_evo2_embeddings': np.zeros((self.max_seq_len, self.evo2_embedding_dim), dtype=np.float32),
            'attention_mask': np.full(self.max_seq_len, 0, dtype=np.int64),
        }

    def get_sample_cache(self, key, cache):
        """Fetch the feature dict for sample ``key``, via ``cache`` when given.

        Whenever feature extraction yields None (and on stale None cache
        entries), a warning is emitted and an all-padding dict is returned so
        downstream collation never sees a missing sample.
        """
        if cache is None:
            values = self._get_sample_mutations(key)
            if values is None:
                warnings.warn(f"_get_sample_mutations returned None for sample_id: {key}. Returning empty dict.")
                return self._empty_padded_inputs()
            return values

        if key in cache:
            values = cache[key]
            if values is None:
                # Stale/corrupt cache entry: recompute and overwrite it.
                warnings.warn(f"Cached value is None for sample_id: {key}. Re-fetching.")
                values = self._get_sample_mutations(key)
                if values is None:
                    warnings.warn(f"Re-fetch also returned None for sample_id: {key}. Returning empty dict.")
                    return self._empty_padded_inputs()
                cache[key] = values
            return values

        values = self._get_sample_mutations(key)
        if values is None:
            warnings.warn(f"_get_sample_mutations returned None for sample_id: {key}. Not caching, returning empty dict.")
            return self._empty_padded_inputs()
        cache[key] = values
        return values

    def _get_sample_mutations(self, sample_id):
        """Build the fixed-length feature dict for a single sample.

        Sorts the sample's mutations by (chromosome, position), optionally
        shuffles whole chromosome blocks during training, attaches Evo-2
        embeddings when configured, then crops/pads every stream to
        ``max_seq_len``.
        """
        df_sample = self.df_all_mutations[self.df_all_mutations[self.col_sample_id] == sample_id].copy()
        df_sample = df_sample.sort_values(by=['chromosome_id', 'startPosition'])

        if self.stochastic_chrom_shuffling and self.training:
            # Keep within-chromosome order but randomize the chromosome order.
            shuffled_indices = []
            grouped_by_chrom = df_sample.groupby('chromosome_id', sort=False)

            unique_chrom_ids = list(grouped_by_chrom.groups.keys())
            np.random.shuffle(unique_chrom_ids)

            for chrom_id in unique_chrom_ids:
                chrom_group_indices = grouped_by_chrom.groups[chrom_id].tolist()
                shuffled_indices.extend(chrom_group_indices)

            df_sample = df_sample.loc[shuffled_indices].reset_index(drop=True)

        # 0-based rank of each mutation within its chromosome.
        df_sample['relative_chrom_pos_id'] = df_sample.groupby('chromosome_id', sort=False).cumcount()

        pentanucleotide_context_ids = df_sample['pentanucleotide_context_id'].values
        chromosome_ids = df_sample['chromosome_id'].values
        original_positions = df_sample['startPosition'].values
        relative_chrom_pos_ids = df_sample['relative_chrom_pos_id'].values
        exon_strand_ids = df_sample[self.col_exon_strand].values
        sequencing_method_ids = df_sample[self.col_sequencing_method].values

        current_len = len(pentanucleotide_context_ids)

        # The sequencing method is a per-sample property; take the first row's.
        sample_sequencing_method_id = self.unknown_method_id
        if current_len > 0 and len(sequencing_method_ids) > 0:
            sample_sequencing_method_id = sequencing_method_ids[0]

        if current_len == 0:
            warnings.warn(f"样本 {sample_id} 没有突变数据。返回填充的空序列。")
            return self._empty_padded_inputs()

        ref_evo2_embeddings_combined = np.zeros((current_len, self.evo2_embedding_dim), dtype=np.float32)
        if self.evo2_embedding_dim > 0 and self.ref_evo2_embeddings_list:
            all_layer_embeddings = []

            # Fix: build the lookup key from the configured sample-id column
            # (was hard-coded 'sampleId', which broke non-default col_sample_id).
            df_sample_with_key = df_sample.copy()
            if 'startPosition' in self.df_all_mutations.columns:
                df_sample_with_key['unique_key'] = df_sample_with_key[self.col_sample_id].astype(str) + '_' + \
                                                   df_sample_with_key['chromosome_id'].astype(str) + '_' + \
                                                   df_sample_with_key['startPosition'].astype(str)
            else:
                warnings.warn(f"DataFrame for sample {sample_id} is missing 'startPosition' column. Using 'pos_bin_id' for Evo-2 unique key. This might lead to mismatches if Evo-2 embeddings were generated with 'startPosition'.")
                df_sample_with_key['unique_key'] = df_sample_with_key[self.col_sample_id].astype(str) + '_' + \
                                                   df_sample_with_key['chromosome_id'].astype(str) + '_' + \
                                                   df_sample_with_key['pos_bin_id'].astype(str)

            for ref_evo2_embeddings_single_layer in self.ref_evo2_embeddings_list:
                single_layer_embeddings = []
                for _, row in df_sample_with_key.iterrows():
                    key = row['unique_key']
                    if key in self.mutation_embedding_index_map:
                        idx = self.mutation_embedding_index_map[key]
                        single_layer_embeddings.append(ref_evo2_embeddings_single_layer[idx])
                    else:
                        # Mutation not present in the index map: zero vector.
                        single_layer_embeddings.append(np.zeros(ref_evo2_embeddings_single_layer.shape[1], dtype=np.float32))
                all_layer_embeddings.append(np.array(single_layer_embeddings, dtype=np.float32))

            # Concatenate layers along the feature dimension.
            ref_evo2_embeddings_combined = np.concatenate(all_layer_embeddings, axis=-1)

        if current_len > self.max_seq_len:
            # WGS/WES/Unknown samples get a random window so training sees
            # different crops across epochs; other methods (e.g. targeted
            # panels) are deterministically head-truncated.
            should_random_crop = (
                sample_sequencing_method_id == self.wgs_id or
                sample_sequencing_method_id == self.wes_id or
                sample_sequencing_method_id == self.unknown_method_id
            )

            if should_random_crop:
                start_offset = np.random.randint(0, current_len - self.max_seq_len + 1)
            else:
                start_offset = 0
            end_offset = start_offset + self.max_seq_len

            pentanucleotide_context_ids = pentanucleotide_context_ids[start_offset:end_offset]
            chromosome_ids = chromosome_ids[start_offset:end_offset]
            original_positions = original_positions[start_offset:end_offset]
            relative_chrom_pos_ids = relative_chrom_pos_ids[start_offset:end_offset]
            exon_strand_ids = exon_strand_ids[start_offset:end_offset]
            sequencing_method_ids = sequencing_method_ids[start_offset:end_offset]
            ref_evo2_embeddings_combined = ref_evo2_embeddings_combined[start_offset:end_offset]
            current_len = self.max_seq_len

        if current_len < self.max_seq_len:
            pad_len = self.max_seq_len - current_len
            pentanucleotide_context_ids = np.pad(pentanucleotide_context_ids, (0, pad_len), 'constant', constant_values=self.PAD_ID)
            chromosome_ids = np.pad(chromosome_ids, (0, pad_len), 'constant', constant_values=self.PAD_ID)
            original_positions = np.pad(original_positions, (0, pad_len), 'constant', constant_values=self.PAD_ID)
            relative_chrom_pos_ids = np.pad(relative_chrom_pos_ids, (0, pad_len), 'constant', constant_values=self.PAD_ID)
            exon_strand_ids = np.pad(exon_strand_ids, (0, pad_len), 'constant', constant_values=self.PAD_ID)

            # Padding keeps the sample's own method id, so the per-sample
            # sequencing-method signal survives in fully padded tails.
            fill_method_id_for_padding = sample_sequencing_method_id
            sequencing_method_ids = np.pad(sequencing_method_ids, (0, pad_len), 'constant', constant_values=fill_method_id_for_padding)

            ref_evo2_embeddings_combined = np.pad(ref_evo2_embeddings_combined, ((0, pad_len), (0, 0)), 'constant', constant_values=0.0)

        # Real tokens are any position whose context id is not PAD.
        attention_mask_base = (pentanucleotide_context_ids != self.PAD_ID).astype(np.int64)

        inputs = {
            'pentanucleotide_context_ids': pentanucleotide_context_ids,
            'chromosome_ids': chromosome_ids,
            'original_positions': original_positions,
            'relative_chrom_pos_ids': relative_chrom_pos_ids,
            'exon_strand_ids': exon_strand_ids,
            'sequencing_method_ids': sequencing_method_ids,
            'ref_evo2_embeddings': ref_evo2_embeddings_combined,
            'attention_mask': attention_mask_base,
        }
        return inputs

    def __getitem__(self, idx):
        """Return ``(inputs, labels)`` for the idx-th sample.

        ``labels`` are the uncorrupted pentanucleotide context ids; in
        training mode the inputs are corrupted BERT-style (80% [MASK] /
        10% random id / 10% unchanged), with positional hints blanked at
        corrupted positions, plus optional strand dropout.
        """
        sample_id = self.sample_ids[idx]
        inputs = self.get_sample_cache(sample_id, self.diskcache)

        # Work on copies so cached arrays are never mutated in place.
        processed_inputs = {k: v.copy() for k, v in inputs.items()}
        labels = processed_inputs['pentanucleotide_context_ids'].copy()
        mlm_indices = np.array([], dtype=int)

        if self.training:
            sample_sequencing_method_id = processed_inputs['sequencing_method_ids'][0].item()

            # Targeted panels are short; mask a bit more aggressively.
            effective_mask_ratio = self.mask_ratio
            if sample_sequencing_method_id == self.targeted_id:
                effective_mask_ratio = 0.20

            valid_indices = np.where(processed_inputs['attention_mask'] == 1)[0]
            num_non_pad_tokens = len(valid_indices)

            # Always corrupt at least one token when any real token exists.
            num_to_mask = int(num_non_pad_tokens * effective_mask_ratio)
            if num_to_mask == 0 and num_non_pad_tokens > 0:
                num_to_mask = 1

            if num_to_mask > 0:
                masked_indices = np.random.choice(valid_indices, num_to_mask, replace=False)
                mlm_indices = masked_indices

                if len(mlm_indices) > 0:
                    # BERT-style split: 80% [MASK], 10% random id, 10% keep.
                    choice_probs = np.array([0.8, 0.1, 0.1])
                    choices = np.random.choice([0, 1, 2], size=len(mlm_indices), p=choice_probs)

                    mask_replace_mask = (choices == 0)
                    processed_inputs['pentanucleotide_context_ids'][mlm_indices[mask_replace_mask]] = self.MASK_ID
                    # Hide positional hints at masked positions too.
                    processed_inputs['original_positions'][mlm_indices[mask_replace_mask]] = self.PAD_ID
                    processed_inputs['relative_chrom_pos_ids'][mlm_indices[mask_replace_mask]] = self.PAD_ID

                    random_replace_mask = (choices == 1)
                    num_random_tokens = random_replace_mask.sum()
                    if num_random_tokens > 0:
                        # Random real ids are drawn from 1..num_contexts.
                        processed_inputs['pentanucleotide_context_ids'][mlm_indices[random_replace_mask]] = np.random.randint(
                            1, self.num_pentanucleotide_contexts + 1, size=num_random_tokens
                        )
                        processed_inputs['original_positions'][mlm_indices[random_replace_mask]] = self.PAD_ID
                        processed_inputs['relative_chrom_pos_ids'][mlm_indices[random_replace_mask]] = self.PAD_ID

            # Strand dropout: reset a small fraction of strand ids to PAD.
            if self.random_unknown_strand_prob > 0:
                valid_indices_for_strand_mod = np.where(processed_inputs['attention_mask'] == 1)[0]

                num_to_modify = int(len(valid_indices_for_strand_mod) * self.random_unknown_strand_prob)

                if num_to_modify > 0:
                    indices_to_modify = np.random.choice(valid_indices_for_strand_mod, num_to_modify, replace=False)
                    processed_inputs['exon_strand_ids'][indices_to_modify] = self.PAD_ID

        for k, v in processed_inputs.items():
            processed_inputs[k] = torch.from_numpy(v)

        labels = torch.from_numpy(labels).long()

        return processed_inputs, labels


class MutationClassificationDataset(MutationPretrainDataset):
    """Sample-level classification dataset built on the pretraining pipeline.

    Reuses the parent's per-sample feature extraction and caching, but runs in
    deterministic (non-training) mode by default and pairs each sample with an
    integer class label read from ``col_label``.
    """

    def __init__(self, df_merged, data_root, col_label='clin_CANCER_TYPE_manual_mapped_id', **kwargs):
        """
        Args:
            df_merged: mutation table (DataFrame or parquet path) that also
                carries the per-sample label column.
            data_root: directory holding the vocabulary JSON files.
            col_label: column holding the 0..N-1 encoded class label.
            **kwargs: forwarded to MutationPretrainDataset; MLM/augmentation
                options are stripped since classification never corrupts inputs.
        """
        # Classification defaults to evaluation-style, deterministic features;
        # strip augmentation knobs so the parent uses its defaults.
        if 'training' not in kwargs:
            kwargs['training'] = False
        kwargs.pop('mask_ratio', None)
        kwargs.pop('stochastic_chrom_shuffling', None)
        kwargs.pop('random_unknown_strand_prob', None)

        super().__init__(df_merged, data_root, **kwargs)

        self.col_label = col_label

        # Drop mutations whose sample has no usable label.
        initial_rows_before_dropna = self.df_all_mutations.shape[0]
        self.df_all_mutations.dropna(subset=[self.col_label], inplace=True)
        if self.df_all_mutations.shape[0] < initial_rows_before_dropna:
            warnings.warn(f"MutationClassificationDataset: 移除了 {initial_rows_before_dropna - self.df_all_mutations.shape[0]} 行因缺失标签而无法使用的样本。")

        # Fix: honor the configured sample-id column instead of the hard-coded
        # 'sampleId', so a non-default col_sample_id keeps working here too.
        sample_id_to_label_df = self.df_all_mutations.drop_duplicates(subset=[self.col_sample_id])[
            [self.col_sample_id, self.col_label]
        ]
        self.sample_labels = dict(zip(sample_id_to_label_df[self.col_sample_id], sample_id_to_label_df[self.col_label]))

        # Keep only samples that still have a label after the dropna above.
        self.sample_ids = [s_id for s_id in self.sample_ids if s_id in self.sample_labels]
        self.sample_id_to_mutation_count = {s_id: count for s_id, count in self.sample_id_to_mutation_count.items() if s_id in self.sample_labels}

        # Infer the class count from the largest label id; labels are expected
        # to be contiguous integers starting at 0.
        unique_labels = np.unique(list(self.sample_labels.values()))
        if len(unique_labels) > 0:
            if unique_labels.min() != 0:
                warnings.warn("警告：分类标签不从0开始。请确保标签已正确编码为0到N-1的连续整数。")
            self.num_classes = int(unique_labels.max()) + 1
        else:
            self.num_classes = 0

        print(f"MutationClassificationDataset: 检测到 {self.num_classes} 个分类类别。")

    def __getitem__(self, idx):
        """Return ``(inputs, label)`` — no masking; the label is the sample's
        class id as a scalar long tensor."""
        sample_id = self.sample_ids[idx]
        inputs_raw = self.get_sample_cache(sample_id, self.diskcache)

        # Copy before tensor conversion so cached numpy arrays stay pristine.
        processed_inputs = {k: v.copy() for k, v in inputs_raw.items()}
        for k, v in processed_inputs.items():
            processed_inputs[k] = torch.from_numpy(v)

        label = self.sample_labels[sample_id]

        return processed_inputs, torch.tensor(label).long()