import torch
from torch.utils.data import Dataset
import numpy as np
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
import multiprocessing as mp
# Force the multiprocessing start method to 'spawn'
# (presumably so CUDA can be used safely in worker processes — TODO confirm).
if mp.get_start_method(allow_none=True) != 'spawn':
    mp.set_start_method('spawn', force=True)

from functools import partial
from pathlib import Path
import h5py
from tqdm import tqdm
from .model import CNNFeatureExtractor
import logging
import pysam
import pyBigWig
from contextlib import nullcontext
import math
from datetime import datetime
import re
import os
import sys
import time
import json
import concurrent.futures
import random
import hashlib
import pickle

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

# Attach a console handler only if one has not been added yet
if not logger.handlers:
    # Add a console handler
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)

class GenomicDataset(Dataset):
    def __init__(self, genome_fasta, histone_marks, gene_expression_file=None, 
                 window_size=1000, use_cache=True, output_dir="output", cache_dir=None,
                 use_dim_reduction=False, dim_reduction_method='none', n_components=50,
                 target_chromosome=None, target_start=None, target_end=None,
                 use_parallel=True):
        """
        Initialize the genomic dataset.

        Loads the genome FASTA, records chromosome lengths, opens every
        available histone-mark BigWig file, and finally calls _init_data()
        to build (or load from cache) the feature/label tensors.

        Args:
            genome_fasta: Path to the genome FASTA file.
            histone_marks: Histone-modification data as {mark_name: file_path}.
            gene_expression_file: Path to gene-expression data (stored only;
                not read in this constructor).
            window_size: Sliding-window size in base pairs.
            use_cache: Whether to use the on-disk cache.
            output_dir: Output directory.
            cache_dir: Cache directory; defaults to output_dir when None.
            use_dim_reduction: Whether to apply dimensionality reduction.
            dim_reduction_method: Reduction method, e.g. 'pca', 'umap', 'tsne'
                (default 'none').
            n_components: Number of components for dimensionality reduction.
            target_chromosome: If set, restrict processing to this chromosome.
            target_start: Start position of the target region (optional).
            target_end: End position of the target region (optional).
            use_parallel: Whether to use parallel processing.

        Raises:
            Exception: Re-raised when the genome FASTA cannot be loaded.
        """
        # Chromatin-state name -> integer label used throughout the dataset.
        self.chromatin_states = {
            'active_promoter': 0,
            'weak_promoter': 1,
            'strong_enhancer': 2,
            'weak_enhancer': 3,
            'insulator': 4,
            'transcription': 5,
            'heterochromatin': 6,
            'repressed': 7,
            'poised_promoter': 8,
            'unknown': 9
        }
        
        # Label index -> name of the processing function for that state.
        self.label_to_function = {
            0: "promoter_processing",   # active promoter
            1: "promoter_processing",   # weak promoter
            2: "enhancer_processing",   # strong enhancer
            3: "enhancer_processing",   # weak enhancer
            4: "insulator_processing",  # insulator
            5: "transcription_processing",  # transcribed region
            6: "heterochromatin_processing",  # heterochromatin
            7: "repressed_processing",  # repressed region
            8: "promoter_processing",   # poised promoter
            9: "default"               # unknown regions use the default handler
        }
        
        # Reverse map: processing-function name -> canonical label index.
        self.function_to_label = {
            "promoter_processing": 0,   # active and weak promoters
            "enhancer_processing": 2,   # strong and weak enhancers
            "insulator_processing": 4,  # insulator
            "transcription_processing": 5,  # transcribed region
            "heterochromatin_processing": 6,  # heterochromatin
            "repressed_processing": 7,  # repressed region
            "default": 9               # default handler maps to 'unknown'
        }
        
        # Genome path and histone-mark tracks.
        self.genome_fasta = genome_fasta
        self.histone_marks = histone_marks or {}
        self.gene_expression_file = gene_expression_file

        # Window-size setting.
        self.window_size = window_size
        
        # Cache and parallelism settings.
        self.use_cache = use_cache
        self.output_dir = Path(output_dir)
        self.cache_dir = Path(cache_dir) if cache_dir else self.output_dir
        
        # Create the output and cache directories.
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.cache_dir.mkdir(parents=True, exist_ok=True)
        logger.info(f"使用缓存目录: {self.cache_dir}")
        
        # Dimensionality-reduction settings.
        self.use_dim_reduction = use_dim_reduction
        self.dim_reduction_method = dim_reduction_method
        self.n_components = n_components
        self.plot_reduction = True
        
        # Target-region settings.
        self.target_chromosome = target_chromosome
        self.target_start = target_start
        self.target_end = target_end
        
        # Parallel-processing setting.
        self.use_parallel = use_parallel
        
        # Keep only the histone-mark files that actually exist on disk.
        self.marks_data = {}
        for mark_name, mark_file in self.histone_marks.items():
            if Path(mark_file).exists():
                self.marks_data[mark_name] = mark_file
                logger.info(f"添加组蛋白修饰 {mark_name}: {mark_file}")
            else:
                logger.warning(f"组蛋白修饰文件不存在: {mark_file}")
        
        # Chromosome-length map, filled from the FASTA below.
        self._chrom_lengths = {}
        
        # Load the genome.
        try:
            logger.info(f"载入基因组序列: {self.genome_fasta}")
            self.genome = pysam.FastaFile(self.genome_fasta)
            
            # Record the length of every non-empty chromosome.
            for chrom in self.genome.references:
                length = self.genome.get_reference_length(chrom)
                if length > 0:
                    self._chrom_lengths[chrom] = length
            
            logger.info(f"成功加载 {len(self._chrom_lengths)} 个染色体的长度信息")
            
            # BigWig handles, keyed by mark name.
            self.bigwig_handles = {}
            
            # Open every BigWig file that exists.
            for mark_name, mark_file in self.histone_marks.items():
                try:
                    if os.path.exists(mark_file):
                        bw = pyBigWig.open(mark_file)
                        if bw:
                            self.bigwig_handles[mark_name] = bw
                            logger.debug(f"成功打开BigWig文件: {mark_file}")
                        else:
                            logger.warning(f"无法打开BigWig文件: {mark_file}")
                    else:
                        logger.warning(f"BigWig文件不存在: {mark_file}")
                except Exception as e:
                    logger.error(f"打开BigWig文件 {mark_file} 时出错: {str(e)}")
            
            logger.info(f"成功打开 {len(self.bigwig_handles)}/{len(self.histone_marks)} 个BigWig文件")
            
        except Exception as e:
            logger.error(f"加载基因组数据时出错: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            raise
        
        # Build (or load from cache) the dataset tensors.
        self._init_data()

    def _init_data(self):
        """Validate chromosome/region settings, check BigWig data, then load
        the cache or process the data from scratch.

        Raises:
            ValueError: If no chromosome lengths are available, the target
                chromosome is missing, or the target region is invalid.
        """
        try:
            # Chromosome lengths were collected in __init__; only validate here.
            if not self._chrom_lengths:
                if self.target_chromosome:
                    raise ValueError(f"没有找到指定的染色体: {self.target_chromosome}")
                else:
                    raise ValueError("没有找到有效的染色体长度信息")
            
            # If a target chromosome was given, keep only that chromosome.
            if self.target_chromosome:
                if self.target_chromosome not in self._chrom_lengths:
                    raise ValueError(f"指定的染色体不存在: {self.target_chromosome}")
                
                chrom_lengths = {self.target_chromosome: self._chrom_lengths[self.target_chromosome]}
                
                # If a target region was given, adjust the effective length.
                if self.target_start is not None and self.target_end is not None:
                    # Re-check that both bounds are present and integer-convertible.
                    try:
                        start = int(self.target_start)
                        end = int(self.target_end)
                        
                        original_length = chrom_lengths[self.target_chromosome]
                        if end > original_length:
                            logger.warning(f"指定的结束位置 {end} 超出染色体长度 {original_length}，将使用染色体末端")
                            end = original_length
                        if start >= end:
                            raise ValueError(f"无效的区域范围: {start}-{end}")
                        
                        # Use the target-region length for this chromosome.
                        chrom_lengths[self.target_chromosome] = end - start
                        
                        # Store the normalized bounds for later consistency.
                        self.target_start = start
                        self.target_end = end
                    except (TypeError, ValueError) as e:
                        logger.error(f"处理目标区域参数时出错: {str(e)}")
                        raise ValueError(f"目标区域参数无效: start={self.target_start}, end={self.target_end}, 确保两个参数都是有效的整数")
                elif self.target_start is not None or self.target_end is not None:
                    # Warn when only one region bound was supplied.
                    logger.warning(f"只提供了一个区域边界参数: start={self.target_start}, end={self.target_end}，需要同时提供两个参数")
                
                self._chrom_lengths = chrom_lengths
                
            logger.info(f"将处理 {len(self._chrom_lengths)} 个染色体")
            if self.target_chromosome:
                if self.target_start is not None and self.target_end is not None:
                    logger.info(f"将只处理区域: {self.target_chromosome}:{self.target_start}-{self.target_end}")
                else:
                    logger.info(f"将只处理染色体: {self.target_chromosome}")
            
            # Validate that the BigWig tracks contain usable signal.
            logger.info("验证BigWig文件数据...")
            stats = self._validate_bigwig_data()
            if stats:
                valid_marks = 0
                for mark_name, chrom_stats in stats.items():
                    valid_chroms = sum(1 for stat in chrom_stats.values() if stat['valid'])
                    if valid_chroms > 0:
                        valid_marks += 1
                        logger.info(f"{mark_name}: 在 {valid_chroms} 个染色体上有有效数据")
                
                logger.info(f"共有 {valid_marks}/{len(self.histone_marks)} 个修饰有有效数据")
            
            # Try the cache first; a successful load short-circuits processing.
            if self.use_cache and self._try_load_cache():
                return
                
            logger.info("开始处理数据...")
            self._process_data()
            
        except Exception as e:
            logger.error(f"初始化基因组数据时出错: {str(e)}")
            raise
            
    def _process_data(self):
        """
        Process genome data: extract features and generate labels.

        Tries GPU/CPU heterogeneous parallel processing first (when enabled
        and CUDA is available) and falls back to single-threaded processing
        on any failure. Populates self.features and self.labels, optionally
        moves them to the GPU, and persists the cache when enabled.
        """
        import time
        from .parallel_processor import HeterogeneousProcessor, timer

        # Parallel processing requires both the flag and an available GPU.
        use_parallel = self.use_parallel and torch.cuda.is_available()
        
        # Make sure the window size is valid.
        if not self.window_size or self.window_size <= 0:
            self.window_size = 1000
            logger.warning(f"窗口大小无效，已设置为默认值: {self.window_size}")
        
        # Overall processing start time.
        start_time = time.time()
        
        # Parallel path: delegate to HeterogeneousProcessor.
        if use_parallel:
            logger.info("使用GPU-CPU异构并行处理数据")
            try:
                # Initialize the parallel processor.
                processor = HeterogeneousProcessor(
                    dataset=self,
                    cpu_workers=16,  # at most 16 CPU threads
                    batch_size=64,   # GPU batch size
                    fp16=True        # half precision for speed
                )
                
                # Chromosomes to process.
                chromosomes = list(self._chrom_lengths.keys())
                
                # Process all chromosomes in parallel.
                with timer("并行处理所有染色体"):
                    features, labels = processor.process_chromosomes(chromosomes)
                
                if features is None or labels is None:
                    logger.warning("并行处理未找到有效特征数据，将回退到单线程处理")
                    use_parallel = False
                else:
                    # Store the results.
                    self.features = features
                    self.labels = labels
                    logger.info(f"并行处理完成，共生成 {len(features)} 个样本，耗时: {time.time() - start_time:.2f} 秒")
            except Exception as e:
                logger.error(f"并行处理出错，将回退到单线程处理: {str(e)}")
                import traceback
                logger.error(traceback.format_exc())
                use_parallel = False
        
        # Single-threaded fallback path.
        if not use_parallel:
            logger.info("使用单线程处理数据")
            
            # Start time for the single-threaded path.
            single_thread_start = time.time()
            
            # Re-validate the window size (defensive; also checked above).
            if not self.window_size or self.window_size <= 0:
                self.window_size = 1000
                logger.warning(f"窗口大小无效，已设置为默认值: {self.window_size}")
            
            # Accumulators for regions, features and labels.
            self.regions = []
            all_features = []
            all_labels = []
            
            # Process each chromosome in turn.
            for chrom, length in self._chrom_lengths.items():
                logger.info(f"正在处理染色体 {chrom}，长度: {length} bp")
                
                # Process a single chromosome.
                regions, features, labels = self._process_chromosome(chrom, length)
                
                # Merge results.
                if regions and len(features) > 0 and len(labels) > 0:
                    self.regions.extend(regions)
                    all_features.append(features)
                    all_labels.append(labels)
                    logger.info(f"染色体 {chrom} 处理完成: {len(regions)} 个区域")
                else:
                    logger.warning(f"染色体 {chrom} 没有生成有效数据")
            
            # Bail out if nothing was produced.
            if not all_features or not all_labels:
                logger.warning("未找到有效的特征数据")
                return
            
            # Concatenate features and labels across chromosomes.
            self.features = torch.cat(all_features, dim=0)
            self.labels = torch.cat(all_labels, dim=0)
            
            logger.info(f"单线程处理完成，共生成 {len(self.features)} 个样本，"
                        f"耗时: {time.time() - single_thread_start:.2f} 秒")
        
        # Move the final tensors to the GPU when available.
        if torch.cuda.is_available():
            logger.info(f"将数据移动到GPU处理")
            self.features = self.features.to('cuda')
            self.labels = self.labels.to('cuda')
        
        # Log tensor shapes.
        logger.info(f"特征张量形状: {self.features.shape}")
        logger.info(f"标签张量形状: {self.labels.shape}")
        
        # Log the label distribution.
        unique_labels, counts = torch.unique(self.labels, return_counts=True)
        for label, count in zip(unique_labels.cpu().numpy(), counts.cpu().numpy()):
            # NOTE: label is a numpy integer; the dict lookup works because
            # numpy ints hash and compare equal to Python ints.
            label_name = self.label_to_function.get(label, f"类别{label}")
            percentage = 100.0 * count / len(self.labels)
            logger.info(f"类别 {label} ({label_name}): {count} 样本, {percentage:.2f}%")
            
        # Persist the processed cache.
        if self.use_cache:
            self._save_cache()
        
        # Final label-distribution summary.
        self._print_label_distribution()
    
    def _process_chromosome(self, chrom, length):
        """Process one chromosome: window it, extract features, label windows.

        Args:
            chrom: Chromosome name.
            length: Chromosome length in bp.

        Returns:
            tuple: (regions, features, labels) where regions is a list of
            {'chrom', 'start', 'end'} dicts and features/labels are tensors
            on the selected device. On failure, returns an empty list and
            two empty tensors.
        """
        try:
            logger.info(f"处理染色体 {chrom} 数据，总长度 {length} bp")
            
            # Pick the compute device.
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            logger.info(f"使用设备: {device}")
            
            # Default to the whole chromosome.
            start_pos = 0
            end_pos = length
            
            # Narrow to the target region when this is the target chromosome.
            if self.target_chromosome == chrom and self.target_start is not None and self.target_end is not None:
                start_pos = self.target_start
                end_pos = min(self.target_end, length)
            
            # Number of full windows (at least 1).
            window_size = self.window_size
            num_windows = max(1, (end_pos - start_pos) // window_size)
            
            logger.info(f"在染色体 {chrom} 上创建 {num_windows} 个窗口")
            
            # Build the window regions.
            regions = []
            for i in range(num_windows):
                window_start = start_pos + i * window_size
                window_end = min(window_start + window_size, end_pos)
                
                # One window region.
                region = {
                    'chrom': chrom,
                    'start': window_start,
                    'end': window_end
                }
                regions.append(region)
            
            # Per-window feature and label accumulators.
            features_list = []
            labels_list = []
            
            # Tally of assigned labels for the distribution report.
            label_counts = {}
            
            for region in regions:
                try:
                    # DNA sequence for the window.
                    seq_features = self._get_sequence_fast(region['chrom'], region['start'], region['end'])
                    if seq_features is None:
                        continue
                    
                    # One-hot encode the sequence on the chosen device.
                    encoded_seq = self._one_hot_encode_batch([seq_features]).to(device)
                    
                    # Histone-mark features on the same device.
                    histone_features = self._get_histone_features_fast(region['chrom'], region['start'], region['end'])
                    if histone_features is None:
                        continue
                    histone_features = histone_features.to(device)
                    
                    # Concatenate sequence and histone features into one row.
                    feature_tensor = torch.cat([encoded_seq.view(1, -1), histone_features.view(1, -1)], dim=1)
                    features_list.append(feature_tensor)
                    
                    # Assign a chromatin-state label to the window.
                    label = self._determine_window_function(region['chrom'], region['start'], region['end'], histone_features.cpu().numpy() if histone_features is not None else None)
                    labels_list.append(torch.tensor([label], dtype=torch.long, device=device))
                    
                    # Count the label.
                    if label not in label_counts:
                        label_counts[label] = 0
                    label_counts[label] += 1
                    
                except Exception as e:
                    logger.warning(f"处理区域 {region['chrom']}:{region['start']}-{region['end']} 时出错: {str(e)}")
                    continue
            
            # Nothing usable on this chromosome?
            if not features_list or len(features_list) == 0:
                logger.warning(f"染色体 {chrom} 没有提取到有效特征")
                return [], torch.tensor([], device=device), torch.tensor([], device=device)
            
            # Stack per-window rows into chromosome-level tensors.
            all_features = torch.cat(features_list, dim=0)
            all_labels = torch.cat(labels_list, dim=0)
            
            # Report the label distribution.
            total_labels = sum(label_counts.values())
            logger.info(f"染色体 {chrom} 标签分布:")
            for label, count in sorted(label_counts.items()):
                # Resolve the chromatin-state name for this label.
                state_name = "未知"
                for name, idx in self.chromatin_states.items():
                    if idx == label:
                        state_name = name
                        break
                percentage = (count / total_labels) * 100 if total_labels > 0 else 0
                logger.info(f"  标签 {label} ({state_name}): {count} 个区域, {percentage:.2f}%")
            
            # Warn when every window got the same label (possible labeling bug).
            if len(label_counts) <= 1:
                logger.warning(f"警告: 染色体 {chrom} 的区域均被分配到同一个标签类别，这可能表明标签生成逻辑存在问题")
            
            logger.info(f"染色体 {chrom} 处理完成，生成 {len(regions)} 个区域，{len(all_features)} 个有效特征")
            
            return regions, all_features, all_labels
            
        except Exception as e:
            logger.error(f"处理染色体 {chrom} 时出错: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            return [], torch.tensor([], device=device), torch.tensor([], device=device)
    
    def _determine_window_function(self, chrom=None, start=None, end=None, window_data=None, batch_idx=None, window_idx=None, processor_id=None):
        """
        Determine the chromatin-state label for a window.

        Combines histone-modification signal (when window_data is given) with
        motif-occurrence scores from the DNA sequence (when coordinates are
        given) and returns the best-scoring chromatin state.

        Args:
            chrom: Chromosome name.
            start: Window start position.
            end: Window end position.
            window_data: Histone-modification data for the window, shape
                [n_marks, window_size] (numpy array).
            batch_idx: Batch index (optional; unused in the scoring itself).
            window_idx: Window index (optional; unused in the scoring itself).
            processor_id: Processor id (optional; unused in the scoring itself).

        Returns:
            int: Label index 0-9 ('unknown' = 9 on missing data or error).
        """
        try:
            # Build a region dict when explicit coordinates were passed.
            region = None
            if chrom is not None and start is not None and end is not None:
                region = {'chrom': chrom, 'start': start, 'end': end}
            
            # Feature-index pattern per chromatin state.
            # NOTE(review): these hard-coded indices assume a specific mark
            # ordering with >= 27 tracks — confirm against the mark list.
            chromatin_patterns = {
                'active_promoter': [0, 1, 2],   # e.g. H3K4me3, H3K27ac, H3K9ac
                'weak_promoter': [3, 4, 5],     # e.g. low H3K4me3, H3K4me1
                'strong_enhancer': [6, 7, 8],   # e.g. H3K4me1, high H3K27ac
                'weak_enhancer': [9, 10, 11],   # e.g. low H3K4me1
                'insulator': [12, 13, 14],      # e.g. CTCF
                'transcription': [15, 16, 17],  # e.g. H3K36me3, H3K79me2
                'heterochromatin': [18, 19, 20], # e.g. H3K9me3
                'repressed': [21, 22, 23],      # e.g. H3K27me3
                'poised_promoter': [24, 25, 26] # e.g. H3K4me3, H3K27me3
            }
            
            # Fetch the sequence for motif analysis.
            sequence = None
            if chrom is not None and start is not None and end is not None:
                sequence = self._get_sequence_fast(chrom, start, end)
            
            # IUPAC motifs associated with each chromatin state.
            motifs = {
                'active_promoter': {'TATA_box': 'TATAAA', 'CpG': 'CGCG', 'GC_box': 'GGGCGG'},
                'weak_promoter': {'TATA_box': 'TATAAA', 'Inr': 'YYANWYY'},
                'strong_enhancer': {'AP1': 'TGASTCA', 'GATA': 'WGATAR', 'ETS': 'GGAW'},
                'weak_enhancer': {'AP1': 'TGASTCA', 'SOX': 'WWCAAWG'},
                'insulator': {'CTCF': 'CCGCGNGGNGGCAG'},
                'transcription': {'polyA': 'AATAAA'},
                'heterochromatin': {'CENP_B': 'NTTCGNNNNANNCGGGAN'},
                'repressed': {'PRE': 'GYGRGC'},
                'poised_promoter': {'TATA_box': 'TATAAA', 'PRC': 'GCCGCG'}
            }
            
            # Count motif hits per state.
            motif_scores = {}
            if sequence:
                for state, state_motifs in motifs.items():
                    motif_counts = 0
                    for motif_name, motif_pattern in state_motifs.items():
                        motif_counts += self._find_motif(sequence, motif_pattern)
                    motif_scores[state] = motif_counts
            
            # Histone-based scoring when window data is available.
            if window_data is not None and isinstance(window_data, np.ndarray) and window_data.size > 0:
                # Mean signal per mark across the window.
                mark_signals = np.nanmean(window_data, axis=1)
                
                # Average each state's marks into a histone score.
                histone_scores = {}
                for state, mark_indices in chromatin_patterns.items():
                    valid_indices = [idx for idx in mark_indices if idx < len(mark_signals)]
                    if valid_indices:
                        state_signals = [mark_signals[idx] for idx in valid_indices]
                        histone_scores[state] = np.mean(state_signals)
                    else:
                        histone_scores[state] = 0.0
                
                # Blend histone and motif scores when both exist.
                combined_scores = {}
                if motif_scores:
                    for state in chromatin_patterns.keys():
                        # Histone weight 0.7, motif weight 0.3.
                        histone_weight = 0.7
                        motif_weight = 0.3
                        
                        histone_score = histone_scores.get(state, 0.0)
                        motif_score = motif_scores.get(state, 0.0)
                        
                        # Simple max-normalization of the motif score.
                        max_motif = max(motif_scores.values()) if motif_scores else 1
                        norm_motif_score = motif_score / max_motif if max_motif > 0 else 0
                        
                        combined_scores[state] = histone_weight * histone_score + motif_weight * norm_motif_score
                else:
                    combined_scores = histone_scores
                
                # Pick the highest-scoring state.
                if combined_scores:
                    max_state = max(combined_scores, key=combined_scores.get)
                    return self.chromatin_states[max_state]
                
                # Legacy fallback: strongest single mark.
                # NOTE(review): combined_scores is always non-empty above, so
                # this branch appears unreachable — confirm before relying on it.
                max_idx = np.argmax(mark_signals)
                
                # Map index ranges to chromatin states.
                if max_idx < 6:  # first 6 features
                    return self.chromatin_states['active_promoter']  # 0: active promoter
                elif max_idx < 12:
                    return self.chromatin_states['weak_promoter']  # 1: weak promoter
                elif max_idx < 15:
                    return self.chromatin_states['strong_enhancer']  # 2: strong enhancer
                elif max_idx < 18:
                    return self.chromatin_states['weak_enhancer']  # 3: weak enhancer
                elif max_idx < 21:
                    return self.chromatin_states['insulator']  # 4: insulator
                elif max_idx < 24:
                    return self.chromatin_states['transcription']  # 5: transcribed region
                elif max_idx < 27:
                    return self.chromatin_states['heterochromatin']  # 6: heterochromatin
                elif max_idx < 30:
                    return self.chromatin_states['repressed']  # 7: repressed region
                else:
                    return self.chromatin_states['poised_promoter']  # 8: poised promoter
            
            # Sequence-only fallback: motif scores alone.
            elif motif_scores:
                max_state = max(motif_scores, key=motif_scores.get)
                return self.chromatin_states[max_state]
            
            # Not enough data: label as unknown.
            return self.chromatin_states['unknown']  # 9: unknown
            
        except Exception as e:
            logger.warning(f"确定窗口函数时出错: {str(e)}")
            import traceback
            logger.debug(traceback.format_exc())
            return self.chromatin_states['unknown']  # unknown on error

    def _get_sequence_fast(self, chrom, start, end):
        """Fetch the uppercase DNA sequence for a genomic interval.

        Args:
            chrom: Chromosome name (converted to str for pysam).
            start: 0-based start coordinate.
            end: End coordinate (exclusive).

        Returns:
            str | None: Uppercase sequence, or None if the fetch fails.
        """
        try:
            # pysam expects a string reference name; upper-case for the
            # downstream one-hot encoder and motif matching.
            return self.genome.fetch(str(chrom), start, end).upper()
        except Exception as e:
            logger.debug(f"无法获取序列 {chrom}:{start}-{end}: {str(e)}")
            return None

    def _get_histone_features_fast(self, chrom, start, end):
        """Compute the mean signal of each histone mark over an interval.

        Args:
            chrom: Chromosome name.
            start: 0-based start coordinate.
            end: End coordinate (exclusive).

        Returns:
            torch.Tensor | None: 1-D tensor of length len(self.histone_marks)
            with one mean value per opened BigWig handle (0.0 for marks with
            no usable data), or None on unexpected failure.
        """
        try:
            features = torch.zeros(len(self.histone_marks))
            for idx, (mark_name, handle) in enumerate(self.bigwig_handles.items()):
                if not handle or chrom not in handle.chroms():
                    continue
                try:
                    values = handle.values(chrom, start, end)
                    if values:
                        mean_val = np.nanmean(values)
                        # Guard against all-NaN intervals so NaN never
                        # leaks into the feature tensor.
                        if not np.isnan(mean_val):
                            features[idx] = float(mean_val)
                except Exception:
                    # Skip marks whose query fails; their feature stays 0.
                    continue
            return features

        except Exception as e:
            logger.warning(f"快速组蛋白特征获取失败: {str(e)}")
            return None

    def _one_hot_encode_batch(self, sequences):
        """One-hot encode a batch of DNA sequences into a (B, 4, W) tensor.

        Channel layout is A=0, T=1, G=2, C=3; any other base (e.g. N) and
        any position beyond the sequence length stays all-zero. Sequences
        longer than the window are truncated; None entries yield zero rows.
        """
        # Fall back to the default window when window_size is missing/invalid.
        effective_window = self.window_size if isinstance(self.window_size, int) and self.window_size > 0 else 1000

        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        encoded = torch.zeros((len(sequences), 4, effective_window), device=device)

        # Base -> channel dispatch table.
        base_channel = {'A': 0, 'T': 1, 'G': 2, 'C': 3}

        for seq_idx, seq in enumerate(sequences):
            if seq is None:
                continue

            # Truncate to the window and set the matching channel per position.
            for pos, base in enumerate(seq[:effective_window]):
                channel = base_channel.get(base)
                if channel is not None:
                    encoded[seq_idx, channel, pos] = 1.0

        return encoded

    def _validate_bigwig_data(self):
        """Check every opened BigWig handle for usable signal per chromosome.

        Returns:
            dict | None: {mark_name: {chrom: {'mean': float, 'valid': bool}}},
            or None when validation fails entirely.
        """
        try:
            logger.info("开始验证BigWig文件数据...")
            all_stats = {}

            for mark_name, bw in self.bigwig_handles.items():
                try:
                    per_chrom = {}
                    for chrom in self._chrom_lengths.keys():
                        if chrom not in bw.chroms():
                            logger.warning(f"{mark_name} 中不存在染色体 {chrom}")
                            per_chrom[chrom] = {'mean': 0.0, 'valid': False}
                            continue
                        try:
                            # bw.stats() returns a list; the first entry is
                            # the whole-chromosome mean (or None).
                            result = bw.stats(chrom, 0, self._chrom_lengths[chrom], type="mean")
                            if result and result[0] is not None:
                                mean_value = float(result[0])
                                per_chrom[chrom] = {'mean': mean_value, 'valid': True}
                                logger.debug(f"{mark_name} 在 {chrom} 上的平均信号强度: {mean_value:.4f}")
                            else:
                                per_chrom[chrom] = {'mean': 0.0, 'valid': False}
                                logger.warning(f"{mark_name} 在 {chrom} 上没有有效数据")
                        except Exception as e:
                            logger.warning(f"获取{mark_name}在{chrom}的统计信息失败: {str(e)}")
                            per_chrom[chrom] = {'mean': 0.0, 'valid': False}

                    # Summarize how many chromosomes carry usable signal.
                    valid_chroms = sum(1 for entry in per_chrom.values() if entry['valid'])
                    if valid_chroms == 0:
                        logger.warning(f"{mark_name}在所有染色体上都没有有效数据")
                    else:
                        logger.info(f"{mark_name}在{valid_chroms}个染色体上有有效数据")

                    all_stats[mark_name] = per_chrom

                except Exception as e:
                    logger.error(f"验证{mark_name}数据时出错: {str(e)}")
                    continue

            return all_stats

        except Exception as e:
            logger.error(f"验证BigWig数据时出错: {str(e)}")
            return None
    
    def _find_motif(self, sequence, motif_pattern):
        """Count non-overlapping occurrences of an IUPAC motif in a sequence.

        Args:
            sequence: DNA sequence string.
            motif_pattern: Motif written in IUPAC nucleotide codes.

        Returns:
            int: Number of matches found (0 on any error).
        """
        try:
            # Translate the IUPAC code to a regex and compile it once.
            compiled = re.compile(self._iupac_to_regex(motif_pattern))

            # findall returns one entry per non-overlapping match.
            return len(compiled.findall(sequence))
        except Exception as e:
            logger.warning(f"搜索motif '{motif_pattern}'时出错: {str(e)}")
            return 0
    
    def _find_motif_positions(self, sequence, motif_pattern):
        """Locate every occurrence of an IUPAC motif in a DNA sequence.

        Args:
            sequence: DNA sequence string.
            motif_pattern: Motif written in IUPAC nucleotide codes.

        Returns:
            list: 0-based start positions of each match ([] on error).
        """
        try:
            # Translate the IUPAC code to a regex and compile it once.
            compiled = re.compile(self._iupac_to_regex(motif_pattern))

            # Collect the start offset of each non-overlapping match.
            hit_positions = []
            for hit in compiled.finditer(sequence):
                hit_positions.append(hit.start())

            return hit_positions
        except Exception as e:
            logger.warning(f"搜索motif '{motif_pattern}'位置时出错: {str(e)}")
            return []
    
    def _iupac_to_regex(self, iupac_pattern):
        """将IUPAC核苷酸代码转换为正则表达式
        
        Args:
            iupac_pattern: 使用IUPAC核苷酸代码的模式
            
        Returns:
            str: 正则表达式模式
        """
        # IUPAC核苷酸代码对应表
        iupac_map = {
            'A': 'A',
            'C': 'C',
            'G': 'G',
            'T': 'T',
            'R': '[AG]',   # A or G (puRine)
            'Y': '[CT]',   # C or T (pYrimidine)
            'S': '[GC]',   # G or C (Strong)
            'W': '[AT]',   # A or T (Weak)
            'K': '[GT]',   # G or T (Keto)
            'M': '[AC]',   # A or C (aMino)
            'B': '[CGT]',  # Not A
            'D': '[AGT]',  # Not C
            'H': '[ACT]',  # Not G
            'V': '[ACG]',  # Not T
            'N': '[ACGT]'  # aNy base
        }
        
        # 转换IUPAC模式为正则表达式
        regex_pattern = ''
        for char in iupac_pattern.upper():
            if char in iupac_map:
                regex_pattern += iupac_map[char]
            else:
                # 如果不是IUPAC字符，保持原样
                regex_pattern += char
                
        return regex_pattern
    
    def _calculate_motif_enrichment(self, sequence, motifs_dict):
        """计算序列中motif的富集程度
        
        Args:
            sequence: DNA序列字符串
            motifs_dict: motif名称和模式的字典
            
        Returns:
            dict: 每个motif的富集数据
        """
        import re
        import math
        
        # 检查输入有效性
        if not sequence or not isinstance(sequence, str) or len(sequence) < 10:
            logger.warning(f"序列无效，无法计算motif富集: {sequence}")
            return {}
            
        try:
            result = {}
            seq_len = len(sequence)
            
            # 核苷酸频率(用于计算随机期望值)
            base_count = {
                'A': sequence.count('A'),
                'C': sequence.count('C'),
                'G': sequence.count('G'),
                'T': sequence.count('T')
            }
            
            # 各核苷酸比例
            base_freq = {
                'A': base_count['A'] / seq_len,
                'C': base_count['C'] / seq_len,
                'G': base_count['G'] / seq_len,
                'T': base_count['T'] / seq_len
            }
            
            # 为所有motif计算富集度
            for motif_name, motif_pattern in motifs_dict.items():
                # 查找motif出现次数
                count = self._find_motif(sequence, motif_pattern)
                
                # 估算随机期望值
                # 简化计算：使用motif长度和序列平均GC含量估算
                expected = 0.1  # 默认期望值
                
                # 如果有足够的核苷酸数量，计算更准确的期望值
                total_counted = sum(base_count.values())
                if total_counted > 0 and total_counted >= 0.9 * seq_len:
                    pattern_len = len(motif_pattern)
                    pattern_prob = 1.0
                    
                    # 简化期望值计算
                    # 对于特定碱基，使用观察到的频率
                    # 对于简并碱基，取平均概率
                    for char in motif_pattern.upper():
                        if char == 'A':
                            pattern_prob *= base_freq['A']
                        elif char == 'C':
                            pattern_prob *= base_freq['C']
                        elif char == 'G':
                            pattern_prob *= base_freq['G']
                        elif char == 'T':
                            pattern_prob *= base_freq['T']
                        else:
                            # 简并碱基，取平均概率0.25
                            pattern_prob *= 0.25
                    
                    # 期望出现次数 = 概率 * 可能位置数
                    expected = pattern_prob * (seq_len - pattern_len + 1)
                
                # 计算富集分数
                # 如果expected为0，避免除以0错误
                if expected <= 0:
                    expected = 0.1
                    
                # 计算富集比率和分数
                enrichment_ratio = count / expected
                
                # 在对数空间中计算分数，对于富集的motif增加分数
                score = 0
                if count > 0:
                    # 使用对数比率作为富集分数，但受count影响
                    log_ratio = math.log2(enrichment_ratio) if enrichment_ratio > 0 else 0
                    
                    # 根据count和log_ratio量化分数
                    if log_ratio > 0:
                        score = min(log_ratio * math.sqrt(count), 5.0)  # 限制最大分数为5
                    elif count >= 3:  # 即使没有富集，出现3次以上也有少量分数
                        score = 0.5
                
                result[motif_name] = {
                    'count': count,
                    'expected': expected,
                    'ratio': enrichment_ratio,
                    'score': score
                }
                
                logger.debug(f"Motif {motif_name}({motif_pattern}): 出现{count}次, 期望{expected:.2f}次, 富集比{enrichment_ratio:.2f}倍, 分数{score:.2f}")
                
            return result
            
        except Exception as e:
            logger.warning(f"计算motif富集时出错: {str(e)}")
            import traceback
            logger.debug(traceback.format_exc())
            return {}

    def generate_dim_reduction_plot(self, force_replot=False, colormap=None):
        """Generate a dimensionality-reduction scatter plot without re-running the reduction.

        Uses ``self.reduced_features`` when available, otherwise falls back to
        the raw ``self.features`` (with a warning, since raw features rarely
        show meaningful structure). Points are colored by ``self.labels`` when
        present. The figure is saved under ``<output_dir>/plots/`` with a
        timestamped file name so earlier plots are never overwritten.

        Args:
            force_replot: Plot even when no dimensionality reduction was run.
            colormap: Optional matplotlib colormap name, e.g. 'tab10', 'tab20',
                'hsv', 'jet', 'rainbow'. Defaults are chosen by class count.

        Returns:
            bool: True when the figure was generated and saved, False otherwise.
        """
        if not hasattr(self, 'features'):
            logger.warning("未找到特征数据，无法生成可视化")
            return False

        # Prefer the already-reduced features when they exist.
        if hasattr(self, 'reduced_features'):
            features = self.reduced_features.cpu().numpy()
            logger.info(f"使用已有的降维特征进行可视化，形状: {features.shape}")
        else:
            if not getattr(self, 'use_dim_reduction', False):
                logger.warning("未执行降维处理 (use_dim_reduction=False)，可视化可能不会显示有意义的结果")
                if not force_replot:
                    logger.warning("跳过可视化绘图。如需强制绘图，请设置force_replot=True")
                    return False

            features = self.features.cpu().numpy()
            logger.info(f"使用原始特征进行可视化，形状: {features.shape}")
            logger.warning("没有降维数据，直接使用原始特征绘图可能不会显示有意义的聚类结果")

        if hasattr(self, 'labels'):
            labels = self.labels.cpu().numpy()
            unique_labels = np.unique(labels)
            logger.info(f"可视化使用标签着色，发现 {len(unique_labels)} 种唯一标签: {unique_labels}")
            logger.info(f"标签分布: {np.bincount(labels)}")
        else:
            logger.warning("未找到标签数据，可视化将不使用着色")
            labels = None

        try:
            # Minimal stand-in for a fitted reducer, kept for API compatibility
            # with the regular (non-manual) visualization path.
            class DummyReducer:
                def __init__(self):
                    self.explained_variance_ratio_ = [0.3, 0.2]  # placeholder values

            reducer = DummyReducer()

            # Timestamped file name so previous visualizations are preserved.
            self.dim_reduction_method = self.dim_reduction_method or 'manual'
            plot_dir = Path(self.output_dir) / 'plots'
            plot_dir.mkdir(parents=True, exist_ok=True)

            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

            # Embed the colormap name in the file name when one was given.
            cm_suffix = f"_{colormap}" if colormap else ""
            plot_file = plot_dir / f'dim_reduction_{self.dim_reduction_method}_manual{cm_suffix}_{timestamp}.png'

            import matplotlib.pyplot as plt
            from matplotlib.colors import ListedColormap

            os.makedirs(os.path.dirname(str(plot_file)), exist_ok=True)

            plt.figure(figsize=(12, 10))

            n_dims = features.shape[1]

            def _scatter(target, use_3d):
                """Scatter the points on *target* (pyplot module or 3D axes), colored by label when available."""
                if labels is not None:
                    unique_labels = np.unique(labels)
                    n_classes = len(unique_labels)

                    logger.info(f"可视化使用标签着色，发现 {n_classes} 种唯一标签: {unique_labels}")

                    label_counts = np.bincount(labels.astype(int), minlength=n_classes)
                    logger.info(f"标签分布: {label_counts}")

                    # Default colormap scales with the number of classes.
                    cmap_name = colormap
                    if cmap_name is None:
                        if n_classes <= 10:
                            cmap_name = 'tab10'
                        elif n_classes <= 20:
                            cmap_name = 'tab20'
                        else:
                            cmap_name = 'hsv'

                    cmap = plt.cm.get_cmap(cmap_name, n_classes)

                    # One scatter call per class so each gets its own legend entry.
                    for i, label in enumerate(unique_labels):
                        mask = labels == label
                        color = cmap(i / max(1, n_classes - 1))
                        coords = [features[mask, 0], features[mask, 1]]
                        if use_3d:
                            coords.append(features[mask, 2])
                        target.scatter(
                            *coords,
                            c=[color],
                            label=f'类别 {label}',
                            alpha=0.7,
                            edgecolors='w',
                            s=80
                        )

                    target.legend(title="染色质状态", bbox_to_anchor=(1.05, 1), loc='upper left')
                else:
                    # No labels: single-color scatter.
                    coords = [features[:, 0], features[:, 1]]
                    if use_3d:
                        coords.append(features[:, 2])
                    target.scatter(
                        *coords,
                        c='blue',
                        alpha=0.7,
                        edgecolors='w',
                        s=50
                    )

            if n_dims == 2:
                _scatter(plt, use_3d=False)
                plt.title(f'2D 降维可视化 ({self.dim_reduction_method.upper()})', fontsize=16)
                plt.xlabel('维度 1', fontsize=14)
                plt.ylabel('维度 2', fontsize=14)

            elif n_dims == 3:
                from mpl_toolkits.mplot3d import Axes3D
                # NOTE(review): this opens a second figure; the first (12x10)
                # figure is left open, matching the original behavior.
                ax = plt.figure().add_subplot(111, projection='3d')
                _scatter(ax, use_3d=True)
                ax.set_title(f'3D 降维可视化 ({self.dim_reduction_method.upper()})', fontsize=16)
                ax.set_xlabel('维度 1', fontsize=14)
                ax.set_ylabel('维度 2', fontsize=14)
                ax.set_zlabel('维度 3', fontsize=14)

            else:
                # More than 3 dimensions: fall back to plotting the first two.
                logger.warning(f"特征维度为 {n_dims}，但可视化只支持2D和3D。将显示前2个维度。")
                _scatter(plt, use_3d=False)
                plt.title(f'2D 降维可视化 (前两个维度, {self.dim_reduction_method.upper()})', fontsize=16)
                plt.xlabel('维度 1', fontsize=14)
                plt.ylabel('维度 2', fontsize=14)

            if labels is not None:
                plt.tight_layout()

            plt.savefig(str(plot_file), dpi=300, bbox_inches='tight')
            plt.close()

            logger.info(f"手动生成的降维可视化已保存到: {plot_file}")
            return True
        except Exception as e:
            logger.error(f"生成降维可视化图失败: {str(e)}")
            # Fix: `traceback` was referenced here without being imported,
            # so the error handler itself raised NameError.
            import traceback
            logger.error(traceback.format_exc())
            return False

    def _try_load_cache(self):
        """Try to load previously cached dataset artifacts.

        Reads features, labels, regions and (optionally) reduced features from
        ``<self.cache_dir>/cache``, keyed by an MD5 fingerprint of the dataset
        parameters. The fingerprint scheme must stay in sync with
        ``_save_cache``.

        Returns:
            bool: True when the cache was loaded successfully; False when it
                is missing, incomplete, or loading failed (callers should
                then regenerate the data).
        """
        try:
            # Target device for loaded tensors.
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            
            # Cache lives under self.cache_dir, NOT output_dir.
            cache_dir = Path(self.cache_dir) / 'cache'
            
            # Fingerprint of the dataset parameters; identifies the cache files.
            fingerprint = hashlib.md5(f"{self.genome_fasta}_{self.histone_marks}_{self.target_chromosome}_{self.target_start}_{self.target_end}_{self.window_size}".encode()).hexdigest()
            cache_base = cache_dir / fingerprint
            
            # Per-artifact file paths derived from the fingerprint base name.
            features_file = cache_base.with_suffix('.features.pt')
            labels_file = cache_base.with_suffix('.labels.pt')
            regions_file = cache_base.with_suffix('.regions.pkl')
            reduced_features_file = cache_base.with_suffix('.reduced_features.pt')
            
            logger.info(f"尝试从缓存目录加载数据: {cache_dir}")
            
            # Features and regions are mandatory; bail out if either is missing.
            if not features_file.exists() or not regions_file.exists():
                logger.info(f"缓存文件不完整，找不到以下文件：{'features' if not features_file.exists() else ''}{'regions' if not regions_file.exists() else ''}")
                logger.info(f"将重新生成数据...")
                return False
                
            # Load the feature tensor and move it to the chosen device.
            logger.info(f"从缓存加载特征数据...")
            self.features = torch.load(features_file).to(device)
            logger.info(f"加载的特征形状: {self.features.shape}")
            
            # Labels are optional; fall back to all-zero labels when absent.
            if labels_file.exists():
                logger.info("从缓存加载标签数据...")
                self.labels = torch.load(labels_file).to(device)
                logger.info(f"加载的标签形状: {self.labels.shape}")
                
                # Log the label distribution for sanity checking.
                unique_labels, counts = torch.unique(self.labels, return_counts=True)
                total = self.labels.size(0)
                logger.info("标签分布:")
                for label, count in zip(unique_labels.tolist(), counts.tolist()):
                    percentage = (count / total) * 100
                    # Reverse lookup: label index -> chromatin-state name.
                    label_name = [k for k, v in self.chromatin_states.items() if v == label]
                    label_str = label_name[0] if label_name else str(label)
                    logger.info(f"  - {label_str}: {count} ({percentage:.2f}%)")
            else:
                logger.warning("未找到标签文件，创建默认的零标签")
                self.labels = torch.zeros(len(self.features), dtype=torch.long, device=device)
            
            # Load the region metadata (pickled list).
            logger.info("从缓存加载区域数据...")
            with open(regions_file, 'rb') as f:
                self.regions = pickle.load(f)
            logger.info(f"加载了 {len(self.regions)} 个样本")
            
            # Load cached reduced features; when absent but a reduction method
            # is configured, regenerate them instead.
            if hasattr(self, 'dim_reduction_method') and self.dim_reduction_method and reduced_features_file.exists():
                logger.info(f"从缓存加载降维特征，使用 {self.dim_reduction_method} 方法...")
                self.reduced_features = torch.load(reduced_features_file).to(device)
                logger.info(f"加载的降维特征形状: {self.reduced_features.shape}")
            elif hasattr(self, 'dim_reduction_method') and self.dim_reduction_method:
                logger.info(f"未找到降维特征缓存，将使用 {self.dim_reduction_method} 方法重新生成...")
                self._reduce_dimensions()
                if hasattr(self, 'reduced_features') and self.reduced_features is not None:
                    logger.info(f"生成的降维特征形状: {self.reduced_features.shape}")
                    # NOTE(review): assumes self.plot_reduction was set during
                    # __init__ — confirm; an AttributeError here would be
                    # swallowed by the outer except and reported as cache failure.
                    if self.plot_reduction:
                        self._plot_reduced_dimensions()
                else:
                    logger.warning("降维失败，无法生成降维特征")
            
            logger.info(f"成功从缓存 {cache_dir} 加载数据")
            return True
            
        except Exception as e:
            logger.error(f"从缓存 {self.cache_dir} 加载数据失败: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            return False
    
    def _save_cache(self):
        """将数据保存到缓存
        使用self.cache_dir而不是output_dir作为保存路径
        """
        try:
            # 创建缓存目录
            cache_dir = Path(self.cache_dir) / 'cache'
            cache_dir.mkdir(parents=True, exist_ok=True)
            
            # 生成唯一的缓存文件名
            fingerprint = hashlib.md5(f"{self.genome_fasta}_{self.histone_marks}_{self.target_chromosome}_{self.target_start}_{self.target_end}_{self.window_size}".encode()).hexdigest()
            cache_base = cache_dir / fingerprint
            
            logger.info(f"保存数据到缓存目录: {cache_dir}")
            
            # 确保features和labels存在
            if not hasattr(self, 'features') or self.features is None or len(self.features) == 0:
                logger.warning("无特征数据可保存到缓存")
                return
            
            # 确保所有张量在CPU上
            if torch.cuda.is_available() and self.features.is_cuda:
                features_cpu = self.features.cpu()
            else:
                features_cpu = self.features

            # 如果没有标签，创建默认的零标签
            if not hasattr(self, 'labels') or self.labels is None:
                logger.warning("未找到标签，创建默认的零标签")
                self.labels = torch.zeros(len(self.features), dtype=torch.long, device=device)
            
            # 确保标签在CPU上
            if torch.cuda.is_available() and self.labels.is_cuda:
                labels_cpu = self.labels.cpu()
            else:
                labels_cpu = self.labels
            
            # 记录特征和标签的形状
            logger.info(f"保存特征形状: {features_cpu.shape}")
            logger.info(f"保存标签形状: {labels_cpu.shape}")
            
            # 记录唯一标签
            unique_labels, counts = torch.unique(labels_cpu, return_counts=True)
            total = labels_cpu.size(0)
            logger.info("标签分布:")
            for label, count in zip(unique_labels.tolist(), counts.tolist()):
                percentage = (count / total) * 100
                label_name = [k for k, v in self.chromatin_states.items() if v == label]
                label_str = label_name[0] if label_name else str(label)
                logger.info(f"  - {label_str}: {count} ({percentage:.2f}%)")
            
            # 保存特征和标签
            torch.save(features_cpu, cache_base.with_suffix('.features.pt'))
            torch.save(labels_cpu, cache_base.with_suffix('.labels.pt'))
            
            # 保存区域信息
            if hasattr(self, 'regions') and self.regions:
                with open(cache_base.with_suffix('.regions.pkl'), 'wb') as f:
                    pickle.dump(self.regions, f)
                logger.info(f"保存了 {len(self.regions)} 个区域到缓存")
            else:
                logger.warning("无区域数据可保存")
            
            # 保存降维特征（如果存在）
            if hasattr(self, 'reduced_features') and self.reduced_features is not None:
                # 确保降维特征在CPU上
                if torch.cuda.is_available() and self.reduced_features.is_cuda:
                    reduced_features_cpu = self.reduced_features.cpu()
                else:
                    reduced_features_cpu = self.reduced_features
                    
                torch.save(reduced_features_cpu, cache_base.with_suffix('.reduced_features.pt'))
                logger.info(f"保存了降维特征，形状: {reduced_features_cpu.shape}")
            
            logger.info("缓存保存完成")
        
        except Exception as e:
            logger.error(f"保存缓存时出错: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())

    def __len__(self):
        """返回数据集大小"""
        if hasattr(self, 'features') and self.features is not None:
            return len(self.features)
        return 0
    
    def __getitem__(self, idx):
        """返回指定索引的样本
        
        Args:
            idx: 样本索引
            
        Returns:
            dict: 包含特征、标签和区域信息的字典
        """
        if not hasattr(self, 'features') or self.features is None:
            raise IndexError("数据集未初始化或为空")
        
        # 获取特征和标签
        feature = self.features[idx]
        
        # 保存原始三维特征
        original_feature = feature
        
        # 根据模型需求处理特征
        # 对于CNN: 保持三维结构 [n_features, seq_length]
        # 对于FC层: 提供处理后的一维特征 [n_features]
        processed_feature = feature
        if feature.dim() == 2:  # [n_features, seq_length]
            # 对于全连接层: 计算平均值得到一维特征
            processed_feature = torch.mean(feature, dim=1)
        
        # 获取区域信息（如果存在）
        region = None
        if hasattr(self, 'regions') and self.regions and idx < len(self.regions):
            region = self.regions[idx]
        
        # 获取标签（如果存在）
        label = None
        if hasattr(self, 'labels') and self.labels is not None and idx < len(self.labels):
            label = self.labels[idx]
        
        # 获取表达量数据（如果存在）
        expression = None
        if hasattr(self, 'expressions') and self.expressions is not None and idx < len(self.expressions):
            expression = self.expressions[idx]
        else:
            # 创建一个默认的空表达量张量
            device = feature.device
            expression = torch.zeros(1, dtype=torch.float32, device=device)
        
        # 返回字典而不是元组，这样可以通过键访问
        result = {
            'features': processed_feature,  # 一维特征，用于全连接层
            'all_features': original_feature,  # 原始三维特征，用于CNN
            'expression': expression  # 表达量数据
        }
        if label is not None:
            result['label'] = label  # 使用'label'而不是'labels'
        if region is not None:
            result['region'] = region
            
        return result

    def _print_label_distribution(self):
        """统计并打印标签分布情况"""
        if not hasattr(self, 'labels') or self.labels is None or self.labels.numel() == 0:
            logger.warning("没有可用标签数据，无法打印分布情况")
            return
        
        try:
            logger.info("标签分布情况:")
            
            # 转换标签到CPU进行统计
            if self.labels.is_cuda:
                labels_cpu = self.labels.cpu().numpy()
            else:
                labels_cpu = self.labels.numpy()
            
            # 统计每个标签的数量
            label_counts = {}
            total_samples = len(labels_cpu)
            
            for label in range(10):  # 0-9共10种可能的标签
                count = np.sum(labels_cpu == label)
                percentage = count / total_samples * 100 if total_samples > 0 else 0
                
                # 获取标签对应的染色质状态名称
                state_name = "未知"
                for name, idx in self.chromatin_states.items():
                    if idx == label:
                        state_name = name
                        break
                
                label_counts[label] = {
                    'count': count,
                    'percentage': percentage,
                    'state_name': state_name
                }
                
                logger.info(f"类别 {label} ({state_name}): {count} 样本, {percentage:.2f}%")
            
            # 检查标签分布是否合理
            non_zero_labels = sum(1 for data in label_counts.values() if data['count'] > 0)
            if non_zero_labels <= 1:
                logger.warning("警告：所有样本都归为同一类别，这可能表明标签生成逻辑存在问题！")
            elif non_zero_labels <= 3:
                logger.warning("警告：样本仅分布在少数几个类别，这可能表明标签多样性不足！")
                
            return label_counts
            
        except Exception as e:
            logger.error(f"打印标签分布时出错: {str(e)}")
            import traceback
            logger.error(traceback.format_exc())
            return None
