import os
import torch
import numpy as np
import multiprocessing as mp
from multiprocessing import Queue, Manager
from queue import Empty
import logging
import time
from contextlib import contextmanager
from tqdm import tqdm
import traceback
import pandas as pd
import threading
import queue
from concurrent.futures import ThreadPoolExecutor, as_completed
from .utils import logger

logger = logging.getLogger(__name__)

# Hard upper bound on the number of CPU worker threads.
_MAX_CPU_WORKERS = 16
# Number of consecutive windows covered by a single CPU task.
_WINDOWS_PER_TASK = 100


class HeterogeneousProcessor:
    """
    Heterogeneous (CPU + GPU) compute processor.

    A pool of CPU threads reads BigWig histone-mark signal and slices it
    into fixed-size windows, while a single GPU thread merges the window
    chunks into batches, moves them to the GPU and (optionally) casts
    them to FP16.

    Pipeline:
        cpu_queue (region tasks) -> CPU workers -> gpu_queue
        (window chunks) -> GPU worker -> result_queue (tensor batches).
    """

    def __init__(self, dataset, cpu_workers=8, batch_size=64, fp16=True):
        """
        Initialize the heterogeneous processor.

        Args:
            dataset: GenomicDataset instance; must provide ``window_size``,
                ``genome``, ``histone_marks``, ``marks_data``,
                ``_chrom_lengths`` and ``_determine_window_function``.
            cpu_workers: number of CPU worker threads (capped at 16).
            batch_size: number of windows merged into one GPU batch.
            fp16: use half-precision floats on the GPU when available.
        """
        self.dataset = dataset
        self.window_size = dataset.window_size
        self.genome = dataset.genome
        self.histone_marks = dataset.histone_marks
        self.marks_data = dataset.marks_data
        self.cpu_workers = min(cpu_workers, _MAX_CPU_WORKERS)
        self.batch_size = batch_size
        # FP16 is only useful (and safe) on CUDA hardware.
        self.fp16 = fp16 and torch.cuda.is_available()

        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if self.device.type == 'cuda':
            logger.info(f"异构处理器将使用GPU: {torch.cuda.get_device_name(0)}")
            if self.fp16:
                logger.info("启用FP16半精度计算以提高性能")
        else:
            logger.warning("GPU不可用，将使用CPU进行所有计算，性能可能受到影响")
            self.fp16 = False

        # Bounded queues provide back-pressure between pipeline stages.
        self.cpu_queue = queue.Queue(maxsize=1000)
        self.gpu_queue = queue.Queue(maxsize=1000)
        self.result_queue = queue.Queue()

        # Cooperative shutdown flag shared by all worker threads.
        self.stop_event = threading.Event()

        logger.info(f"异构处理器初始化完成，CPU工作线程数: {self.cpu_workers}, GPU批大小: {self.batch_size}")

    def process_chromosomes(self, chromosomes):
        """
        Run the full pipeline over a list of chromosomes.

        Args:
            chromosomes: chromosome names to process.

        Returns:
            (features, labels) tensor tuple, or (None, None) when no
            usable data was produced or an error occurred.
        """
        start_time = time.time()
        logger.info(f"开始处理{len(chromosomes)}个染色体的数据...")

        try:
            # Start the CPU worker pool.
            cpu_threads = [
                threading.Thread(target=self._cpu_worker, daemon=True)
                for _ in range(self.cpu_workers)
            ]
            for thread in cpu_threads:
                thread.start()

            # Start the single GPU batching thread.
            gpu_thread = threading.Thread(target=self._gpu_worker, daemon=True)
            gpu_thread.start()

            task_count = self._enqueue_tasks(chromosomes)
            logger.info(f"已生成 {task_count} 个CPU任务")

            # One sentinel per worker so every worker terminates even when
            # zero tasks were generated (the previous scheme sent
            # ``task_count`` sentinels, which could leave workers hanging).
            for _ in range(self.cpu_workers):
                self.cpu_queue.put(None)

            for thread in cpu_threads:
                thread.join()

            # All producers finished: tell the GPU worker to flush and stop.
            self.gpu_queue.put(None)
            gpu_thread.join()

            all_features, all_labels = self._drain_results()
            if not all_features or not all_labels:
                logger.warning("未找到有效的特征数据")
                return None, None

            # Merge every GPU batch into one features/labels pair.
            all_features = torch.cat(all_features, dim=0)
            all_labels = torch.cat(all_labels, dim=0)

            logger.info(f"数据处理完成，总耗时: {time.time() - start_time:.2f}秒，"
                      f"生成特征形状: {all_features.shape}, 标签形状: {all_labels.shape}")

            return all_features, all_labels

        except Exception as e:
            logger.error(f"数据处理过程中出错: {str(e)}")
            logger.error(traceback.format_exc())
            return None, None
        finally:
            # Ensure any still-running worker thread terminates.
            self.stop_event.set()

    def _enqueue_tasks(self, chromosomes):
        """Split each known chromosome into window-range tasks; return the task count."""
        task_count = 0
        for chrom in chromosomes:
            if chrom not in self.dataset._chrom_lengths:
                logger.warning(f"跳过未知染色体 {chrom}")
                continue
            length = self.dataset._chrom_lengths[chrom]
            # Number of full windows on this chromosome (at least one).
            n_windows = max(1, length // self.window_size)
            logger.info(f"为染色体 {chrom} (长度: {length}bp) 生成 {n_windows} 个窗口任务")
            # Each task covers up to _WINDOWS_PER_TASK consecutive windows.
            for i in range(0, n_windows, _WINDOWS_PER_TASK):
                end_idx = min(i + _WINDOWS_PER_TASK, n_windows)
                self.cpu_queue.put((chrom, i * self.window_size, end_idx * self.window_size))
                task_count += 1
        return task_count

    def _drain_results(self):
        """Empty the result queue; return (features_list, labels_list)."""
        all_features, all_labels = [], []
        result_count = 0
        while True:
            try:
                result = self.result_queue.get_nowait()
            except queue.Empty:
                break
            if result is not None:
                features, labels = result
                all_features.append(features)
                all_labels.append(labels)
                result_count += 1
        logger.info(f"收集了 {result_count} 个结果批次")
        return all_features, all_labels

    def _cpu_worker(self):
        """CPU worker thread: read BigWig signal and preprocess it into windows."""
        try:
            while not self.stop_event.is_set():
                try:
                    task = self.cpu_queue.get(timeout=5)
                except queue.Empty:
                    # Timeout: re-check the stop flag and keep waiting.
                    # (Previously Empty was caught OUTSIDE the loop, which
                    # silently killed the worker after 5 idle seconds.)
                    continue
                if task is None:  # shutdown sentinel
                    self.cpu_queue.task_done()
                    break

                chrom, start, end = task
                try:
                    windows, labels = self._extract_windows(chrom, start, end)
                    if windows:
                        # Hand the preprocessed chunk to the GPU stage.
                        self.gpu_queue.put((np.stack(windows), np.array(labels)))
                except Exception as e:
                    logger.error(f"处理区域 {chrom}:{start}-{end} 时出错: {str(e)}")
                finally:
                    self.cpu_queue.task_done()
        except Exception as e:
            logger.error(f"CPU工作线程出错: {str(e)}")
            logger.error(traceback.format_exc())

    def _extract_windows(self, chrom, start, end):
        """
        Slice region [start, end) of ``chrom`` into fixed-size windows.

        Returns:
            (windows, labels): parallel lists; each window is an
            (n_marks, window_size) array, each label comes from the
            dataset's ``_determine_window_function``.
        """
        windows, labels = [], []
        for pos in range(start, end, self.window_size):
            window_end = min(pos + self.window_size, end)
            # Skip trailing fragments shorter than half a window.
            if window_end - pos < self.window_size * 0.5:
                continue

            # Collect the signal of every histone mark for this window.
            marks_values = []
            for mark_name, mark_data in self.marks_data.items():
                try:
                    marks_values.append(mark_data.values(chrom, pos, window_end))
                except Exception as e:
                    # Missing signal is treated as zero coverage.
                    logger.debug(f"无法读取标记 {mark_name} 在 {chrom}:{pos}-{window_end} 的数据: {str(e)}")
                    marks_values.append(np.zeros(window_end - pos))

            if not marks_values:
                continue

            # Stack marks into (n_marks, width) and replace NaNs with 0.
            window_data = np.nan_to_num(np.stack(marks_values), nan=0.0)

            # Right-pad short windows to the fixed window size.
            if window_data.shape[1] < self.window_size:
                pad_width = ((0, 0), (0, self.window_size - window_data.shape[1]))
                window_data = np.pad(window_data, pad_width, mode='constant')

            labels.append(self.dataset._determine_window_function(chrom, pos, window_end, window_data))
            windows.append(window_data)
        return windows, labels

    def _gpu_worker(self):
        """GPU worker thread: batch window chunks, move to device, emit results."""
        try:
            batch_windows = []
            batch_labels = []
            batch_samples = 0  # windows (not chunks) accumulated so far

            while not self.stop_event.is_set():
                try:
                    data = self.gpu_queue.get(timeout=5)
                except queue.Empty:
                    # Idle: flush whatever has accumulated so far.
                    self._flush_batch(batch_windows, batch_labels)
                    batch_samples = 0
                    continue

                if data is None:  # shutdown sentinel
                    # Flush the pending batch before stopping (previously
                    # it was dropped here, losing the final windows).
                    self._flush_batch(batch_windows, batch_labels)
                    break

                windows, labels = data
                batch_windows.append(windows)
                batch_labels.append(labels)
                batch_samples += len(windows)

                # Flush once enough WINDOWS accumulated; counting samples
                # rather than chunks makes ``batch_size`` mean "windows".
                if batch_samples >= self.batch_size or self.gpu_queue.empty():
                    self._flush_batch(batch_windows, batch_labels)
                    batch_samples = 0

                self.gpu_queue.task_done()
        except Exception as e:
            logger.error(f"GPU工作线程出错: {str(e)}")
            logger.error(traceback.format_exc())

    def _flush_batch(self, batch_windows, batch_labels):
        """
        Merge accumulated chunks into one tensor batch and enqueue it.

        Clears both lists in place; no-op when the batch is empty.
        """
        if not batch_windows:
            return

        windows_batch = np.concatenate(batch_windows, axis=0)
        labels_batch = np.concatenate(batch_labels, axis=0)

        windows_tensor = torch.from_numpy(windows_batch).float().to(self.device)
        labels_tensor = torch.from_numpy(labels_batch).long().to(self.device)

        # Half precision only on CUDA (``self.fp16`` already implies it).
        if self.fp16 and self.device.type == 'cuda':
            windows_tensor = windows_tensor.half()

        # CNN feature extraction could be plugged in here; for now the
        # raw batched signal is passed through unchanged.
        self.result_queue.put((windows_tensor, labels_tensor))

        batch_windows.clear()
        batch_labels.clear()

@contextmanager
def timer(name):
    """
    Timing context manager.

    Measures the wall-clock duration of the wrapped code block and logs
    it when the block exits, even if an exception was raised.

    Usage:
        with timer("operation name"):
            ...  # code to time
    """
    started = time.time()
    try:
        yield
    finally:
        logger.info(f"{name} 耗时: {time.time() - started:.2f} 秒")