"""
优化的数据加载器
提高数据加载和预处理速度
"""

import glob
import multiprocessing as mp
import os
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from queue import Queue
from typing import Any, Dict, List, Optional

import cv2
import numpy as np
import psutil
import tifffile
import torch
from torch.utils.data import DataLoader, Dataset
from torch.utils.data.dataloader import default_collate

class OptimizedTwoDimensionalDataset(Dataset):
    """
    Optimized 2-D image-pair dataset.

    Scans per-subset crop directories for image / mask / center / flow ``.tif``
    files, builds (next, current, flow) index triples for consecutive
    timepoints, and serves raw sample dicts with optional in-memory caching
    and threaded preloading.
    """

    def __init__(
        self,
        crop_dir="./",
        data_dir="./",
        data_subsets=None,
        center="center-medoid",
        type="train",
        bg_id=0,
        size=None,
        transform=None,
        translation_prob=0.5,
        max_rel_translation=0.1,
        crop_size=None,
        use_cache=True,
        cache_size=1000,
        preload_data=True,
        num_workers=4,
    ):
        """
        Initialize the optimized dataset.

        Args:
            crop_dir: Root directory containing one folder per subset.
            data_dir: Root data directory (stored but unused here).
            data_subsets: Subset folder names to scan; None means empty.
                (Was a mutable default ``[]`` — replaced with a None sentinel
                to avoid the shared-list pitfall; behavior is unchanged.)
            center: Center-image folder prefix, e.g. "center-medoid".
            type: Split name, e.g. "train". NOTE: shadows the builtin
                ``type``; kept for caller compatibility.
            bg_id: Background label id.
            size: If not None, overrides the reported dataset length.
            transform: Optional callable applied to each sample dict.
            translation_prob: Probability of a random translation augmentation.
            max_rel_translation: Maximum relative translation offset.
            crop_size: Optional output crop size.
            use_cache: Whether to cache loaded samples in memory.
            cache_size: Maximum number of cached samples.
            preload_data: Whether to eagerly load samples into the cache.
            num_workers: Thread count used for preloading.
        """
        super().__init__()

        data_subsets = [] if data_subsets is None else data_subsets

        self.crop_dir = crop_dir
        self.data_dir = data_dir
        self.center = center
        self.type = type
        self.bg_id = bg_id
        self.size = size
        self.transform = transform
        self.crop_size = crop_size
        self.p_translation = translation_prob
        self.max_offset = max_rel_translation

        # Optimization knobs.
        self.use_cache = use_cache
        self.cache_size = cache_size
        self.preload_data = preload_data
        self.num_workers = num_workers

        # Sample cache shared across preloading threads; guarded by cache_lock.
        self.data_cache = {}
        self.cache_lock = threading.Lock()

        # Discover files and build (next, current, flow) index triples.
        self._load_data_lists(data_subsets)

        if self.preload_data:
            self._preload_data()

    def _load_data_lists(self, data_subsets):
        """Collect sorted image/mask/center/flow file lists for each subset."""
        print(f"加载 {self.type} 数据集...")

        image_list = []
        instance_list = []
        center_image_list = []
        flow_image_list = []

        for sub_set in data_subsets:
            # Raw images.
            img_files = sorted(
                glob.glob(os.path.join(self.crop_dir, sub_set, "images/*.tif"))
            )
            image_list.extend(img_files)

            # Instance masks.
            inst_files = sorted(
                glob.glob(os.path.join(self.crop_dir, sub_set, "masks/*.tif"))
            )
            instance_list.extend(inst_files)

            # Center images.
            center_files = sorted(
                glob.glob(
                    os.path.join(self.crop_dir, sub_set, f"{self.center}/center*.tif")
                )
            )
            center_image_list.extend(center_files)

            # Flow images.
            flow_files = sorted(
                glob.glob(
                    os.path.join(self.crop_dir, sub_set, f"{self.center}-flow/*.tif")
                )
            )
            flow_image_list.extend(flow_files)

            print(f"  {sub_set}: {len(img_files)} 图像, {len(inst_files)} 实例")

        # NOTE(review): instance/center lists are later indexed with image
        # indices, which assumes all four lists stay aligned — confirm the
        # crop directories always contain matching file sets.
        self.image_list = image_list
        self.instance_list = instance_list
        self.center_image_list = center_image_list
        self.flow_image_list = flow_image_list

        self.pair_index = self._get_image_pairs()
        self.n_pairs = len(self.pair_index)

        print(f"总图像对数: {self.n_pairs}")

    def _get_image_pairs(self):
        """
        Build (next_index, current_index, flow_index) triples.

        A pair is emitted only when both the next-timepoint image and the
        matching flow image exist in the discovered file lists.
        """
        # O(1) path lookups instead of repeated list.index() (O(n) each, giving
        # O(n^2) overall). setdefault keeps the FIRST occurrence, matching the
        # semantics of list.index() for any duplicate paths.
        image_index_map = {}
        for idx, path in enumerate(self.image_list):
            image_index_map.setdefault(path, idx)
        flow_index_map = {}
        for idx, path in enumerate(self.flow_image_list):
            flow_index_map.setdefault(path, idx)

        pairs = []
        for i, path_img_file in enumerate(self.image_list):
            path_img, name_img = os.path.split(path_img_file)
            name_img, ending = name_img.split(".")
            # Renamed from "time" to avoid shadowing the imported time module.
            time_token, patch_id = name_img.split("_")

            # Accept both "t012" and "012" style time stamps.
            if time_token.startswith("t"):
                time_int = int(time_token[1:])
                time_prefix = "t"
            else:
                time_int = int(time_token)
                time_prefix = ""

            # Image at the next timepoint, zero-padded to the same width.
            name_next_img = (
                "_".join(
                    [
                        time_prefix
                        + str(time_int + 1).zfill(len(time_token.lstrip("t"))),
                        patch_id,
                    ]
                )
                + "."
                + ending
            )
            next_img_path = os.path.join(path_img, name_next_img)

            # Flow image, e.g. "center013_center012_<patch>.tif".
            flow_img_name = (
                "_".join(
                    [
                        f"center{str(time_int + 1).zfill(3)}",
                        f"center{str(time_int).zfill(3)}",
                        patch_id,
                    ]
                )
                + "."
                + ending
            )
            path_flow_img = os.path.join(
                os.path.dirname(path_img),
                "-".join([self.center, "flow"]),
                flow_img_name,
            )

            next_img_index = image_index_map.get(next_img_path)
            flow_index = flow_index_map.get(path_flow_img)
            if next_img_index is not None and flow_index is not None:
                pairs.append((next_img_index, i, flow_index))

        return pairs

    def _preload_data(self):
        """Eagerly load up to cache_size samples into memory using a thread pool."""
        print("预加载数据到内存...")

        if not self.use_cache:
            return

        with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
            # Preload the first cache_size samples (or fewer if the dataset is small).
            preload_indices = list(range(min(self.cache_size, len(self.pair_index))))
            futures = [executor.submit(self._load_sample_data, idx) for idx in preload_indices]

            for i, future in enumerate(futures):
                try:
                    data = future.result()
                    with self.cache_lock:
                        self.data_cache[preload_indices[i]] = data
                except Exception as e:
                    # Best-effort preload: report and continue with the rest.
                    print(f"预加载样本 {preload_indices[i]} 失败: {e}")

        print(f"预加载完成，缓存大小: {len(self.data_cache)}")

    def _load_sample_data(self, index):
        """
        Load one sample's arrays from disk.

        Returns a dict of numpy arrays for the current/previous images,
        instances, center images, and the flow field.
        """
        index_curr, index_prev, flow_index = self.pair_index[index]

        image_curr = tifffile.imread(self.image_list[index_curr])
        image_prev = tifffile.imread(self.image_list[index_prev])

        instance_curr = tifffile.imread(self.instance_list[index_curr])
        instance_prev = tifffile.imread(self.instance_list[index_prev])

        center_image_curr = tifffile.imread(self.center_image_list[index_curr])
        center_image_prev = tifffile.imread(self.center_image_list[index_prev])

        flow = tifffile.imread(self.flow_image_list[flow_index])

        return {
            'image_curr': image_curr,
            'image_prev': image_prev,
            'instance_curr': instance_curr,
            'instance_prev': instance_prev,
            'center_image_curr': center_image_curr,
            'center_image_prev': center_image_prev,
            'flow': flow,
        }

    def __len__(self):
        # size, when given, overrides the true pair count (may repeat/clip).
        return len(self.pair_index) if self.size is None else self.size

    def __getitem__(self, index):
        """Return the (possibly transformed) sample for *index*."""
        sample = None
        if self.use_cache:
            # Read under the lock for consistency with cache writers.
            with self.cache_lock:
                sample = self.data_cache.get(index)

        if sample is None:
            sample = self._load_sample_data(index)

            # Insert while the cache still has room (double-checked under lock).
            if self.use_cache and len(self.data_cache) < self.cache_size:
                with self.cache_lock:
                    if len(self.data_cache) < self.cache_size:
                        self.data_cache[index] = sample

        if self.transform is not None:
            sample = self.transform(sample)

        return sample

def create_optimized_dataloader(
    dataset,
    batch_size=16,
    shuffle=True,
    num_workers=None,
    pin_memory=True,
    persistent_workers=True,
    prefetch_factor=2,
    drop_last=False,
):
    """
    Create a DataLoader tuned for throughput.

    Args:
        dataset: Dataset to wrap.
        batch_size: Samples per batch.
        shuffle: Whether to shuffle sampling order.
        num_workers: Worker process count; None picks min(cpu_count // 2, 8).
        pin_memory: Use pinned host memory (faster host-to-GPU copies).
        persistent_workers: Keep worker processes alive between epochs.
        prefetch_factor: Batches prefetched per worker.
        drop_last: Drop the final incomplete batch.

    Returns:
        A configured torch.utils.data.DataLoader.
    """
    if num_workers is None:
        num_workers = min(mp.cpu_count() // 2, 8)

    # BUG FIX: DataLoader raises ValueError when persistent_workers=True or a
    # prefetch_factor is given with num_workers == 0 (single-process loading).
    # This happened both when callers passed num_workers=0 explicitly and when
    # cpu_count() == 1 made the auto-selection above yield 0. Fall back to the
    # single-process defaults instead of crashing.
    if num_workers == 0:
        persistent_workers = False
        prefetch_factor = None

    def optimized_collate_fn(batch):
        """Collate with default_collate; on failure report and yield None."""
        try:
            return default_collate(batch)
        except Exception as e:
            print(f"数据整理失败: {e}")
            # 返回空批次
            return None

    kwargs = dict(
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=pin_memory,
        persistent_workers=persistent_workers,
        drop_last=drop_last,
        collate_fn=optimized_collate_fn,
    )
    # Only forward prefetch_factor when multiprocessing is active; older torch
    # versions reject any explicit value (even None) with num_workers == 0.
    if prefetch_factor is not None:
        kwargs["prefetch_factor"] = prefetch_factor

    return DataLoader(dataset, **kwargs)

class FastTransforms:
    """Lightweight static preprocessing helpers for ndarrays and tensors.

    Each helper dispatches on the input type and passes any other type
    through unchanged.
    """

    @staticmethod
    def fast_normalize(image, mean=0.5, std=0.5):
        """Return (image - mean) / std as float32/float; pass-through otherwise."""
        if isinstance(image, torch.Tensor):
            return (image.float() - mean) / std
        if isinstance(image, np.ndarray):
            return (image.astype(np.float32) - mean) / std
        return image

    @staticmethod
    def fast_resize(image, size, interpolation=cv2.INTER_LINEAR):
        """Resize via cv2 (ndarray) or bilinear interpolation (tensor).

        NOTE(review): the tensor path unsqueezes/squeezes a batch dim, which
        assumes a 3-D (C, H, W) input — confirm with callers.
        """
        if isinstance(image, torch.Tensor):
            batched = torch.nn.functional.interpolate(
                image.unsqueeze(0), size=size, mode='bilinear', align_corners=False
            )
            return batched.squeeze(0)
        if isinstance(image, np.ndarray):
            return cv2.resize(image, size, interpolation=interpolation)
        return image

    @staticmethod
    def fast_crop(image, top_left, crop_size):
        """Crop a square window of side crop_size anchored at top_left=(y, x).

        NOTE(review): the tensor path assumes channel-first (C, H, W) layout.
        """
        if isinstance(image, torch.Tensor):
            y0, x0 = top_left
            return image[:, y0:y0 + crop_size, x0:x0 + crop_size]
        if isinstance(image, np.ndarray):
            y0, x0 = top_left
            return image[y0:y0 + crop_size, x0:x0 + crop_size]
        return image

# Performance monitoring
class DataLoaderMonitor:
    """Wall-clock performance tracker for data-loading batches."""

    def __init__(self):
        # Per-sample times, per-batch times, and the pending batch start stamp.
        self.load_times = []
        self.batch_times = []
        self.start_time = None

    def start_batch(self):
        """Mark the beginning of a batch load."""
        self.start_time = time.time()

    def end_batch(self, batch_size):
        """Record elapsed time for the batch opened by start_batch().

        Calling this without a preceding start_batch() is a no-op.
        """
        if self.start_time is None:
            return
        elapsed = time.time() - self.start_time
        self.batch_times.append(elapsed)
        self.load_times.append(elapsed / batch_size)

    def get_stats(self):
        """Return aggregate timing stats, or {} when nothing was recorded.

        NOTE(review): 'total_samples' actually counts recorded batches (one
        entry per end_batch call) and 'samples_per_second' is therefore
        batches/sec — values kept as-is for caller compatibility.
        """
        if not self.load_times:
            return {}
        return {
            'avg_load_time': np.mean(self.load_times),
            'avg_batch_time': np.mean(self.batch_times),
            'total_samples': len(self.load_times),
            'samples_per_second': len(self.load_times) / sum(self.batch_times),
        }

    def print_stats(self):
        """Pretty-print the collected statistics (silent when empty)."""
        stats = self.get_stats()
        if not stats:
            return
        print(f"数据加载统计:")
        print(f"  平均加载时间: {stats['avg_load_time']:.4f}秒/样本")
        print(f"  平均批次时间: {stats['avg_batch_time']:.4f}秒/批次")
        print(f"  总样本数: {stats['total_samples']}")
        print(f"  加载速度: {stats['samples_per_second']:.2f} 样本/秒")