"""
麻将数据加载器
"""
import os
import json
import numpy as np
from typing import Dict, List, Optional, Tuple, Union, Iterable
import torch
from torch.utils.data import Dataset, DataLoader, Sampler
import logging
import glob
import random
from concurrent.futures import ThreadPoolExecutor

from sichuanmajiang.data.preprocessor import DataPreprocessor


class MahjongDataset(Dataset):
    """
    Mahjong dataset for PyTorch data loading.

    Flattens one or more JSON files (each containing a list of data-point
    dicts) into a single indexable dataset. Data points are either cached in
    memory up front or re-read from disk on every access.
    """
    def __init__(self,
                 data_files: Union[str, List[str]],
                 preprocessor: Optional["DataPreprocessor"] = None,
                 load_in_memory: bool = True,
                 transform: Optional[callable] = None):
        """
        Args:
            data_files: A directory (every ``*.json`` inside is used), a single
                file path, or an explicit list of file paths.
            preprocessor: Optional feature/label extractor. When absent the raw
                ``'features'`` / ``'action_index'`` entries of each data point
                are used directly.
            load_in_memory: Cache all data points up front instead of
                re-reading files on each ``__getitem__``.
            transform: Optional callable applied to each finished sample dict.

        Raises:
            FileNotFoundError: If none of the given paths exist.
        """
        self.preprocessor = preprocessor
        self.load_in_memory = load_in_memory
        self.transform = transform

        self.logger = logging.getLogger('MahjongDataset')

        # Normalize data_files into a list of paths.
        if isinstance(data_files, str):
            if os.path.isdir(data_files):
                # Directory: take every JSON file, sorted so the sample order
                # is deterministic across runs (glob order is filesystem-dependent).
                self.data_files = sorted(glob.glob(os.path.join(data_files, '*.json')))
            else:
                # Single file path.
                self.data_files = [data_files]
        else:
            self.data_files = list(data_files)

        # Drop non-existent paths; fail loudly if nothing is left.
        self.data_files = [f for f in self.data_files if os.path.exists(f)]
        if not self.data_files:
            raise FileNotFoundError(f"找不到数据文件: {data_files}")

        self.logger.info(f"找到 {len(self.data_files)} 个数据文件")

        # data_cache: data points flattened across all files (memory mode only).
        # file_indices: (file_idx, index-within-file) for every data point.
        # _file_offsets[file_idx]: position in data_cache where that file's
        #   entries start — required to map (file_idx, internal_idx) into the
        #   flattened cache. Indexing the cache with internal_idx alone is only
        #   correct for the first file.
        self.data_cache = []
        self.file_indices = []
        self._file_offsets = []

        if self.load_in_memory:
            self._load_data_to_memory()
        else:
            # Only build the index; data stays on disk.
            self._scan_files()

    def _load_data_to_memory(self) -> None:
        """
        Load every file into ``data_cache`` and build the index structures.

        Files that fail to parse are logged and skipped; they contribute zero
        entries but still get an offset slot so file indices stay aligned.
        """
        self.logger.info("开始加载数据到内存...")

        self._file_offsets = []
        for file_idx, file_path in enumerate(self.data_files):
            # Record where this file's data points begin in the flat cache.
            self._file_offsets.append(len(self.data_cache))
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                self.data_cache.extend(data)
                self.file_indices.extend([(file_idx, i) for i in range(len(data))])

                self.logger.info(f"加载文件: {file_path}, 数据点: {len(data)}")
            except Exception as e:
                self.logger.error(f"加载文件失败: {file_path}, 错误: {e}")

        self.logger.info(f"数据加载完成，共 {len(self.data_cache)} 个数据点")

    def _scan_files(self) -> None:
        """
        Build the (file_idx, internal_idx) index without caching any data.

        Note: each file is still parsed once here to learn its length.
        """
        self.logger.info("开始扫描数据文件...")

        for file_idx, file_path in enumerate(self.data_files):
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)

                # Index only; the data itself is discarded.
                self.file_indices.extend([(file_idx, i) for i in range(len(data))])

                self.logger.info(f"扫描文件: {file_path}, 数据点: {len(data)}")
            except Exception as e:
                self.logger.error(f"扫描文件失败: {file_path}, 错误: {e}")

        self.logger.info(f"文件扫描完成，共 {len(self.file_indices)} 个数据点")

    def _load_data_point(self, file_idx: int, internal_idx: int) -> Dict:
        """
        Fetch one data point, from the cache or from disk.

        Args:
            file_idx: Index into ``self.data_files``.
            internal_idx: Index of the data point within that file.

        Returns:
            The data-point dict, or ``{}`` if a disk read fails.
        """
        if self.load_in_memory:
            # The cache is flattened across files, so the lookup must be
            # shifted by this file's start offset. (Using internal_idx alone
            # returned the wrong sample for every file after the first.)
            return self.data_cache[self._file_offsets[file_idx] + internal_idx]
        else:
            # Disk-backed mode: re-read the file on demand.
            file_path = self.data_files[file_idx]
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    data = json.load(f)
                    return data[internal_idx]
            except Exception as e:
                self.logger.error(f"加载数据点失败: {file_path}[{internal_idx}], 错误: {e}")
                return {}

    def __len__(self) -> int:
        """Total number of data points across all files."""
        return len(self.file_indices)

    def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
        """
        Build one training sample.

        Args:
            idx: Sample index.

        Returns:
            Dict with ``'features'`` (float tensor) and ``'label'`` (scalar
            long tensor); ``'reward'``, ``'next_features'`` and ``'done'`` are
            included only when the underlying data point carries them.
        """
        file_idx, internal_idx = self.file_indices[idx]
        data_point = self._load_data_point(file_idx, internal_idx)

        # Delegate extraction to the preprocessor when present; otherwise fall
        # back to the raw fields stored in the data point.
        if self.preprocessor:
            features = self.preprocessor._extract_features(data_point)
            label = self.preprocessor._extract_label(data_point)
        else:
            features = np.array(data_point.get('features', []))
            label = data_point.get('action_index', 0)

        sample = {
            'features': torch.FloatTensor(features),
            'label': torch.LongTensor([label])[0]
        }

        # Optional RL-style extras, passed through when available.
        if 'reward' in data_point:
            sample['reward'] = torch.FloatTensor([data_point['reward']])[0]

        if 'next_state' in data_point:
            # next_state may be a raw state dict (needs the preprocessor) or a
            # pre-extracted feature array.
            if self.preprocessor and isinstance(data_point['next_state'], dict):
                next_features = self.preprocessor._extract_features(data_point['next_state'])
            else:
                next_features = np.array(data_point['next_state'])
            sample['next_features'] = torch.FloatTensor(next_features)

        if 'done' in data_point:
            sample['done'] = torch.BoolTensor([data_point['done']])[0]

        # User-supplied transform runs last, on the finished sample.
        if self.transform:
            sample = self.transform(sample)

        return sample


class BalancedSampler(Sampler):
    """
    Balanced sampler — draws an (approximately) equal number of samples from
    every action class so frequent classes do not drown out rare ones.

    Requires the dataset to expose the MahjongDataset attributes
    ``file_indices`` / ``load_in_memory`` / ``data_cache`` / ``preprocessor``;
    labels can only be scanned cheaply when the data is cached in memory.
    """
    def __init__(self, dataset: Dataset, num_classes: int = 34, replacement: bool = True):
        """
        Args:
            dataset: Dataset to sample from.
            num_classes: Number of action classes.
            replacement: Sample with replacement within each class.
        """
        self.dataset = dataset
        self.num_classes = num_classes
        self.replacement = replacement

        # Map each class label to the dataset indices carrying that label.
        self.class_indices = {i: [] for i in range(num_classes)}

        self.logger = logging.getLogger('BalancedSampler')
        self.logger.info("构建类别索引映射...")

        for idx in range(len(dataset)):
            try:
                # data_cache is flattened across all files, so the dataset
                # index itself is the cache position. (Indexing with the
                # per-file internal index was a bug for every file after the
                # first.) Labels of samples not in the cache are skipped.
                if dataset.load_in_memory and idx < len(dataset.data_cache):
                    data_point = dataset.data_cache[idx]
                    if dataset.preprocessor:
                        label = dataset.preprocessor._extract_label(data_point)
                    else:
                        label = data_point.get('action_index', -1)

                    # Out-of-range labels are ignored rather than crashing.
                    if 0 <= label < num_classes:
                        self.class_indices[label].append(idx)
            except Exception as e:
                self.logger.error(f"处理索引 {idx} 失败: {e}")

        # Per-class sample counts, for logging and sizing.
        self.class_counts = {c: len(indices) for c, indices in self.class_indices.items()}
        self.logger.info(f"类别分布: {self.class_counts}")

        # min_count drives how many samples each class contributes per epoch.
        self.min_count = min(self.class_counts.values()) if self.class_counts else 0
        self.logger.info(f"最小类别样本数: {self.min_count}")

    def __iter__(self) -> Iterable[int]:
        """
        Yield a shuffled sequence of indices with up to ``min_count`` samples
        per class.
        """
        if self.min_count == 0:
            # No usable data (at least one class is empty) — yield nothing.
            return iter([])

        sampled_indices = []

        for class_idx in range(self.num_classes):
            if len(self.class_indices[class_idx]) > 0:
                if self.replacement:
                    # With replacement: always exactly min_count draws.
                    samples = random.choices(self.class_indices[class_idx], k=self.min_count)
                else:
                    # Without replacement: capped by the class's actual size.
                    sample_count = min(self.min_count, len(self.class_indices[class_idx]))
                    samples = random.sample(self.class_indices[class_idx], sample_count)

                sampled_indices.extend(samples)

        # Shuffle so batches mix classes instead of being grouped by label.
        random.shuffle(sampled_indices)

        return iter(sampled_indices)

    def __len__(self) -> int:
        """
        Nominal epoch length (``min_count`` draws for each of the
        ``num_classes`` classes).
        """
        return self.min_count * self.num_classes


class DataLoaderBuilder:
    """
    Builder for DataLoaders over MahjongDataset, with optional balanced
    sampling and a key-tolerant collate function.
    """
    @staticmethod
    def build_dataloader(
        data_path: str,
        batch_size: int = 64,
        shuffle: bool = True,
        num_workers: int = 4,
        preprocessor: Optional["DataPreprocessor"] = None,
        use_balanced_sampler: bool = False,
        load_in_memory: bool = True,
        pin_memory: bool = True,
        persistent_workers: bool = True
    ) -> DataLoader:
        """
        Build a DataLoader over the data at ``data_path``.

        Args:
            data_path: Data path (file, directory or list of files).
            batch_size: Batch size.
            shuffle: Shuffle samples (ignored when the balanced sampler is on).
            num_workers: Worker process count.
            preprocessor: Optional data preprocessor.
            use_balanced_sampler: Use BalancedSampler for class-even draws.
            load_in_memory: Cache the dataset in memory.
            pin_memory: Use pinned host memory for faster GPU transfer.
            persistent_workers: Keep workers alive between epochs.

        Returns:
            The configured DataLoader.
        """
        dataset = MahjongDataset(
            data_path,
            preprocessor=preprocessor,
            load_in_memory=load_in_memory
        )

        # A custom sampler and shuffle are mutually exclusive in DataLoader.
        sampler = None
        if use_balanced_sampler:
            sampler = BalancedSampler(dataset)
            shuffle = False

        dataloader = DataLoader(
            dataset,
            batch_size=batch_size,
            shuffle=shuffle,
            sampler=sampler,
            num_workers=num_workers,
            pin_memory=pin_memory,
            # persistent_workers requires num_workers > 0; passing True with
            # zero workers makes DataLoader raise ValueError.
            persistent_workers=persistent_workers and num_workers > 0,
            collate_fn=DataLoaderBuilder._collate_fn
        )

        return dataloader

    @staticmethod
    def _collate_fn(batch: List[Dict]) -> Dict[str, torch.Tensor]:
        """
        Collate samples whose key sets may differ.

        For each key seen anywhere in the batch, stacks the tensors of the
        samples that have it; keys whose tensors cannot be stacked (e.g.
        ragged shapes) are logged and dropped rather than failing the batch.
        NOTE: a key missing from some samples yields a tensor shorter than
        the batch — downstream code must be prepared for that.

        Args:
            batch: List of per-sample dicts of tensors.

        Returns:
            Dict of stacked tensors.
        """
        collated = {}

        # Union of keys across the whole batch.
        all_keys = set()
        for sample in batch:
            all_keys.update(sample.keys())

        for key in all_keys:
            has_key = [key in sample for sample in batch]
            if any(has_key):
                # Stack only the samples that actually carry this key.
                tensors = [sample[key] for sample, has in zip(batch, has_key) if has]

                try:
                    collated[key] = torch.stack(tensors)
                except Exception as e:
                    # Ragged or mismatched tensors: skip the key, keep the batch.
                    logger = logging.getLogger('DataLoaderBuilder')
                    logger.error(f"批处理键 '{key}' 时出错: {e}")

        return collated


class ParallelDataLoader:
    """
    Parallel data loader — interleaves batches from several DataLoaders,
    picking the source loader at random according to per-loader weights.
    """
    def __init__(self, dataloaders: List[DataLoader], weights: Optional[List[float]] = None):
        """
        Args:
            dataloaders: The loaders to draw batches from.
            weights: Per-loader sampling probabilities; defaults to uniform.
        """
        self.dataloaders = dataloaders
        self.weights = weights or [1.0 / len(dataloaders)] * len(dataloaders)
        # One live iterator per loader, reset individually on exhaustion.
        self.iterators = [iter(dl) for dl in dataloaders]
    
    def __iter__(self) -> Iterable[Dict[str, torch.Tensor]]:
        """
        Yield batches drawn from the weighted mix of loaders.

        Every yielded batch gains a 'source' tensor identifying the loader it
        came from, one entry per sample (assumes the batch has a 'features'
        key whose length is the batch size — TODO confirm for custom collate
        functions). NOTE(review): 'source' is written into the loader's own
        batch dict, i.e. the batch is mutated in place.
        """
        while True:
            try:
                # Pick a loader index according to the configured weights.
                loader_idx = random.choices(range(len(self.dataloaders)), weights=self.weights, k=1)[0]
                
                # Pull the next batch from the chosen loader.
                batch = next(self.iterators[loader_idx])
                
                # Tag every sample in the batch with its source loader.
                batch['source'] = torch.tensor([loader_idx] * len(batch['features']))
                
                yield batch
            except StopIteration:
                # The chosen loader ran dry: rebuild its iterator so it can
                # serve again on a later pick.
                self.iterators[loader_idx] = iter(self.dataloaders[loader_idx])
                
                # Fall back to the first other loader that still has data.
                found = False
                for i in range(len(self.dataloaders)):
                    if i != loader_idx:
                        try:
                            batch = next(self.iterators[i])
                            batch['source'] = torch.tensor([i] * len(batch['features']))
                            yield batch
                            found = True
                            break
                        except StopIteration:
                            self.iterators[i] = iter(self.dataloaders[i])
                
                if not found:
                    # Every loader raised StopIteration in this round —
                    # treat the stream as exhausted and stop.
                    break
    
    def __len__(self) -> int:
        """
        Estimated length: the sum of all loaders' lengths. Only an estimate —
        weighted picking plus per-loader iterator resets means the number of
        batches actually yielded per pass can differ.
        """
        return sum(len(dl) for dl in self.dataloaders)


def create_dataset(data_path: str, 
                  preprocessor: Optional[DataPreprocessor] = None,
                  load_in_memory: bool = True) -> MahjongDataset:
    """
    Factory function: build a MahjongDataset for the given path.

    Args:
        data_path: File, directory, or path list understood by MahjongDataset.
        preprocessor: Optional data preprocessor.
        load_in_memory: Whether to cache all data points in memory.

    Returns:
        The constructed dataset instance.
    """
    options = {
        'preprocessor': preprocessor,
        'load_in_memory': load_in_memory,
    }
    return MahjongDataset(data_path, **options)


def create_dataloader(dataset: MahjongDataset,
                     batch_size: int = 64,
                     shuffle: bool = True,
                     num_workers: int = 4,
                     use_balanced_sampler: bool = False,
                     **kwargs) -> DataLoader:
    """
    Factory function: wrap a dataset in a DataLoader.

    Args:
        dataset: The dataset to load from.
        batch_size: Batch size.
        shuffle: Shuffle samples (forced off when the balanced sampler is on).
        num_workers: Worker process count.
        use_balanced_sampler: Draw classes evenly via BalancedSampler.
        **kwargs: Forwarded verbatim to torch's DataLoader.

    Returns:
        The configured DataLoader instance.
    """
    if use_balanced_sampler:
        # A custom sampler and shuffle are mutually exclusive in DataLoader.
        sampler, shuffle = BalancedSampler(dataset), False
    else:
        sampler = None

    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=DataLoaderBuilder._collate_fn,
        **kwargs
    )


def load_and_split_data(data_path: str,
                        preprocessor: Optional[DataPreprocessor] = None,
                        train_ratio: float = 0.7,
                        val_ratio: float = 0.15,
                        test_ratio: float = 0.15,
                        batch_size: int = 64,
                        **kwargs) -> Tuple[DataLoader, DataLoader, DataLoader]:
    """
    Load data and split it into train / validation / test loaders.

    Args:
        data_path: Data path (file, directory or path list).
        preprocessor: Optional data preprocessor.
        train_ratio: Fraction of samples for training.
        val_ratio: Fraction of samples for validation.
        test_ratio: Nominal test fraction; the test split actually receives
            whatever remains after train/val, so rounding never loses samples.
        batch_size: Batch size for all three loaders.
        **kwargs: Extra options. 'load_in_memory' is routed to the dataset;
            everything else (num_workers, use_balanced_sampler, ...) goes to
            the DataLoaders. (Previously every kwarg was passed to both, so
            loader-only options crashed create_dataset and vice versa.)

    Returns:
        (train loader, validation loader, test loader)
    """
    # Split the kwargs: create_dataset only understands 'load_in_memory'.
    load_in_memory = kwargs.pop('load_in_memory', True)
    dataset = create_dataset(data_path, preprocessor, load_in_memory=load_in_memory)

    # Size the splits; the test set absorbs any rounding remainder.
    total_size = len(dataset)
    train_size = int(total_size * train_ratio)
    val_size = int(total_size * val_ratio)
    test_size = total_size - train_size - val_size

    train_dataset, val_dataset, test_dataset = torch.utils.data.random_split(
        dataset,
        [train_size, val_size, test_size]
    )

    # Only the training loader shuffles.
    train_loader = create_dataloader(train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
    val_loader = create_dataloader(val_dataset, batch_size=batch_size, shuffle=False, **kwargs)
    test_loader = create_dataloader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)

    return train_loader, val_loader, test_loader


def create_batch_generator(dataloader: DataLoader,
                           num_batches: Optional[int] = None,
                           infinite: bool = False) -> Iterable[Dict[str, torch.Tensor]]:
    """
    Create a batch generator over a DataLoader.

    Args:
        dataloader: The data loader to draw batches from.
        num_batches: Stop after yielding this many batches (None = no cap).
        infinite: Restart the loader after each pass instead of stopping.

    Returns:
        A generator of batches.
    """
    produced = 0

    while True:
        for item in dataloader:
            yield item

            produced += 1
            # Hit the cap — stop regardless of the infinite flag.
            if num_batches is not None and produced >= num_batches:
                return

        # Single pass unless infinite cycling was requested.
        if not infinite:
            return