
import os
import random
import importlib.util
from typing import Optional, Tuple, Dict, Any

import torch
from torch.utils.data import DataLoader, random_split, Dataset
from torchvision import datasets, transforms

from config_manager import AugmentConfig
from config_manager import DatasetConfig
from data_manager.processors.data_processor import DataProcessor


class DatasetManager:
    """Build train/val/test datasets from torchvision built-ins or a local module.

    On construction this seeds all RNGs for reproducibility, builds the
    augmentation transform via ``DataProcessor``, and loads the configured
    dataset into ``self.dataset`` as a dict with keys ``train``/``val``/``test``.
    """

    def __init__(self, dataset_config: "DatasetConfig"):
        self.dataset_config = dataset_config
        self.augment_config = self.dataset_config.get_augment_config()
        self._set_seed(dataset_config.seed)
        self.data_processor = DataProcessor(self.augment_config, dataset_config.data_modal)
        self.data_augment = self.build_data_augment()
        self.dataset = self.load_dataset()

    def build_data_augment(self):
        """Return the transform pipeline assembled by the data processor."""
        return self.data_processor.transform

    @staticmethod
    def _set_seed(seed: int) -> None:
        """Seed Python's, torch's CPU and all CUDA RNGs for reproducibility."""
        random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)

    def load_dataset(self) -> Dict[str, Dataset]:
        """Main entry point: dispatch on the configured dataset type.

        Returns:
            Dict with keys ``train``/``val``/``test`` mapping to Datasets.

        Raises:
            ValueError: if ``dataset_type`` is neither "builtin" nor "local".
        """
        if self.dataset_config.dataset_type == "builtin":
            return self._load_builtin_datasets()
        if self.dataset_config.dataset_type == "local":
            return self.get_local_data()
        raise ValueError(f"未知的数据集类型: {self.dataset_config.dataset_type}")

    def _load_builtin_datasets(self) -> Dict[str, Dataset]:
        """Load a torchvision built-in dataset by configured name.

        Raises:
            ValueError: if ``dataset_name`` is not a torchvision dataset.
        """
        # BUG FIX: the original fell through to `return dataset` with an
        # unbound local (UnboundLocalError) when the name was unknown;
        # fail with an explicit, descriptive error instead.
        if not hasattr(datasets, self.dataset_config.dataset_name):
            raise ValueError(
                f"torchvision.datasets has no dataset named "
                f"{self.dataset_config.dataset_name!r}"
            )
        return self.get_datasets_data()

    def get_datasets_data(self) -> Dict[str, Dataset]:
        """Instantiate the built-in dataset and split it into train/val/test.

        torchvision datasets use one of two constructor conventions
        (``train=True/False`` vs ``split="train"/"test"``); both are tried.

        Raises:
            RuntimeError: if neither constructor convention works.
        """
        dataset_cls = getattr(datasets, self.dataset_config.dataset_name)

        loaders = [
            dict(train=True, test=False),            # classic torchvision style
            dict(split="train", split_test="test"),  # newer split= style
        ]
        train_dataset, test_dataset = None, None
        last_error: Optional[Exception] = None
        for loader in loaders:
            try:
                if "train" in loader:
                    train_dataset = dataset_cls(
                        root=self.dataset_config.data_dir,
                        train=loader["train"],
                        download=True,
                        transform=self.data_augment,
                    )
                    # NOTE(review): the test split gets no transform in the
                    # original code — presumably intentional (no train-time
                    # augmentation at eval), but confirm a base transform
                    # (e.g. ToTensor) isn't needed for the model to consume it.
                    test_dataset = dataset_cls(
                        root=self.dataset_config.data_dir,
                        train=loader["test"],
                        download=True,
                    )
                else:
                    train_dataset = dataset_cls(
                        root=self.dataset_config.data_dir,
                        split=loader["split"],
                        download=True,
                        transform=self.data_augment,
                    )
                    test_dataset = dataset_cls(
                        root=self.dataset_config.data_dir,
                        split=loader["split_test"],
                        download=True,
                    )
                break  # loaded successfully — stop trying conventions
            except Exception as e:  # try the next constructor convention
                last_error = e
                continue

        # BUG FIX: the original crashed with len(None) when both constructor
        # styles failed; raise a clear error that chains the real cause.
        if train_dataset is None:
            raise RuntimeError(
                f"Failed to load dataset {self.dataset_config.dataset_name!r} "
                f"with any known constructor convention"
            ) from last_error

        # Split the training data into train/val according to split_ratio[0].
        total_len = len(train_dataset)
        train_len = int(self.dataset_config.split_ratio[0] * total_len)
        val_len = total_len - train_len
        train_set, val_set = random_split(train_dataset, [train_len, val_len])
        return {
            "train": train_set,
            "val": val_set,
            "test": test_dataset,
        }

    def get_local_data(self) -> Dict[str, Optional[Dataset]]:
        """Load a user-defined Dataset class from a local Python file.

        The file at ``local_dataset_path`` is imported dynamically and the
        class named ``local_dataset_class`` is instantiated for each
        configured split directory.

        Raises:
            FileNotFoundError: if the dataset module file does not exist.
            ValueError: if the configured class is not found in the module.
        """
        if not os.path.exists(self.dataset_config.local_dataset_path):
            raise FileNotFoundError(f"数据集文件不存在: {self.dataset_config.local_dataset_path}")

        spec = importlib.util.spec_from_file_location(
            "custom_dataset", self.dataset_config.local_dataset_path
        )
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)

        dataset_cls = getattr(module, self.dataset_config.local_dataset_class, None)
        if dataset_cls is None:
            raise ValueError(
                f"在 {self.dataset_config.local_dataset_path} 中未找到类 {self.dataset_config.local_dataset_class}")

        # BUG FIX: the original passed an undefined name `transform`
        # (NameError); use the augmentation pipeline built in __init__.
        train_set = dataset_cls(self.dataset_config.local_train_dir,
                                transform=self.data_augment)
        val_set = (dataset_cls(self.dataset_config.local_val_dir,
                               transform=self.data_augment)
                   if self.dataset_config.local_val_dir else None)
        test_set = (dataset_cls(self.dataset_config.local_test_dir,
                                transform=self.data_augment)
                    if self.dataset_config.local_test_dir else None)
        # BUG FIX: return a dict so the result matches load_dataset()'s
        # declared Dict[str, Dataset] contract (the original returned a
        # tuple, which no caller could consume consistently).
        return {"train": train_set, "val": val_set, "test": test_set}

    # ========================
    # 2. Local custom dataset (ImageFolder layout)
    # ========================
    def _load_local_dataset(self) -> Dict[str, DataLoader]:
        """Load ImageFolder-style local data and wrap it in DataLoaders.

        Assumed directory layout::

            local_train_dir/
                class1/
                class2/
            local_val_dir/
                class1/
                class2/

        Raises:
            ValueError: if ``local_train_dir`` is not configured.
        """
        # BUG FIX: the original referenced self.config and self.transform,
        # neither of which exists on this class; the actual attributes are
        # self.dataset_config and self.data_augment.
        cfg = self.dataset_config
        if not cfg.local_train_dir:
            raise ValueError("本地数据集模式下必须提供 local_train_dir")

        train_set = datasets.ImageFolder(cfg.local_train_dir, transform=self.data_augment)
        val_set = (datasets.ImageFolder(cfg.local_val_dir, transform=self.data_augment)
                   if cfg.local_val_dir else None)
        test_set = (datasets.ImageFolder(cfg.local_test_dir, transform=self.data_augment)
                    if cfg.local_test_dir else None)

        return self._make_dataloaders(train_set, val_set, test_set)

    # ========================
    # Build DataLoaders
    # ========================
    def _make_dataloaders(self, train_set, val_set, test_set) -> Dict[str, DataLoader]:
        """Wrap each non-None split in a DataLoader.

        Only the training loader shuffles / drops the last batch; val and
        test are deterministic.
        """
        # BUG FIX: the original read self.config, which does not exist;
        # use self.dataset_config.
        # NOTE(review): assumes DatasetConfig exposes batch_size, shuffle,
        # num_workers, pin_memory, drop_last — confirm against config_manager.
        cfg = self.dataset_config
        loaders: Dict[str, DataLoader] = {}
        if train_set:
            loaders["train"] = DataLoader(
                train_set,
                batch_size=cfg.batch_size,
                shuffle=cfg.shuffle,
                num_workers=cfg.num_workers,
                pin_memory=cfg.pin_memory,
                drop_last=cfg.drop_last,
            )
        if val_set:
            loaders["val"] = DataLoader(
                val_set,
                batch_size=cfg.batch_size,
                shuffle=False,
                num_workers=cfg.num_workers,
            )
        if test_set:
            loaders["test"] = DataLoader(
                test_set,
                batch_size=cfg.batch_size,
                shuffle=False,
                num_workers=cfg.num_workers,
            )
        return loaders
