import torch
from torch.utils.data import DataLoader, random_split
from torchensemble import VotingClassifier  # 或者 VotingRegressor, 取决于任务类型
from pointcept.datasets import DATASETS
from pointcept.models import MODELS
from pointcept.engines.defaults import (
    default_argument_parser,
    default_config_parser,
    default_setup,
)
from pointcept.engines.train import Trainer
from pointcept.engines.launch import launch


# Split a dataset into several non-overlapping random subsets.
def split_dataset(dataset, num_splits):
    """Randomly partition *dataset* into ``num_splits`` subsets.

    The first ``num_splits - 1`` subsets hold ``len(dataset) // num_splits``
    samples each; the last subset additionally absorbs the remainder, so
    every sample is assigned to exactly one subset.

    Args:
        dataset: Any dataset accepted by ``torch.utils.data.random_split``
            (must implement ``__len__``).
        num_splits (int): Number of subsets to produce; must be >= 1.

    Returns:
        list[torch.utils.data.Subset]: The randomly drawn subsets.

    Raises:
        ValueError: If ``num_splits`` is less than 1 (the original code
            would raise ZeroDivisionError or mis-assign via lengths[-1]).
    """
    if num_splits < 1:
        raise ValueError(f"num_splits must be >= 1, got {num_splits}")
    dataset_size = len(dataset)
    split_size = dataset_size // num_splits
    lengths = [split_size] * num_splits
    # The last subset absorbs the remainder so lengths sum to dataset_size.
    lengths[-1] += dataset_size % num_splits
    return random_split(dataset, lengths)


# Train a single member of the ensemble on its dedicated dataloader.
def train_ensemble_member(trainer, dataloader, device):
    """Move the trainer's model onto *device*, then run its training loop."""
    member_model = trainer.model
    member_model.to(device)  # place the member's parameters on the target device
    trainer.train(dataloader=dataloader)  # delegate to the trainer's own loop


# Specialized trainer that trains each member of an ensemble in turn.
#
# NOTE(review): `train()` below passes `self` to `train_ensemble_member`,
# which then calls `trainer.train(dataloader=...)`. Because this class
# overrides `train(self, dataloaders=None)`, that inner call re-enters THIS
# method with an unexpected `dataloader=` keyword and raises a TypeError —
# the base Trainer's training loop is never reached. The correct fix depends
# on the base `Trainer` API (not visible in this file); confirm how a single
# member is meant to be trained (e.g. via the base class's train entry point).
class EnsembleTrainer(Trainer):
    def __init__(self, cfg, ensemble_model, device):
        """Build a trainer that coordinates ensemble-member training.

        Args:
            cfg: Pointcept config object, forwarded to the base Trainer.
            ensemble_model: The ensemble as a whole (e.g. a torchensemble
                wrapper); stored but not otherwise used by this class.
            device: torch.device the member models should be trained on.
        """
        super().__init__(cfg)
        self.ensemble_model = ensemble_model  # the full ensemble model
        self.device = device  # target device for member training

    # Train every member of the ensemble, one dataloader per member.
    def train(self, dataloaders=None):
        """Train one ensemble member per dataloader in *dataloaders*.

        Raises:
            ValueError: If no dataloaders are provided.
        """
        if dataloaders is None:
            raise ValueError("必须提供数据加载器。")

        for i, dataloader in enumerate(dataloaders):
            print(f"正在训练第 {i + 1} 个模型...")
            # See NOTE(review) above: this call re-enters this same method.
            train_ensemble_member(self, dataloader, self.device)


# Worker entry point: builds data, model, and trainer, then runs training.
def main_worker(cfg):
    """Set up the config, data splits, ensemble model, and trainer, then train."""
    cfg = default_setup(cfg)

    # Build the full dataset, split it, and wrap each split in a DataLoader.
    full_dataset = DATASETS.build(dict(type=cfg.dataset.type, **cfg.dataset.kwargs))
    member_loaders = []
    for subset in split_dataset(full_dataset, cfg.num_models):
        member_loaders.append(
            DataLoader(subset, batch_size=cfg.batch_size, shuffle=True)
        )

    # Build the base estimator and wrap it in a voting ensemble.
    base_estimator = MODELS.build(dict(type=cfg.model.type, **cfg.model.kwargs))
    ensemble_model = VotingClassifier(
        estimator=base_estimator, n_estimators=cfg.num_models
    )

    # Pick the compute device and construct the ensemble trainer.
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    trainer = EnsembleTrainer(cfg, ensemble_model, device)

    # Train each member model on its own dataloader.
    trainer.train(dataloaders=member_loaders)


# Parse CLI arguments, load the config, and launch (distributed) training.
def main():
    """Command-line entry point: parse args and hand off to the launcher."""
    parser = default_argument_parser()
    args = parser.parse_args()
    cfg = default_config_parser(args.config_file, args.options)

    # The launcher spawns one main_worker per process/GPU as configured.
    launch(
        main_worker,
        num_gpus_per_machine=args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        cfg=(cfg,),
    )


# Run the entry point only when executed directly, not when imported.
if __name__ == "__main__":
    main()