#!/usr/bin/env python3

import os
import argparse

import torch
import loguru
import deepspeed
import torchvision
import torch.distributed      as dist
import torchvision.transforms as transforms

from   torchvision.models     import AlexNet
from   deepspeed.pipe         import PipelineModule
from   deepspeed.utils        import RepeatingLoader


# Module-wide logger; used below to emit per-rank progress/diagnostic messages.
logger = loguru.logger

def cifar_trainset(local_rank, dl_path='cifar10'):
    """Build the CIFAR-10 training set with AlexNet-style preprocessing.

    Only local rank 0 downloads the data; the other ranks wait at a
    barrier until the download finishes, then load the already-present
    files. Every rank therefore returns a usable dataset without racing
    on the download.

    Args:
        local_rank: this process's local rank; rank 0 performs the download.
        dl_path: directory where CIFAR-10 is stored / downloaded to.

    Returns:
        ``torchvision.datasets.CIFAR10`` training split with the
        transform pipeline attached.
    """
    transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        # ImageNet channel statistics — matches AlexNet's expected input
        # normalization (224x224 crops).
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    #! Synchronization point: all ranks line up here before the
    #! download handshake below.
    dist.barrier()

    #! Only local_rank 0 downloads; every rank loads.
    # Non-zero ranks block here until rank 0 has downloaded the data...
    if local_rank != 0:
        dist.barrier()
    trainset = torchvision.datasets.CIFAR10(root=dl_path, train=True,
                            download=True, transform=transform)
    # ...and rank 0 releases them only after its download completed.
    if local_rank == 0:
        dist.barrier()
    return trainset

def get_args():
    """Parse command-line arguments, including DeepSpeed's own flags.

    Returns:
        ``argparse.Namespace`` carrying the training options plus whatever
        configuration arguments DeepSpeed registers on the parser.
    """
    parser = argparse.ArgumentParser(description='CIFAR')
    parser.add_argument('--local_rank', type=int, default=-1,
                        help='local rank passed from distributed launcher')
    parser.add_argument('-s', '--steps', type=int, default=100,
                        help='quit after this many steps')
    parser.add_argument('-p', '--pipeline-parallel-size', type=int, default=0,
                        help='pipeline parallelism')
    parser.add_argument('--backend', type=str, default='nccl',
                        help='distributed backend')
    parser.add_argument('--seed', type=int, default=1138, help='PRNG seed')

    #! Extend the parser with DeepSpeed's launcher arguments
    #! (e.g. --deepspeed_config) before parsing.
    parser = deepspeed.add_config_arguments(parser)
    return parser.parse_args()

#! Return the model as a flat, ordered list of layers.
def join_layers(vision_model):
    """Flatten a torchvision-style CNN into a list of pipeline stages.

    Concatenates the convolutional features, the pooling layer, a
    flatten step, and the classifier head in forward-pass order, so the
    result can be handed directly to ``PipelineModule``.
    """
    stages = list(vision_model.features)
    stages.append(vision_model.avgpool)
    # Bridge the conv output (N, C, H, W) to the linear head (N, C*H*W).
    stages.append(lambda x: torch.flatten(x, 1))
    stages.extend(vision_model.classifier)
    return stages

def train_pipeline(args, part="parameters"):
    """Train AlexNet on CIFAR-10 using DeepSpeed pipeline parallelism.

    Args:
        args: parsed CLI namespace (seed, steps, local_rank,
            pipeline_parallel_size, plus DeepSpeed config args).
        part: layer-partitioning strategy forwarded to PipelineModule;
            "parameters" balances stages by parameter count.
    """
    # Seed both PyTorch and DeepSpeed's RNGs for reproducibility.
    torch.manual_seed(args.seed)
    deepspeed.runtime.utils.set_random_seed(args.seed)

    net = AlexNet(num_classes=10)

    #! Example stage split observed with 2 pipeline stages — RANK-0:
    # (0): Conv2d(3, 64, kernel_size=(11, 11), stride=(4, 4), padding=(2, 2))
    # (1): ReLU(inplace=True)
    # (2): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    # (3): Conv2d(64, 192, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2))
    # (4): ReLU(inplace=True)
    # (5): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    # (6): Conv2d(192, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (7): ReLU(inplace=True)
    # (8): Conv2d(384, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (9): ReLU(inplace=True)
    # (10): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    # (11): ReLU(inplace=True)
    # (12): MaxPool2d(kernel_size=3, stride=2, padding=0, dilation=1, ceil_mode=False)
    # (13): AdaptiveAvgPool2d(output_size=(6, 6))
    # (15): Dropout(p=0.2, inplace=False)
    # (16): Linear(in_features=9216, out_features=4096, bias=True)
    # (17): ReLU(inplace=True)
    # (18): Dropout(p=0.2, inplace=False)

    #! RANK-1:
    # (19): Linear(in_features=4096, out_features=4096, bias=True)
    # (20): ReLU(inplace=True)
    # (21): Linear(in_features=4096, out_features=10, bias=True)
    # NOTE(review): num_stages comes from --pipeline-parallel-size whose CLI
    # default is 0 — presumably the launcher always passes -p explicitly;
    # verify a 0-stage PipelineModule is never constructed.
    net = PipelineModule(layers=join_layers(net),                # flat list of model layers
                        loss_fn=torch.nn.CrossEntropyLoss(),
                        num_stages= args.pipeline_parallel_size, # number of pipeline stages to split into
                        partition_method=part,                   # how layers are assigned to stages
                        activation_checkpoint_interval=0         # 0 disables activation checkpointing
                    )

    # logger.info(f"rank = {int(os.environ['LOCAL_RANK'])}, net = {net}")
    dataset = cifar_trainset(args.local_rank)
    # logger.info(f"rank = {int(os.environ['LOCAL_RANK'])}, dataset = {dataset}")

    #! TODO: check whether a None dataset on local_rank != 0 would raise here.
    engine, _, _, _ = deepspeed.initialize(model=net, args=args, training_data=dataset,
                        model_parameters=[p for p in net.parameters() if p.requires_grad])

    #! No explicit optimizer/loss code here: train_batch() drives the full
    #! forward/backward/optimizer pipeline schedule internally (optimizer
    #! comes from the DeepSpeed JSON config).
    for step in range(args.steps):
        loss = engine.train_batch()

# Launch example:
# deepspeed train_alexnet_ds.py  --deepspeed_config=ds_config.json -p 2 --steps=200
if __name__ == '__main__':
    args = get_args()

    # Example of the parsed namespace on local_rank 0:
    #  args = Namespace(backend='nccl', deepscale=False, deepscale_config=None, deepspeed=True, 
    #  deepspeed_config='ds_config.json', local_rank=0, pipeline_parallel_size=2, seed=1138, steps=200) 
    #  from local_rank 0
    # logger.error(f"args = {args} from local_rank {int(os.environ['LOCAL_RANK'])}")

    # Set up the process group first, then override the CLI local_rank
    # (default -1) with the value the launcher exported via LOCAL_RANK,
    # and bind this process to its GPU before building the model.
    deepspeed.init_distributed(dist_backend=args.backend)
    args.local_rank = int(os.environ['LOCAL_RANK'])
    torch.cuda.set_device(args.local_rank)

    logger.warning(f"local_rank = {args.local_rank}, pipe_size = {args.pipeline_parallel_size}")
    train_pipeline(args)