import torch
from torch import nn

from torch.optim import lr_scheduler
from model import ResnetClass10

from torch.nn.parallel import DistributedDataParallel as DDP
from DataPreprocessing import get_dataloaders

from torch.distributed import init_process_group, destroy_process_group
import torch.multiprocessing as mp

from InitProcessGroup import ddp_setup

# Module-level training state: under mp.spawn (spawn start method) this module
# is re-imported in every worker process, so each rank builds its own copy of
# the model/optimizer/scheduler before Trainer moves the model onto its GPU.
model = ResnetClass10()

loss_function = nn.CrossEntropyLoss()

# NOTE(review): num_epochs = 120 is never used below -- the actual epoch count
# comes from the --max_epochs CLI flag (default 10); confirm which is intended.
lr, num_epochs = 0.0001, 120
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-5)
# Epoch-indexed LR decay: multiply the LR by 0.8 at epochs 40, 60 and 80.
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[40, 60, 80], gamma=0.8)


class Trainer:
    """Per-process training driver: wraps a model in DistributedDataParallel
    and runs the training loop on a single GPU (one Trainer per spawned rank).
    """

    def __init__(self, model, train_dataloader, val_dataloader, optimizer, scheduler, loss_function, gpu_id):
        """
        :param model: network to train; moved to gpu_id and wrapped in DDP
        :param train_dataloader: DataLoader backed by a DistributedSampler
        :param val_dataloader: held-out loader (currently unused by train())
        :param optimizer: optimizer built over model.parameters()
        :param scheduler: epoch-indexed LR scheduler (e.g. MultiStepLR)
        :param loss_function: criterion such as nn.CrossEntropyLoss
        :param gpu_id: device ordinal of this process's GPU
        """
        self.gpu_id = gpu_id
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.scheduler = scheduler
        self.optimizer = optimizer
        self.loss_function = loss_function
        # Move the model to this rank's device first, then wrap the moved
        # module. Wrapping self.model (not the old `model` name) makes the
        # intent explicit instead of relying on nn.Module.to() being in-place.
        self.model = model.to(gpu_id)
        self.model = DDP(self.model, device_ids=[gpu_id])

    def run_batch(self, inputs, targets):
        """
        Run one optimization step on a single batch.

        The LR scheduler is intentionally NOT stepped here: MultiStepLR
        milestones are expressed in epochs, so it is advanced once per
        epoch in run_epoch() instead of once per batch.

        :param inputs: input tensor already on self.gpu_id
        :param targets: target tensor already on self.gpu_id
        :return: the scalar loss tensor for this batch
        """
        self.optimizer.zero_grad()
        outputs = self.model(inputs)
        # The loss is produced on the same device as the outputs, so no
        # extra .to(self.gpu_id) transfer is needed.
        loss = self.loss_function(outputs, targets)
        loss.backward()
        self.optimizer.step()
        return loss

    def run_epoch(self, epoch):
        """
        Train for one full pass over train_dataloader, then advance the
        epoch-indexed LR scheduler once.

        :param epoch: zero-based epoch index (also seeds the sampler shuffle)
        :return: None
        """
        # Must be set before iterating so each epoch sees a different shuffle
        # and all ranks stay in sync.
        self.train_dataloader.sampler.set_epoch(epoch)

        # Peek at one batch only to report its size; the loop below builds
        # its own fresh iterator.
        batch_size = len(next(iter(self.train_dataloader))[0])
        print("GPU: {} Epoch:{} Batch sizes: {} loader_size: {}".format(
            self.gpu_id,
            epoch,
            batch_size,
            len(self.train_dataloader)))

        all_loss = 0.0
        num_batches = 0

        for batch_idx, (inputs, targets) in enumerate(self.train_dataloader):
            inputs, targets = inputs.to(self.gpu_id), targets.to(self.gpu_id)
            single_batch_loss = self.run_batch(inputs, targets).item()
            all_loss += single_batch_loss
            num_batches += 1

            if batch_idx % 500 == 0:
                # get_last_lr() is the supported accessor; get_lr() is
                # deprecated and warns when called outside of step().
                lr = self.scheduler.get_last_lr()
                print("[epoch {} batch {} ] single_batch_loss: {} learning rate {}".format(
                    epoch + 1,
                    batch_idx + 1,
                    single_batch_loss, lr))

        # Advance the epoch-based schedule exactly once per epoch; stepping
        # per batch (as before) hit the [40, 60, 80] milestones after a few
        # hundred batches instead of after that many epochs.
        self.scheduler.step()

        # max(..., 1) guards against division by zero on an empty loader.
        print("#### Epoch {} finished all_loss {}".format(epoch + 1, all_loss / max(num_batches, 1)))

    def train(self, epoch_size):
        """
        Run the training loop for epoch_size epochs.

        :param epoch_size: number of epochs to run
        :return: None
        """
        for epoch in range(epoch_size):
            self.run_epoch(epoch)


def main(rank, world_size, max_epoches, batch_size):
    """Entry point executed in every worker process spawned by mp.spawn.

    :param rank: process rank supplied by mp.spawn; doubles as the GPU ordinal
    :param world_size: total number of participating processes
    :param max_epoches: number of epochs to train for
    :param batch_size: per-device batch size handed to the dataloaders
    """
    print("RANK", rank, world_size, max_epoches, batch_size)

    gpu_id = rank
    ddp_setup(gpu_id, world_size)

    loaders = get_dataloaders(batch_size)
    train_dataloader, val_dataloader = loaders

    worker = Trainer(model, train_dataloader, val_dataloader, optimizer, scheduler, loss_function, gpu_id)
    worker.train(max_epoches)

    # Tear down the process group so the worker exits cleanly.
    destroy_process_group()


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description="Distributed training fashion MNIST data.")
    parser.add_argument("--max_epochs", type=int, help="max number of epochs", default=10)
    parser.add_argument("--batch_size", type=int, help="Input batch size on each device", default=32)
    args = parser.parse_args()

    # One worker process per visible GPU; mp.spawn supplies the rank
    # (0 .. world_size-1) as the first positional argument of main().
    world_size = torch.cuda.device_count()
    if world_size == 0:
        # nprocs=0 would silently spawn nothing; fail loudly instead.
        raise RuntimeError("No CUDA devices visible; DDP training requires at least one GPU.")
    mp.spawn(main, args=(world_size, args.max_epochs, args.batch_size), nprocs=world_size)

    # CMD examples:
    #   python DDPTraiining.py --max_epochs 5 --batch_size 128
    #   torchrun --nproc_per_node=1 DDPTraiining.py --max_epochs 5 --batch_size 128
    #   python -m torch.distributed.launch --use_env --nproc_per_node=2 DDPTraiining.py --max_epochs 5 --batch_size 16
    # Reference materials:
    #   https://pytorch.org/docs/stable/distributed.html
    #   https://pytorch.org/docs/stable/elastic/run.html
