import torch
from torch import nn
from DataPreprocessing import train_dataloader, val_dataloader
from torch.optim import lr_scheduler
from modelParallel import ModelParallelResNet50

# Two pipeline stages for model parallelism: part of the network runs on
# device1 and the rest on device2 (both fall back to CPU when CUDA is
# unavailable).  NOTE(review): "cuda:1" requires at least two GPUs — confirm
# the deployment has them, otherwise the first transfer will raise.
device1 = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
device2 = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Project-local model that is expected to split its layers across the two
# devices itself (definition not visible in this file).
model = ModelParallelResNet50(device1,device2)

loss_function = nn.CrossEntropyLoss()

# NOTE(review): num_epochs (120) is defined here but train() below runs only
# 10 epochs — confirm which count is intended.
lr, num_epochs = 0.0001, 120
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-5)
# Multiplies the lr by 0.8 each time a milestone is reached; the milestone
# values (40/60/80) line up with epoch counts, i.e. one scheduler.step() per
# epoch is expected.
scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=[40, 60, 80], gamma=0.8)


class Trainer:
    """Drives model-parallel training: inputs enter the model on ``device1``
    and the loss is computed on ``device2`` (where the model's output lives).
    """

    def __init__(self, model, train_dataloader, val_dataloader, optimizer, scheduler, loss_function, device1,device2):
        """Store the training collaborators.

        :param model: model whose forward pass starts on device1
        :param train_dataloader: iterable of (inputs, targets) batches
        :param val_dataloader: held-out loader (currently unused here)
        :param optimizer: optimizer over model.parameters()
        :param scheduler: lr scheduler, stepped once per epoch
        :param loss_function: criterion applied on device2
        :param device1: device the inputs are fed to
        :param device2: device the outputs/targets/loss live on
        """
        self.device1 = device1
        self.device2 = device2
        self.train_dataloader = train_dataloader
        self.val_dataloader = val_dataloader
        self.scheduler = scheduler
        self.optimizer = optimizer
        self.loss_function = loss_function
        self.model = model

    def run_batch(self, inputs, targets):
        """Run one forward/backward/optimizer step on a single batch.

        :param inputs: batch of inputs (already on device1 or movable by the model)
        :param targets: batch of targets; moved to device2 here
        :return: the loss tensor for this batch
        """
        self.optimizer.zero_grad()
        outputs = self.model(inputs)
        # Tensor.to() is NOT in-place — the results must be reassigned.
        # (The original discarded both return values, so nothing moved.)
        outputs = outputs.to(self.device2)
        targets = targets.to(self.device2)
        self.loss_function = self.loss_function.to(self.device2)

        # outputs and targets are both on device2, so the loss is too.
        loss = self.loss_function(outputs, targets)
        loss.backward()
        self.optimizer.step()
        # NOTE: scheduler.step() was moved to run_epoch — MultiStepLR
        # milestones ([40, 60, 80]) are epoch counts, so stepping per batch
        # decayed the learning rate hundreds of times too early.
        return loss

    def run_epoch(self, epoch):
        """Train for one full pass over train_dataloader.

        :param epoch: zero-based epoch index (used for logging only)
        :return: None
        """
        all_loss = 0.0
        epoch_batch_size = 0

        # Peek at the first batch only to report its size; the loop below
        # re-iterates the loader from the start.
        batch_size = len(next(iter(self.train_dataloader))[0])
        print("GPU: {} Epoch:{} Batch sizes: {} loader_size: {}".format(
            "cuda",
            epoch,
            batch_size,
            len(self.train_dataloader)))

        for batch_idx, data in enumerate(self.train_dataloader, 0):
            inputs, targets = data
            inputs, targets = inputs.to(self.device1), targets.to(self.device2)
            single_batch_loss = self.run_batch(inputs, targets).item()
            all_loss += single_batch_loss

            if batch_idx % 500 == 0:
                # Use self.scheduler (the original accidentally read the
                # module-level global) and get_last_lr() (get_lr() is
                # deprecated and returns misleading values mid-schedule).
                lr = self.scheduler.get_last_lr()
                print("[epoch {} batch {} ] single_batch_loss: {} learning rate {}".format(
                    epoch + 1,
                    batch_idx + 1,
                    single_batch_loss, lr))

            epoch_batch_size = max(batch_idx + 1, epoch_batch_size)

        # Advance the lr schedule once per epoch (milestones are in epochs).
        self.scheduler.step()

        # max(..., 1) guards against division by zero on an empty loader.
        print("#### Epoch {} finished all_loss {}".format(epoch + 1, all_loss / max(epoch_batch_size, 1)))

    def run_epochs(self, epoch_size):
        """Run epoch_size consecutive training epochs.

        :param epoch_size: number of epochs to run
        :return: None
        """
        for epoch in range(epoch_size):
            self.run_epoch(epoch)


def train(epoch_size=10):
    """Build a Trainer from the module-level globals and train.

    :param epoch_size: number of epochs to run; defaults to 10 to preserve
        the original behavior.  Pass num_epochs (120) for the full schedule,
        which matches the MultiStepLR milestones defined above.
    :return: None
    """
    trainer = Trainer(model, train_dataloader, val_dataloader, optimizer, scheduler, loss_function, device1,device2)
    trainer.run_epochs(epoch_size)

# Guard the entry point so importing this module does not start training.
if __name__ == "__main__":
    train()
