from collections import OrderedDict

import torch
from torchline.engine import MODULE_REGISTRY, DefaultModule
from torchline.utils import AverageMeterGroup


@MODULE_REGISTRY.register()
class MyEngine(DefaultModule):
    """Training/validation engine built on torchline's DefaultModule.

    Overrides the Lightning hooks for a single training step, a single
    validation step, and end-of-validation aggregation, tracking running
    averages of the losses with an AverageMeterGroup.
    """

    def __init__(self, cfg):
        super().__init__(cfg)  # zero-arg super (Python 3 idiom)
        # Example input used by Lightning to trace/summarize the model.
        # NOTE(review): assumes the model consumes (batch, 2, 512) float
        # tensors — confirm against the configured network.
        self.example_input_array = torch.rand(1, 2, 512)

    def training_step(self, batch, batch_idx):
        """Lightning calls this inside the training loop.

        :param batch: an ``(inputs, gt_labels)`` pair from the train loader.
        :param batch_idx: index of this batch within the current epoch.
        :return: OrderedDict with ``loss``, ``progress_bar`` and ``log``
            entries, as expected by the Lightning training loop.
        """
        if batch_idx == 0:
            # Reset running averages at the start of every epoch.
            self.train_meters = AverageMeterGroup()

        # forward pass
        inputs, gt_labels = batch
        predictions = self.forward(inputs)

        # calculate loss
        loss_val = self.loss(predictions, gt_labels)

        # in DP mode (default) make sure if result is scalar, there's another
        # dim in the beginning so Lightning can gather across replicas
        if self.trainer.use_dp or self.trainer.use_ddp2:
            loss_val = loss_val.unsqueeze(0)

        tqdm_dict = {'train_loss': loss_val.item()}

        output = OrderedDict({
            'loss': loss_val,
            'progress_bar': tqdm_dict,
            'log': tqdm_dict,
        })

        self.train_meters.update(tqdm_dict)
        self.print_log(batch_idx, True, inputs, self.train_meters)

        # can also return just a scalar instead of a dict (return loss_val)
        return output

    def validation_step(self, batch, batch_idx):
        """Lightning calls this inside the validation loop.

        :param batch: an ``(inputs, gt_labels)`` pair from the valid loader.
        :param batch_idx: index of this batch within the current epoch.
        :return: OrderedDict with a ``valid_loss`` tensor, aggregated later
            in :meth:`validation_end`.
        """
        if batch_idx == 0:
            # Reset running averages at the start of every epoch.
            self.valid_meters = AverageMeterGroup()

        inputs, gt_labels = batch
        predictions = self.forward(inputs)

        loss_val = self.loss(predictions, gt_labels)

        # in DP mode (default) make sure if result is scalar, there's another
        # dim in the beginning
        if self.trainer.use_dp or self.trainer.use_ddp2:
            loss_val = loss_val.unsqueeze(0)

        output = OrderedDict({
            'valid_loss': loss_val,
        })
        # OrderedDict already supports .items(); no dict() wrapper needed.
        tqdm_dict = {k: v.item() for k, v in output.items()}
        self.valid_meters.update(tqdm_dict)
        self.print_log(batch_idx, False, inputs, self.valid_meters)

        # can also return just a scalar instead of a dict (return loss_val)
        return output

    def validation_end(self, outputs):
        """Called at the end of validation to aggregate outputs.

        :param outputs: list of individual outputs of each validation step.
        :return: dict with ``progress_bar``, ``log`` and ``valid_loss`` keys.
        """
        # if returned a scalar from validation_step, outputs is a list of
        # tensor scalars; we could return just the average in this case:
        # return torch.stack(outputs).mean()

        # Guard against an empty validation pass (e.g. empty dataloader);
        # the division by len(outputs) below would otherwise raise
        # ZeroDivisionError.
        if not outputs:
            return {'progress_bar': {}, 'log': {}, 'valid_loss': 0.0}

        val_loss_mean = 0
        for output in outputs:
            val_loss = output['valid_loss']

            # reduce manually when using dp: each step returned one loss per
            # replica, so average those before averaging over steps
            if self.trainer.use_dp or self.trainer.use_ddp2:
                val_loss = torch.mean(val_loss)
            val_loss_mean += val_loss

        val_loss_mean /= len(outputs)
        tqdm_dict = {'valid_loss': val_loss_mean}
        result = {'progress_bar': tqdm_dict, 'log': tqdm_dict,
                  'valid_loss': val_loss_mean}
        return result
