import os
import sys

sys.path.append("../../bin")
import time
import torch
import torch.nn as nn
import torch.utils.data

# import torch.distributed as dist

from src.utils import computer_cer


class Solver:
    """Training/evaluation driver wrapping a model, its config, logging and
    (optionally) a TensorBoard-style visualizer."""

    def __init__(self, model, config, logger, visualizer=None):
        # model: network to train (plain module or nn.DataParallel)
        # config: attribute-style config object (config.model / config.training / config.optim)
        # logger: logging.Logger-like object
        # visualizer: optional writer with add_scalar(tag, value, step)
        self.model = model
        self.config = config
        self.logger = logger
        self.visualizer = visualizer

    def iter_one_batch(self, epoch, inputs, inputs_length, targets, targets_length):
        """Run forward + backward on one batch; return (loss_value, grad_norm).

        Returns (0, 0) without backprop when the loss is NaN/Inf. Gradients
        accumulate on the model; the caller decides when to optimizer.step().
        """
        if self.config.model.type == "transducer":
            # Auxiliary encoder-CTC / decoder-CE losses are only blended in
            # during the first configured epochs of training.
            ctc_weight, ce_weight = 0.0, 0.0
            if self.config.model.enc.number_of_ctc_epochs and epoch < self.config.model.enc.number_of_ctc_epochs:
                ctc_weight = self.config.model.enc.ctc_weight
            # BUG FIX: this guard previously tested dec.number_of_ctc_epochs
            # while comparing epoch against dec.number_of_ce_epochs; the CE
            # schedule must use number_of_ce_epochs on both sides.
            if self.config.model.dec.number_of_ce_epochs and epoch < self.config.model.dec.number_of_ce_epochs:
                ce_weight = self.config.model.dec.ce_weight
            loss = self.model(inputs, inputs_length, targets, targets_length, ctc_weight, ce_weight)
        else:
            loss = self.model(inputs, inputs_length, targets, targets_length)

        if self.config.training.num_gpu > 1:
            # Multi-GPU returns a combined loss; average over devices.
            loss = loss / self.config.training.num_gpu
        if torch.isnan(loss):
            self.logger.warning("Train loss is nan. Skipping train loss update")
            return 0, 0
        if torch.isinf(loss):
            self.logger.warning("Train loss is inf. Skipping train loss update")
            return 0, 0
        # Normalize by the gradient-accumulation factor so the effective
        # update matches one large batch.
        loss = loss / self.config.training.accumulation_steps
        loss.backward()

        if self.config.training.max_grad_norm:
            grad_norm = nn.utils.clip_grad_norm_(
                self.model.parameters(), self.config.training.max_grad_norm)
        else:
            grad_norm = 0

        return loss.item(), grad_norm

    def train(self, epoch, training_data, optimizer):
        """Train the model for one epoch over `training_data`.

        `optimizer` is the project's scheduler-aware wrapper (exposes epoch(),
        step(), zero_grad(), step_decay_lr(), lr, global_step).
        """
        self.model.train()
        start_epoch = time.process_time()
        total_loss = 0
        optimizer.epoch()
        optimizer.zero_grad()
        batch_steps = len(training_data)

        for step, (inputs, inputs_length, targets, targets_length) in enumerate(training_data):

            start = time.process_time()
            # Defined up front so logging below is safe even if the batch
            # yields no successful iteration (previously could be unbound).
            loss_val, grad_norm = 0, 0

            if self.config.optim.step_wise_update:
                optimizer.step_decay_lr()

            if self.config.training.num_gpu > 0:
                inputs, inputs_length = inputs.cuda(), inputs_length.cuda()
                targets, targets_length = targets.cuda(), targets_length.cuda()

            # Feed inputs to the model, catching genuine "CUDA out of memory"
            # errors only. BUG FIX: previously every RuntimeError (shape/dtype
            # bugs included) was silently treated as OOM; now anything else is
            # re-raised.
            oom = False
            try:
                loss_val, grad_norm = self.iter_one_batch(epoch,
                                                          inputs, inputs_length,
                                                          targets, targets_length)
                total_loss += loss_val
            except RuntimeError as err:
                if "out of memory" not in str(err):
                    raise
                oom = True
                self.logger.warning("CUDA out of memory")

            if oom:
                if self.config.training.num_gpu > 0:
                    # Release cached blocks from the failed batch before retrying.
                    torch.cuda.empty_cache()
                # Fall back to processing the batch one utterance at a time.
                for i in range(targets_length.shape[0]):
                    loss_val, grad_norm = self.iter_one_batch(epoch,
                                                              inputs[i][:inputs_length[i]].unsqueeze(0),
                                                              inputs_length[i].unsqueeze(0),
                                                              targets[i][:targets_length[i]].unsqueeze(0),
                                                              targets_length[i].unsqueeze(0))
                    total_loss += loss_val / targets_length.shape[0]
            # Gradient accumulation: only apply the update every
            # accumulation_steps batches.
            if ((step + 1) % self.config.training.accumulation_steps) == 0:
                optimizer.step()       # update parameters of net
                optimizer.zero_grad()  # reset gradient

            avg_loss = total_loss / (step + 1)
            if self.visualizer is not None:
                self.visualizer.add_scalar('train_loss', loss_val, optimizer.global_step)
                self.visualizer.add_scalar('learn_rate', optimizer.lr, optimizer.global_step)
                self.visualizer.add_scalar('avg_loss', avg_loss, optimizer.global_step)

            if optimizer.global_step % self.config.training.show_interval == 0:
                end = time.process_time()
                process = step / batch_steps * 100
                self.logger.info(
                    '-Training-Epoch:%d(%.5f%%), Global Step:%d, Learning Rate:%.6f, Grad Norm:%.5f, Loss:%.5f, '
                    'AverageLoss: %.5f, Run Time:%.3f' % (epoch, process, optimizer.global_step, optimizer.lr,
                                                          grad_norm, loss_val, avg_loss, end - start))

        end_epoch = time.process_time()
        self.logger.info('-Training-Epoch:%3d, Average Loss: %.5f, Epoch Time: %.3f' %
                         (epoch, total_loss / (step + 1), end_epoch - start_epoch))

    def eval(self, epoch, validating_data):
        """Decode `validating_data` with model.recognize and return the CER (%)."""
        model = self.model
        if isinstance(model, torch.nn.DataParallel):
            # Unwrap so the custom .recognize method is reachable.
            model = model.module
        model.eval()
        with torch.no_grad():
            total_loss, total_dist, total_word = 0, 0, 0
            batch_steps = len(validating_data)
            for step, (inputs, inputs_length, targets, targets_length) in enumerate(validating_data):

                if self.config.training.num_gpu > 0:
                    inputs, inputs_length = inputs.cuda(), inputs_length.cuda()
                    targets, targets_length = targets.cuda(), targets_length.cuda()

                preds = model.recognize(inputs, inputs_length)

                # Strip padding from the reference transcripts.
                transcripts = [targets.cpu().numpy()[i][:targets_length[i].item()]
                               for i in range(targets.size(0))]

                dist, num_words = computer_cer(preds, transcripts)

                total_dist += dist
                total_word += num_words

                cer = total_dist / total_word * 100
                if step % self.config.training.show_interval == 0:
                    process = step / batch_steps * 100
                    self.logger.info('-Validation-Epoch:%d(%.5f%%), CER: %.5f %%' % (epoch, process, cer))
                    self.logger.info('preds:' + validating_data.dataset.decode(preds[0]))
                    self.logger.info('trans:' + validating_data.dataset.decode(transcripts[0]))

            # NOTE(review): no validation loss is ever accumulated here, so the
            # reported AverageLoss is always 0 — kept as-is for log-format
            # stability; either compute a real loss or drop the field.
            val_loss = total_loss / (step + 1)
            self.logger.info('-Validation-Epoch:%3d, AverageLoss:%.5f, AverageCER: %.5f %%' %
                        (epoch, val_loss, cer))

        if self.visualizer is not None:
            self.visualizer.add_scalar('cer', cer, epoch)

        return cer


