import math
import os
import time
from copy import deepcopy

import paddle
import paddle.distributed as dist
import paddle.nn as nn


class AveragedModel(nn.Layer):
    """Maintains a running average of a model's parameters (SWA wrapper).

    Holds an independent deep copy of the wrapped model and a counter
    buffer `n_averaged` tracking how many parameter snapshots have been
    folded into the average.
    """

    def __init__(self, model):
        super().__init__()
        # Independent copy: averaging must never mutate the live model.
        self.module = deepcopy(model)
        # Buffer so the counter is saved/loaded with the state dict.
        self.register_buffer('n_averaged', paddle.to_tensor(0, dtype='int32'))

    def forward(self, *args, **kwargs):
        # Inference is delegated to the averaged copy.
        return self.module(*args, **kwargs)

    def avg_fn(self, averaged_model_parameter, model_parameter, num_averaged):
        # Incremental mean: avg_{k+1} = avg_k + (x - avg_k) / (k + 1).
        delta = model_parameter - averaged_model_parameter
        return averaged_model_parameter + delta / (num_averaged + 1)

    def update_parameters(self, model: nn.Layer):
        """Fold the current parameters of `model` into the running average."""
        is_first = self.n_averaged == 0
        for avg_param, src_param in zip(self.parameters(), model.parameters()):
            src = src_param.detach()
            if is_first:
                # First snapshot: the average is just a copy of the source.
                avg_param.set_value(src)
            else:
                avg_param.set_value(
                    self.avg_fn(avg_param.detach(), src, self.n_averaged))
        self.n_averaged += 1


def update_bn(loader, model: nn.Layer):
    """Recompute the BatchNorm running statistics of `model` over `loader`.

    Zeroes every BN layer's running mean/variance, then performs one
    forward pass per batch with a momentum chosen so the running stats
    form an exact cumulative average over all samples seen. The original
    per-layer momentum values are restored before returning.

    Args:
        loader: iterable of batches; a batch may be a bare input tensor
            or a tuple/list whose first element is the input tensor.
        model: the network whose BN statistics should be refreshed.
    """
    momenta = {}
    for layer in model.sublayers():
        if isinstance(layer, nn.layer.norm._BatchNormBase):
            layer._mean.set_value(paddle.zeros_like(layer._mean))
            layer._variance.set_value(paddle.zeros_like(layer._variance))
            momenta[layer] = layer._momentum
    if not momenta:
        # No BN layers: nothing to update.
        return

    model.train()
    num_tracked = 0
    for data in loader:
        # Accept both bare tensors and (input, ...) tuples/lists.
        inputs = data[0] if isinstance(data, (list, tuple)) else data
        b = inputs.shape[0]
        # BUG FIX: Paddle's BatchNorm updates running stats as
        #     stat = momentum * stat + (1 - momentum) * batch_stat
        # (opposite of PyTorch), so a cumulative mean needs
        #     momentum = num_tracked / (num_tracked + b).
        # The previous b / (num_tracked + b) gave momentum 1 on the first
        # batch, leaving the zeroed statistics unchanged.
        momentum = num_tracked / (num_tracked + b)
        for layer in momenta:
            layer._momentum = momentum

        model(inputs)
        num_tracked += b

    # Restore the momentum each BN layer had on entry.
    for layer, val in momenta.items():
        layer._momentum = val



def swa_train(config,
          loader,
          model,
          loss_class,
          optimizer,
          lr_scheduler,
          logger,
          vdl_writer=None):
    """Run the SWA training loop with periodic logging and checkpointing.

    Args:
        config: dict with a 'Global' section providing log_smooth_window,
            epoch_num, print_batch_step, save_model_dir, save_epoch_step.
        loader: iterable of batches; batch[0] is the input tensor.
        model: paddle.nn.Layer being trained.
        loss_class: callable(preds, batch) -> dict containing key 'loss'.
        optimizer: Paddle optimizer.
        lr_scheduler: LR scheduler object, or a float for a fixed LR.
        logger: logger used for progress messages (rank 0 only).
        vdl_writer: optional VisualDL writer (rank 0 only).
    """
    log_smooth_window = config['Global']['log_smooth_window']
    epoch_num = config['Global']['epoch_num']
    print_batch_step = config['Global']['print_batch_step']
    # BUG FIX: save_epoch_step was referenced below but never defined.
    save_epoch_step = config['Global']['save_epoch_step']

    global_step = 0
    save_model_dir = config['Global']['save_model_dir']
    os.makedirs(save_model_dir, exist_ok=True)
    train_stats = TrainingStats(log_smooth_window, ['lr'])
    # BUG FIX: best_model_dict was passed to save_model but never defined.
    best_model_dict = {}
    model.train()
    start_epoch = 1

    for epoch in range(start_epoch, epoch_num + 1):
        train_batch_cost = 0.0
        train_reader_cost = 0.0
        batch_sum = 0
        batch_start = time.time()
        model.train()

        for idx, batch in enumerate(loader):
            train_reader_cost += time.time() - batch_start

            lr = optimizer.get_lr()
            images = batch[0]
            # BUG FIX: the forward pass must receive the batch input
            # (was `model()` with no arguments).
            preds = model(images)
            loss = loss_class(preds, batch)
            avg_loss = loss['loss']
            avg_loss.backward()
            optimizer.step()
            optimizer.clear_grad()

            train_batch_cost += time.time() - batch_start
            batch_sum += len(images)

            # A float lr_scheduler means a constant learning rate.
            if not isinstance(lr_scheduler, float):
                lr_scheduler.step()

            # Smoothed stats for the logger and VisualDL.
            stats = {k: v.numpy().mean() for k, v in loss.items()}
            stats['lr'] = lr
            train_stats.update(stats)

            if vdl_writer is not None and dist.get_rank() == 0:
                for k, v in train_stats.get().items():
                    vdl_writer.add_scalar('TRAIN/{}'.format(k), v, global_step)
                vdl_writer.add_scalar('TRAIN/lr', lr, global_step)

            if dist.get_rank() == 0 and (
                (global_step > 0 and global_step % print_batch_step == 0) or
                (idx >= len(loader) - 1)):
                logs = train_stats.log()
                strs = 'epoch: [{}/{}], iter: {}, {}, reader_cost: {:.5f} s, batch_cost: {:.5f} s, samples: {}, ips: {:.5f}'.format(
                    epoch, epoch_num, global_step, logs, train_reader_cost /
                    print_batch_step, train_batch_cost / print_batch_step,
                    batch_sum, batch_sum / train_batch_cost)
                logger.info(strs)
                train_batch_cost = 0.0
                train_reader_cost = 0.0
                batch_sum = 0

            global_step += 1
            batch_start = time.time()

        if dist.get_rank() == 0:
            # Always refresh the rolling 'latest' checkpoint.
            save_model(model, optimizer, save_model_dir, logger, is_best=False,
                prefix='latest', best_model_dict=best_model_dict,
                epoch=epoch, global_step=global_step)

        if dist.get_rank() == 0 and epoch % save_epoch_step == 0:
            # Periodic per-epoch snapshot.
            save_model(model, optimizer, save_model_dir, logger, is_best=False,
                prefix='iter_epoch_{}'.format(epoch),
                best_model_dict=best_model_dict,
                epoch=epoch, global_step=global_step)

    if dist.get_rank() == 0 and vdl_writer is not None:
        vdl_writer.close()
    return