# Copyright (c) Malong LLC
# All rights reserved.
#
# Contact: github@malongtech.com
#
# This source code is licensed under the LICENSE file in the root directory of this source tree.

import datetime
import logging
import time

import torch
import torch.distributed as dist

from fcos_core.utils.comm import get_world_size, is_pytorch_1_1_0_or_later
from fcos_core.utils.metric_logger import MetricLogger

import os
import yaml

from fad_core.visualize import plot

def reduce_loss_dict(loss_dict):
    """
    Average a dict of scalar loss tensors across all distributed processes.

    ``dist.reduce`` accumulates onto ``dst=0``, so only rank 0 ends up with
    the averaged values; other ranks get back the (summed but undivided)
    tensors. With a single process the input dict is returned unchanged.
    """
    world_size = get_world_size()
    if world_size < 2:
        return loss_dict
    with torch.no_grad():
        # Sort the keys so every process stacks the losses in the same order.
        names = sorted(loss_dict.keys())
        stacked = torch.stack([loss_dict[name] for name in names], dim=0)
        dist.reduce(stacked, dst=0)
        if dist.get_rank() == 0:
            # Only the main process holds the accumulated sum, so only it
            # divides by world_size.
            stacked /= world_size
        return dict(zip(names, stacked))


def do_train(
    model,
    arch,
    data_loader,
    val_loader,
    optimizer,
    alpha_optim,
    scheduler,
    checkpointer,
    device,
    checkpoint_period,
    arguments,
    cfg,
    tb_info=None,
    first_order=True,
):
    """
    Run the DARTS-style architecture-search training loop.

    Each iteration alternates two updates: the architecture parameters
    (alpha) are updated on a validation batch, then the network weights (w)
    are updated on a training batch.

    Args:
        model: search network; must expose ``genotype()``, ``weights()``,
            ``print_alphas()`` and return a loss dict when called as
            ``model(images, targets)``.
        arch: architecture-gradient helper exposing ``unrolled_backward``
            (2nd-order) and ``first_order_backward`` (1st-order).
        data_loader: training loader yielding ``(images, targets, _)``;
            its length defines ``max_iter``.
        val_loader: validation loader, consumed in lockstep with
            ``data_loader``.
        optimizer: optimizer for the network weights ``w``.
        alpha_optim: optimizer for the architecture parameters ``alpha``.
        scheduler: LR scheduler attached to ``optimizer``.
        checkpointer: object with ``save(name, **arguments)``.
        device: device the images/targets are moved to.
        checkpoint_period: iterations between checkpoints (and, when
            ``cfg.MODEL.FAD.PLOT`` is set, genotype dumps).
        arguments: mutable checkpoint state; ``arguments["iteration"]`` is
            read for the start iteration and updated every step.
        cfg: config node; reads ``SOLVER.IMS_PER_BATCH``, ``MODEL.FAD.PLOT``
            and ``OUTPUT_DIR``.
        tb_info: optional dict with ``'tb_logger'`` and ``'prefix'`` keys for
            TensorBoard scalars; ``None`` (the default) disables logging.
            (Was a mutable ``{}`` default argument.)
        first_order: if True (default) use the cheaper 1st-order DARTS
            approximation for the alpha update, else the unrolled 2nd-order
            gradient.
    """
    if tb_info is None:
        tb_info = {}
    logger = logging.getLogger("fad_core.trainer")
    logger.info("Start the architecture search")
    meters = MetricLogger(delimiter="  ")
    max_iter = len(data_loader)
    start_iter = arguments["iteration"]
    model.train()
    start_training_time = time.time()
    end = time.time()

    # Log the initial genotype of every searchable module.
    for genotype in model.genotype():
        logger.info("genotype = {}".format(genotype))

    for iteration, ((images, targets, _), (images_val, targets_val, _)) in enumerate(
        zip(data_loader, val_loader), start_iter
    ):
        data_time = time.time() - end
        iteration = iteration + 1
        arguments["iteration"] = iteration

        # NOTE(review): stepping the scheduler before optimizer.step() is the
        # pre-1.1 PyTorch convention; kept as-is to preserve the original LR
        # schedule. The unused is_pytorch_1_1_0_or_later() probe was removed.
        scheduler.step()

        # Skip ragged tail batches: both loaders must deliver a full batch,
        # otherwise the bi-level update would mix mismatched batch sizes.
        if len(targets) == cfg.SOLVER.IMS_PER_BATCH and len(targets_val) == cfg.SOLVER.IMS_PER_BATCH:

            images = images.to(device)
            targets = [target.to(device) for target in targets]
            images_val = images_val.to(device)
            targets_val = [target.to(device) for target in targets_val]

            # -------------- update alpha (architecture params) on val batch
            lr = scheduler.get_lr()[0]
            alpha_optim.zero_grad()
            if not first_order:
                # ----- 2nd order: unroll one virtual weight step.
                arch.unrolled_backward(images, targets, images_val, targets_val, lr, optimizer)
            else:
                # ----- 1st order: plain gradient on the validation batch.
                arch.first_order_backward(images_val, targets_val)
            alpha_optim.step()

            # --------------- update w (network weights) on the train batch
            loss_dict = model(images, targets)
            losses = sum(loss for loss in loss_dict.values())

            # Reduce losses over all GPUs for logging purposes only; the
            # backward pass below uses the local, unreduced losses.
            loss_dict_reduced = reduce_loss_dict(loss_dict)
            losses_reduced = sum(loss for loss in loss_dict_reduced.values())
            for lkey, lval in loss_dict_reduced.items():
                loss_dict_reduced[lkey] = lval.mean()
            meters.update(loss=losses_reduced.mean(), **loss_dict_reduced)

            # --------- TensorBoard logging (optional)
            tb_logger = tb_info.get('tb_logger', None)
            if tb_logger:
                tb_prefix = '{}loss'.format(tb_info['prefix'])
                tb_logger.add_scalar(tb_prefix, losses_reduced.mean(), iteration)

                for key, value in loss_dict_reduced.items():
                    tb_logger.add_scalar("{}{}".format(tb_info['prefix'], key), value, iteration)

                tb_logger.add_scalar(tb_prefix + '_z_lr', lr, iteration)

            optimizer.zero_grad()
            losses.mean().backward()
            # Clip w-gradients to stabilize the bi-level optimization.
            torch.nn.utils.clip_grad_norm_(model.weights(), 20)
            optimizer.step()

            batch_time = time.time() - end
            end = time.time()
            meters.update(time=batch_time, data=data_time)

            eta_seconds = meters.time.global_avg * (max_iter - iteration)
            eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))

            if iteration % 20 == 0 or iteration == max_iter:
                logger.info(
                    meters.delimiter.join(
                        [
                            "eta: {eta}",
                            "iter: {iter}",
                            "{meters}",
                            "lr: {lr:.6f}",
                            "max mem: {memory:.0f}",
                        ]
                    ).format(
                        eta=eta_string,
                        iter=iteration,
                        meters=str(meters),
                        lr=optimizer.param_groups[0]["lr"],
                        memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                    )
                )

            if iteration % checkpoint_period == 0:
                checkpointer.save("model_{:07d}".format(iteration), **arguments)
            if iteration == max_iter:
                checkpointer.save("model_final", **arguments)

            # ---------- save genotype (text log for the augment stage + plot)
            if cfg.MODEL.FAD.PLOT and (iteration % checkpoint_period == 0):
                # `with` guarantees the log file is closed even if plot()
                # raises (the original leaked the handle on failure).
                with open(f"{cfg.OUTPUT_DIR}/genotype.log", "w") as fw:
                    for n_m, genotype in enumerate(model.genotype()):
                        logger.info("genotype = {}".format(genotype))
                        # write genotype for augment
                        fw.write(f"{genotype}\n")

                        # genotype as an image
                        plot_path = os.path.join(
                            cfg.OUTPUT_DIR + '/plots',
                            "Module%d" % n_m,
                            "Iter{:06d}".format(iteration),
                        )
                        caption = "Iteration {}".format(iteration)
                        plot(genotype.normal, plot_path + "-normal", caption)
                model.print_alphas(logger)

    total_training_time = time.time() - start_training_time
    total_time_str = str(datetime.timedelta(seconds=total_training_time))
    logger.info(
        "Total training time: {} ({:.4f} s / it)".format(
            total_time_str, total_training_time / (max_iter)
        )
    )
