from net.models import *
from net.yolo_loss import *
from net.yolo_layer import *
from utils.utils import *
from utils.datasets import *
from terminaltables import AsciiTable
import os
import time
import datetime
import logging
import torch
from torch.utils.data import DataLoader
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
import importlib
from torchsummary import summary

# Names of the per-layer loss metrics logged during training, in the order
# the rows of compute_loss's metric output are reported.
metrics = [
    "total_loss",
    "ciou_loss",
    "obj_loss",
    "cls_loss",
    "grid_x",
    "grid_y",
]


def train(config):
    """Run the full training loop for a Darknet/YOLO model.

    config: dict of training parameters (see ``param.TRAINING_PARAMS``);
    keys read here include "resume", "pretrain_snapshot", "lr", "epochs",
    "data" (train/valid list files and class-name file), "img_size",
    "augment", "multiscale_train", "batch_size", "n_cpu",
    "gradient_accumulation", "loss_hyp", "evaluation_interval" and
    "checkpoint_interval".
    """
    init_seeds()
    # Get work device type (CPU or CUDA, as chosen by select_device())
    device = select_device()

    logging.info("--------------- Train task start -------------")

    # Construct model and initialize it
    model = DarknetModel(config).to(device)
    # Get optimizer (separate backbone/head parameter groups, see _get_optimizer)
    optimizer = _get_optimizer(config, model)

    start_epoch = 0
    # NOTE(review): best_loss is restored from the checkpoint and saved back
    # below, but it is never updated inside the training loop.
    best_loss = float('inf')
    # Load checkpoint when resuming; otherwise initialize weights randomly.
    if config["resume"] and os.path.exists(config["pretrain_snapshot"]):
        logging.info("Resume to latest checkpoint from {}".format(config["pretrain_snapshot"]))
        chkpt = torch.load(config["pretrain_snapshot"], map_location=device)
        model.load_state_dict(chkpt['model'])
        start_epoch = chkpt['epoch'] + 1
        if chkpt['optimizer'] is not None:
            optimizer.load_state_dict(chkpt['optimizer'])
            best_loss = chkpt['best_loss']
        del chkpt  # release checkpoint memory before training starts
    else:
        logging.info("Initialize model by random weights following normal distribution")
        model.apply(weights_init_normal)

    # Print model information
    # summary(model.backbone, input_size=(3, config["img_size"], config["img_size"]), batch_size=config["batch_size"])
    model_info(model)
    # Setup LR scheduler
    lf = lambda x: 1 - 10 ** (config["lr"]['lrf'] * (1 - x / config["epochs"]))  # inverse exp ramp
    # NOTE(review): the scheduler is created, but lr_scheduler.step() is
    # commented out in the batch loop below, so the LR stays constant.
    lr_scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lf, last_epoch=start_epoch - 1)

    # Plot lr schedule
    # y0 = []
    # y1 = []
    # for _ in range(config["epochs"]):
    #     lr_scheduler.step()
    #     y0.append(optimizer.param_groups[0]['lr'])
    #     y1.append(optimizer.param_groups[1]['lr'])
    # plt.plot(y0, label='LambdaLR_Backbone')
    # plt.plot(y1, label="LambdaLR_Embedding")
    # plt.xlabel('epoch')
    # plt.xlabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Get data configuration
    train_path = config["data"]["train"]  # Get train data list file
    valid_path = config["data"]["valid"]  # Get valid data list file
    class_names = load_classes(config["data"]["names"])  # Get class names
    # Train dataset: optional augmentation and multi-scale resizing.
    dataset_train = YoloDataset(train_path, img_size=config["img_size"][0], augment=config["augment"], multiscale=config["multiscale_train"])
    # Validation dataset: fixed scale, no augmentation.
    dataset_val = YoloDataset(valid_path, img_size=config["img_size"][0], augment=False, multiscale=False)
    dataloader_train = torch.utils.data.DataLoader(dataset_train,
                                                   batch_size=config["batch_size"],
                                                   shuffle=True,
                                                   num_workers=config["n_cpu"],
                                                   pin_memory=True,
                                                   collate_fn=dataset_train.collate_fn,
                                                   timeout=10)
    dataloader_val = torch.utils.data.DataLoader(dataset_val,
                                                 batch_size=config["batch_size"],
                                                 shuffle=False,
                                                 num_workers=config["n_cpu"],
                                                 pin_memory=True,
                                                 collate_fn=dataset_val.collate_fn,
                                                 timeout=10)

    # # Initialize distributed training
    # if torch.cuda.device_count() > 1:
    #     dist.init_process_group(backend="nccl", init_method=opt.dist_url, world_size=opt.world_size, rank=opt.rank)
    #     model = torch.nn.parallel.DistributedDataParallel(model)

    for epoch in range(start_epoch, config["epochs"]):
        start_time = time.time()
        model.train()  # back to train mode (evaluate() below switches to eval)
        for batch_i, (imgs, targets) in enumerate(dataloader_train):
            # Global batch counter across epochs, used for gradient accumulation.
            batches_done = len(dataloader_train) * epoch + batch_i
            imgs = imgs.to(device)
            targets = targets.to(device)

            pred = model(imgs)  # inference and compute loss
            loss, loss_metrics = compute_loss(pred, targets, model, lossHyp=config["loss_hyp"])
            loss.backward()  # compute gradient (accumulates until optimizer.step)

            # Accumulate gradient for x batches before optimizing
            if batches_done > 1 and batches_done % config["gradient_accumulation"] == 0:
                # Accumulates gradient before each step
                optimizer.step()
                optimizer.zero_grad()
                # lr_scheduler.step()

            # Determine approximate time left for epoch from the mean
            # per-batch duration so far.
            epoch_batches_left = len(dataloader_train) - (batch_i + 1)
            seconds_left = round(epoch_batches_left * (time.time() - start_time) / (batch_i + 1))
            time_left = datetime.timedelta(seconds=seconds_left)
            # ----------------
            #   Log progress
            # ----------------
            log_str = "[Epoch {}/{}, Batch {}/{} ETA {}]\n".format(epoch + 1, config["epochs"], batch_i,
                                                                   len(dataloader_train), time_left)

            metric_table = [["Metrics", *[f"YOLO Layer {i}" for i in range(len(model.yolo_layers))]]]
            # Build one table row per metric name, one column per YOLO layer.
            # Assumes loss_metrics is ordered the same as the module-level
            # `metrics` list — TODO confirm against compute_loss.
            for i, loss_metric in enumerate(loss_metrics):
                formats = {m: "%.6f" for m in metrics}
                formats["grid_x"] = "%2d"
                formats["grid_y"] = "%2d"
                # NOTE(review): the empty-list assignment below is dead code;
                # it is immediately overwritten by the comprehension.
                row_metrics = []
                row_metrics = [formats[metrics[i]] % val for val in loss_metric]
                metric_table += [[metrics[i], *row_metrics]]
            metric_table += [["Total Loss", "%.6f" % loss.detach().cpu()]]

            log_str += AsciiTable(metric_table).table
            logging.info(log_str + "\n")
            model.seen += imgs.size(0)  # running count of images processed

        if (epoch + 1) % config["evaluation_interval"] == 0 and epoch > 0:
            log_str = "---- Evaluating Model ----\n"
            # print("\n---- Evaluating Model ----")
            model.eval()
            # Evaluate the model on the validation set
            precision, recall, AP, f1, ap_class = evaluate(cur_model=model,
                                                           data_loader=dataloader_val,
                                                           device=device,
                                                           iou_thres=0.5,
                                                           conf_thres=0.8,
                                                           nms_thres=0.5,
                                                           img_size=config["img_size"])
            # NOTE(review): evaluation_metrics is built but never consumed in
            # this function (SummaryWriter is imported but not instantiated).
            evaluation_metrics = dict()
            evaluation_metrics["val_precision"] = precision.mean()
            evaluation_metrics["val_recall"] = recall.mean()
            evaluation_metrics["val_mAP"] = AP.mean()
            evaluation_metrics["val_f1"] = f1.mean()

            # Print class APs and mAP
            ap_table = [["Index", "Class name", "AP", "Precision", "Recall", "F1"]]
            for i, c in enumerate(ap_class):
                ap_table += [[c, class_names[c], "%.5f" % AP[i], "%.5f" % precision[i], "%.5f" % recall[i], "%.3f" % f1[i]]]
            log_str += AsciiTable(ap_table).table
            log_str += f"\n---- mAP {AP.mean()}"
            logging.info(log_str)

        if (epoch + 1) % config["checkpoint_interval"] == 0 and epoch > 0:
            # Create checkpoint (overwrites the same snapshot path each time)
            chkpt = {'epoch': epoch,
                     'best_loss': best_loss,
                     'model': model.state_dict(),
                     'optimizer': optimizer.state_dict()}
            torch.save(chkpt, config["pretrain_snapshot"])
            del chkpt
            logging.info("save checkpoint done")


def _get_optimizer(config, net):
    optimizer = None

    # Assign different lr for each layer
    params = None
    base_params = list(map(id, net.backbone.parameters())) # 获取backbone网络的参数ID
    logits_params = filter(lambda p: id(p) not in base_params, net.parameters()) # 获取backbone网络之外的参数ID

    if not config["lr"]["freeze_backbone"]:
        params = [
            {"params": logits_params, "lr": config["lr"]["other_lr"]},
            {"params": net.backbone.parameters(), "lr": config["lr"]["backbone_lr"]},
        ]
    else:
        logging.info("freeze backbone's parameters.")
        for p in net.backbone.parameters():
            p.requires_grad = False
        params = [
            {"params": logits_params, "lr": config["lr"]["other_lr"]},
        ]

    # Initialize optimizer class
    if config["optimizer"]["type"] == "adam":
        logging.info("Using Adam optimizer")
        optimizer = optim.Adam(params, weight_decay=config["optimizer"]["weight_decay"])
    elif config["optimizer"]["type"] == "amsgrad":
        logging.info("Using AMSGrad optimizer")
        optimizer = optim.Adam(params, weight_decay=config["optimizer"]["weight_decay"],
                               amsgrad=True)
    elif config["optimizer"]["type"] == "rmsprop":
        logging.info("Using RMSprop optimizer")
        optimizer = optim.RMSprop(params, weight_decay=config["optimizer"]["weight_decay"])
    else:
        # Default to sgd
        logging.info("Using SGD optimizer")
        optimizer = optim.SGD(params, momentum=0.9, weight_decay=config["optimizer"]["weight_decay"])

    return optimizer


def main():
    """Entry point: configure file+console logging, load the training
    configuration from the local ``param`` module, and start training."""
    # Timestamped log file name, e.g. "20240101123000.log".
    logfile = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
    logfile += ".log"
    fmt = "[%(asctime)s] %(message)s"
    datefmt = "%Y/%m/%d %H:%M:%S"
    # Ensure the log directory exists; without this, logging.basicConfig
    # raises FileNotFoundError on a fresh checkout with no "logs/" folder.
    os.makedirs("logs", exist_ok=True)
    logging.basicConfig(level=logging.DEBUG, format=fmt, datefmt=datefmt,
                        filename=os.path.join("logs", logfile), filemode="a")
    # Mirror all log records to the console in addition to the file.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    console.setFormatter(formatter)
    logging.getLogger("").addHandler(console)

    # Get configuration from the project-local "param" module.
    config = importlib.import_module("param").TRAINING_PARAMS
    train(config)


# Run training only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()



