from net.models import *
from utils.utils import *
from utils.datasets import *
from terminaltables import AsciiTable
import os
import time
import datetime
import logging
import torch
from torch.utils.data import DataLoader
import torch.distributed as dist
from torch.utils.tensorboard import SummaryWriter
import torch.optim as optim
import importlib
# from torchsummary import summary


def eval(config):
    """Evaluate a detection model on the validation set and log per-class AP.

    NOTE(review): this function shadows the builtin ``eval``; a rename
    (e.g. ``evaluate_model``) would be cleaner, but the name is kept since
    callers invoke it as ``eval``.

    Args:
        config: dict-like configuration with at least the keys
            "pretrain_snapshot", "data" (with "valid" and "names"),
            "batch_size", "n_cpu" and "img_size".

    Side effects:
        Logs model info, per-class AP table and mAP via ``logging``.
    """
    init_seeds()
    # Pick the compute device (CPU/GPU) for this run.
    device = select_device()

    logging.info("--------------- Evaluation task start -------------")

    # Construct model and move it to the selected device.
    model = DarknetModel(config).to(device)

    # Load checkpoint weights if available, otherwise random-init.
    if os.path.exists(config["pretrain_snapshot"]):
        logging.info("Load latest checkpoint from {}".format(config["pretrain_snapshot"]))
        chkpt = torch.load(config["pretrain_snapshot"], map_location=device)
        model.load_state_dict(chkpt['model'])
        del chkpt  # release checkpoint memory before evaluation
    else:
        logging.info(
            "Initialize model by random weights following normal distribution")
        model.apply(weights_init_normal)

    # Print model information
    # summary(model.backbone, input_size=(3, config["img_size"], config["img_size"]), batch_size=config["batch_size"])
    model_info(model)

    # Data configuration: validation list file and class names.
    valid_path = config["data"]["valid"]
    class_names = load_classes(config["data"]["names"])

    # Build the evaluation dataloader (no augmentation, fixed scale).
    dataset_val = YoloDataset(valid_path, augment=False, multiscale=False)
    dataloader_val = DataLoader(dataset_val,
                                batch_size=config["batch_size"],
                                shuffle=False,
                                num_workers=config["n_cpu"],
                                pin_memory=True,
                                collate_fn=dataset_val.collate_fn,
                                timeout=10)

    model.eval()
    # Disable autograd during evaluation: gradients are not needed and
    # skipping graph construction saves memory and time.
    with torch.no_grad():
        precision, recall, AP, f1, ap_class = evaluate(cur_model=model,
                                                       data_loader=dataloader_val,
                                                       device=device,
                                                       iou_thres=0.5,
                                                       conf_thres=0.8,
                                                       nms_thres=0.5,
                                                       img_size=config["img_size"])

    # NOTE(review): these aggregated metrics are currently unused (likely
    # intended for TensorBoard logging) — kept for future use.
    evaluation_metrics = {
        "val_precision": precision.mean(),
        "val_recall": recall.mean(),
        "val_mAP": AP.mean(),
        "val_f1": f1.mean(),
    }

    # Render per-class AP / precision / recall / F1 as an ASCII table.
    ap_table = [["Index", "Class name",
                 "AP", "Precision", "Recall", "F1"]]
    for i, c in enumerate(ap_class):
        ap_table += [[c, class_names[c], "%.5f" % AP[i], "%.5f" %
                      precision[i], "%.5f" % recall[i], "%.3f" % f1[i]]]
    log_str = "\n"
    log_str += AsciiTable(ap_table).table
    log_str += f"\n---- mAP {AP.mean()}"
    logging.info(log_str)


def main():
    """Set up file+console logging, load config from ``param.py``, run eval.

    Side effects:
        Creates the ``logs/`` directory if missing and writes a timestamped
        log file (e.g. ``20240101120000_eval.log``) there.
    """
    # Timestamped log file name for this evaluation run.
    logfile = time.strftime("%Y%m%d%H%M%S", time.localtime(time.time()))
    logfile += "_eval.log"
    fmt = "[%(asctime)s] %(message)s"
    datefmt = "%Y/%m/%d %H:%M:%S"
    # Ensure the log directory exists — logging.basicConfig raises
    # FileNotFoundError when the target directory is missing.
    os.makedirs("logs", exist_ok=True)
    logging.basicConfig(level=logging.DEBUG, format=fmt, datefmt=datefmt,
                        filename="logs/" + logfile, filemode="a")
    # Mirror all log records to the console as well.
    console = logging.StreamHandler()
    console.setLevel(logging.DEBUG)
    formatter = logging.Formatter(fmt=fmt, datefmt=datefmt)
    console.setFormatter(formatter)
    logging.getLogger("").addHandler(console)

    # Load the run configuration from the local ``param`` module.
    config = importlib.import_module("param").TRAINING_PARAMS
    eval(config)


# Run the evaluation only when executed as a script (not on import).
if __name__ == "__main__":
    main()
