# coding: utf-8
import os
import time
import torch
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn as nn
from pprint import pprint
from DefaultConfig import DefaultConfig
opt = DefaultConfig()
# from models.models import *
from models.yolov3 import yolov3
from models.yololayer import YOLOLayer
from dataset.dataset import *
from utils.utils import *
from utils.Logger import *
# from dataset import detection_collate
# Ensure the log output directory exists. makedirs with exist_ok=True is
# race-free, unlike the original exists()/mkdir pair, which could raise
# FileExistsError if another process created the directory in between.
os.makedirs("./log/", exist_ok=True)


def train(**kwargs):
    """Train the configured detection network (currently only yolov3).

    Keyword arguments override fields of the module-level ``opt``
    (``DefaultConfig``) instance via ``opt._parse``.  Progress is logged
    every ``opt.display`` iterations, a checkpoint is written every
    ``opt.save_interval`` iterations, and a final checkpoint is written
    after the last epoch.
    """
    start_epoch = 0
    opt._parse(kwargs)
    print("========= user config =============")
    pprint(opt._state_dict())
    print("========= user config end =========")

    # log
    # BUGFIX: the original passed a stray, uncalled `time.localtime` as a
    # fourth .format() argument; the format string has only 3 placeholders.
    log = Logger("./log/{}-{}_{}.log".format(__file__.split('/')[-1], opt.net,
                                             time.strftime("%Y%m%d-%H%M%S")),
                 level='debug').logger
    log.info(opt._state_dict())

    # device: restrict visible GPUs *before* creating the torch device
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(str(i) for i in opt.GPU_ID)
    device = torch.device("cuda" if torch.cuda.is_available() and opt.USE_CUDA else "cpu")

    # output dir (makedirs + exist_ok is race-free, unlike exists()/mkdir)
    os.makedirs(opt.save_dir, exist_ok=True)
    save_prefix = os.path.join(opt.save_dir, opt.save_prefix)

    # dataset
    dataset = MyDataset(opt.dataset_root, opt.dataset_file, transform=True, img_dim=256)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.train_batch, shuffle=True,
                                             num_workers=len(opt.GPU_ID) * 4, pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # model
    net = None
    if opt.net == "yolov3":
        net = yolov3(opt.num_classes)

    if net is None:
        log.error("error: net is None!")
        return

    # anchors: COCO anchors are specified for 416x416 input; rescale to the
    # 256x256 training resolution used above.
    with open("config/coco_anchors.txt") as f:
        anchors = [[int(line.split(',')[0]), int(line.split(',')[1])] for line in f]
        anchors = [[w / 416 * 256, h / 416 * 256] for w, h in anchors]
    # one YOLO loss head per detection scale, three anchors each
    criterion = [YOLOLayer(anchors[0:3], opt.num_classes),
                 YOLOLayer(anchors[3:6], opt.num_classes),
                 YOLOLayer(anchors[6:9], opt.num_classes)]

    # optimizer and scheduler
    # NOTE(review): Adam is constructed with its default lr; opt.base_lr /
    # opt.momentum / opt.weight_decay are unused here — confirm intended.
    optimizer = torch.optim.Adam(net.parameters())
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, opt.stepsize, opt.gamma)
    if opt.pre_checkpoint:
        net = load_weights(opt.pre_checkpoint, net)
        log.info("=> load state dict from {}...".format(opt.pre_checkpoint))

    if opt.USE_CUDA:
        net = torch.nn.DataParallel(net)
        cudnn.benchmark = True

    net.to(device)
    criterion = [c.to(device) for c in criterion]

    # BUGFIX: ensure BN/dropout layers are in training mode.
    net.train()

    k = 0  # global iteration counter, drives display/save intervals
    for epoch in range(start_epoch, opt.max_epoch + 1):
        for batch_idx, (imgs, targets) in enumerate(dataloader):
            imgs = imgs.to(device)
            targets = targets.to(device)

            optimizer.zero_grad()

            y1, y2, y3 = net(imgs)

            # NOTE(review): heads are applied in reversed order (criterion[2]
            # on y1, criterion[0] on y3) — presumably matching anchor scale
            # to feature-map stride; verify against YOLOLayer/yolov3.
            output1, loss1 = criterion[2](y1, targets, opt.image_dim)
            output2, loss2 = criterion[1](y2, targets, opt.image_dim)
            output3, loss3 = criterion[0](y3, targets, opt.image_dim)
            loss = loss1 + loss2 + loss3
            loss.backward()

            torch.nn.utils.clip_grad_norm_(net.parameters(), opt.clip_grad)
            optimizer.step()

            if k % opt.display == 0:
                log.info("[Epoch {}/{}, Batch {}/{}, total loss: {}".format(epoch, 
                opt.max_epoch, batch_idx, 
                len(dataloader),
                loss.item()))
                for i in criterion:
                    log.info("[grid_size: {}, loss: {:.4f}, x: {:.4f}, y: {:.4f}, "
                              "w: {:.4f}, h: {:.4f}, conf: {:.4f}, cls: {:.4f}，"
                              " cls_acc: {:.4f}, recall50: {:.4f}, recall75: {:.4f}，"
                              "precision: {:.4f}, conf_obj: {:.4f}, conf_noobj: {:.4f}]".format(
                        i.metrics["grid_size"],
                        i.metrics["loss"],
                        i.metrics["x"],
                        i.metrics["y"],
                        i.metrics["w"],
                        i.metrics["h"],
                        i.metrics["conf"],
                        i.metrics["cls"],
                        i.metrics["cls_acc"],
                        i.metrics["recall50"],
                        i.metrics["recall75"],
                        i.metrics["precision"],
                        i.metrics["conf_obj"],
                        i.metrics["conf_noobj"]
                    ))
                print("")
            if k % opt.save_interval == 0 and k != 0:
                path = save_prefix + "_iter_{}.pkl".format(k)
                SaveCheckPoint(path, net, optimizer, scheduler, epoch)
                log.info("=> save model: {}".format(path))
                print("")
            k += 1
        # BUGFIX: the MultiStepLR scheduler was created but never stepped,
        # so the learning-rate schedule never advanced. Step once per epoch.
        scheduler.step()

    log.info("optimize done...")
    path = save_prefix + "_final.pkl"
    SaveCheckPoint(path, net, optimizer, scheduler, opt.max_epoch)
    log.info("=> save model: {} ...".format(path))


if __name__ == '__main__':
    # Expose this module's public callables (e.g. ``train``) as a CLI via
    # Google's python-fire, e.g.: python <script>.py train --net=yolov3
    import fire
    fire.Fire()

