from model.model import *
from utils.logger import *
from utils.reader_config import *
from utils.utils import *
from model.model import DarkNet
import copy

from terminaltables import AsciiTable

import os
import sys
import time
import datetime
import argparse
import numpy as np

import torch
from torch.utils.data import DataLoader
from torchvision import transforms
from torch.autograd import Variable
from datasets.dataSetUtils import decode_label_to_XYXY, checklabel
import torch.optim as optim
from datasets import build_dataset

def checkData(imgs, targets, labelname):
    """Visual sanity check for a training batch.

    Draws every ground-truth box on its image and shows it with OpenCV,
    blocking on a key press after each box.

    Args:
        imgs: batch tensor of shape (N, C, H, W); values assumed in [0, 1]
            since they are multiplied by 255 below — TODO confirm.
        targets: tensor whose column 0 is the image index within the batch
            and remaining columns are the encoded box label.
        labelname: per-image list of class-name strings, indexed [img][box].
    """
    # `img_idx` instead of the original `id`, which shadowed the builtin.
    for img_idx, img in enumerate(imgs):
        c, h, w = img.shape

        # Select the target rows belonging to this image.
        mask = targets[:, 0] == img_idx
        labels = targets[mask, 1:]
        labels = decode_label_to_XYXY(labels, [w, h])

        # CHW float tensor -> HWC uint8 array for OpenCV drawing.
        img = img.permute([1, 2, 0])
        img = img * 255
        img = np.asarray(img, dtype=np.uint8)

        for box_idx, label in enumerate(labels):
            img = draw_bbox_label(img.copy(), label[1:], labelname[img_idx][box_idx])
            cv2.imshow("image", img)
            cv2.waitKey(0)  # wait for a key press before showing the next box

class trainEngine():
    """Training engine for the YOLOv3 DarkNet model.

    Builds the model, dataset, dataloader and optimizer from ``config``,
    then runs the epoch loop, evaluating via a nested ``testEngine`` and
    checkpointing weights to ``config.train.checkPoin``.
    """

    def __init__(self, config, logger=None):
        # Deep-copy so later mutations of self.config do not leak into the
        # caller's dict (which is also handed to testEngine below).
        self.config = Struct(copy.deepcopy(config))
        self.device = torch.device('cuda') if self.config.train.gpu else torch.device('cpu')
        # DarkNet is defined in model/model.py; it can also take an img_size
        # argument to configure the input resolution.
        self.model = DarkNet(self.config.model.model_def).to(self.device)
        # weight_init_normal (utils/utils.py) initializes the weights.
        self.model.apply(weight_init_normal)
        self.logger = logger

        # Optionally resume: a ".pth" file is a torch state dict, anything
        # else is treated as raw darknet weights.
        if os.path.exists(self.config.train.checkpoints):
            if self.config.train.checkpoints.endswith(".pth"):
                weights = torch.load(self.config.train.checkpoints, map_location=self.device)
                self.model.load_state_dict(weights)
            else:
                self.model.load_darknet_weights(self.config.train.checkpoints)

        self.dataset = build_dataset(self.config)
        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.config.train.batch_size,
            shuffle=self.config.train.shuffle,
            num_workers=self.config.train.num_works,
            pin_memory=True,
            collate_fn=self.dataset.collate_fn,
        )

        self.optimizer = torch.optim.Adam(self.model.parameters())
        # Names of the per-YOLO-layer metrics printed after every batch.
        self.metrics = [
            "grid_size",
            "loss",
            "x",
            "y",
            "w",
            "h",
            "conf",
            "cls",
            "cls_acc",
            "recall50",
            "recall75",
            "precision",
            "conf_obj",
            "conf_noobj",
        ]
        # "evolution" makes testEngine skip building/loading its own model;
        # this engine assigns its model to test_engine before each eval.
        config.train_flag = "evolution"
        self.test_engine = testEngine(config, self.logger)

    def train(self):
        """Run the full training loop with periodic evaluation and checkpoints."""
        print("Start train network")
        # Baseline evaluation of the (possibly pretrained) model.
        self.test_engine.model = self.model
        precision, recall, AP, f1, ap_class = self.test_engine.evalution(
            epoch=0, img_size=self.config.train.img_size,
            iou_thres=0.5, conf_thres=0.5, nms_thres=0.5)
        bestAp = AP.mean()
        for epoch in range(self.config.train.epochs):
            self.train_epoch(epoch)
            self.test_engine.model = self.model
            precision, recall, AP, f1, ap_class = self.test_engine.evalution(
                epoch=epoch, img_size=self.config.train.img_size,
                iou_thres=0.5, conf_thres=0.5, nms_thres=0.5)

            # Periodic snapshot.
            if epoch % self.config.train.checkpoint_interval == 0:
                savepath = os.path.join(self.config.train.checkPoin, "yolov3_ckpt_{}.pth".format(epoch))
                torch.save(self.model.state_dict(), savepath)
            # Keep the best-mAP weights under a fixed name (the original
            # called .format(epoch) on a string with no placeholder).
            if bestAp < AP.mean():
                bestAp = AP.mean()
                savepath = os.path.join(self.config.train.checkPoin, "yolov3_best.pth")
                torch.save(self.model.state_dict(), savepath)

    def train_epoch(self, epoch):
        """Train one epoch, printing a per-batch metric table and an ETA."""
        self.model.train()
        start_time = time.time()
        # Format spec per metric is loop-invariant; build it once per epoch
        # (the original rebuilt it for every metric of every batch).
        formats = {m: "%.6f" for m in self.metrics}
        formats["grid_size"] = "%2d"
        formats["cls_acc"] = "%.2f%%"
        for batch_i, (imgs, targets, labelnames) in enumerate(self.dataloader):
            batches_done = len(self.dataloader) * epoch + batch_i

            if self.config.model.checkData:
                checkData(imgs, targets, labelnames)

            imgs = Variable(imgs.to(self.device))
            targets = Variable(targets.to(self.device), requires_grad=False)

            loss, outputs = self.model(imgs, targets)
            loss.backward()

            # Gradient accumulation: step/zero only every N batches.
            # NOTE(review): the step also fires at batches_done == 0, i.e.
            # after a single batch at the start of training — confirm that
            # is intended. (Removed a leftover debug print("step") here.)
            if batches_done % self.config.train.gradient_accumulations == 0:
                self.optimizer.step()
                self.optimizer.zero_grad()

            # ----------------
            #   Log progress
            # ----------------
            log_str = "\n----[Epoch %d/%d, Batch %d/%d] ----\n" % (
                epoch, self.config.train.epochs, batch_i, len(self.dataloader))
            metric_table = [["metrics", *["YOLO Layer {}".format(i) for i in range(len(self.model.yolo_layers))]]]

            # One table row per metric, one column per YOLO layer.
            for metric in self.metrics:
                row_metrics = [formats[metric] % yolo.metrics.get(metric, 0) for yolo in self.model.yolo_layers]
                metric_table += [[metric, *row_metrics]]

            # Tensorboard scalars — hoisted out of the metric loop, where the
            # original rebuilt the list for every metric and shadowed the
            # loop variable `metric` with the inner dict value.
            # NOTE(review): only "grid_size" entries are kept by the ==
            # test below; confirm the condition is not meant to be "!=".
            tensorboard_log = []
            for j, yolo in enumerate(self.model.yolo_layers):
                for name, value in yolo.metrics.items():
                    if name == "grid_size":
                        tensorboard_log += [("{}_{}".format(name, j + 1), value)]
            tensorboard_log += [("loss", loss.item())]
            # logger.list_of_scalars_summary(tensorboard_log, batches_done)

            log_str += AsciiTable(metric_table).table
            log_str += "\nTotal loss {}".format(loss.item())

            # Rough ETA for the remainder of this epoch.
            epoch_batches_left = len(self.dataloader) - (batch_i + 1)
            time_left = datetime.timedelta(seconds=epoch_batches_left * (time.time() - start_time) / (batch_i + 1))
            log_str += "\n---- ETA {}".format(time_left)
            print(log_str)
            self.model.seen += imgs.size(0)


class testEngine():
    """Evaluation engine: runs inference + NMS over the validation set and
    computes per-class AP / mAP.

    When ``config.train_flag == "evolution"`` the caller (trainEngine) is
    expected to assign ``self.model`` before calling :meth:`evalution`, so
    no model is built or loaded here.
    """

    def __init__(self, config, logger=None):
        # Deep-copy so mutations of self.config never leak back to the caller.
        self.config = Struct(copy.deepcopy(config))

        self.device = torch.device('cuda') if self.config.test.gpu else torch.device('cpu')

        self.logger = logger

        if self.config.train_flag != "evolution":
            # DarkNet is defined in model/model.py; it can also take an
            # img_size argument to configure the input resolution.
            self.model = DarkNet(self.config.model.model_def).to(self.device)
            # weight_init_normal (utils/utils.py) initializes the weights.
            self.model.apply(weight_init_normal)

            if os.path.exists(self.config.test.checkpoints):
                # BUG FIX: the original tested config.train.checkpoints for
                # the ".pth" extension while loading config.test.checkpoints;
                # use the test path consistently for both.
                if self.config.test.checkpoints.endswith(".pth"):
                    weights = torch.load(self.config.test.checkpoints, map_location=self.device)
                    self.model.load_state_dict(weights)
                else:
                    self.model.load_darknet_weights(self.config.test.checkpoints)

        self.dataset = build_dataset(self.config)

        self.dataloader = torch.utils.data.DataLoader(
            self.dataset,
            batch_size=self.config.test.batch_size,
            shuffle=False,  # deterministic order for evaluation
            num_workers=self.config.test.num_works,
            pin_memory=True,
            collate_fn=self.dataset.collate_fn,
        )

        if self.config.train_flag != "evolution":
            self.optimizer = torch.optim.Adam(self.model.parameters())
        # Per-YOLO-layer metric names (kept for parity with trainEngine).
        self.metrics = [
            "grid_size",
            "loss",
            "x",
            "y",
            "w",
            "h",
            "conf",
            "cls",
            "cls_acc",
            "recall50",
            "recall75",
            "precision",
            "conf_obj",
            "conf_noobj",
        ]

    def evalution(self, epoch=0, img_size=416, iou_thres=0.5, conf_thres=0.5, nms_thres=0.5):
        """Evaluate ``self.model`` on the validation dataloader.

        Returns ``(precision, recall, AP, f1, ap_class)`` as produced by
        ``ap_per_class``. (The misspelled name is kept: trainEngine and
        other callers use it.)
        """
        print("\n---- Evaluating in epoch: {}----".format(epoch))
        self.model.eval()
        labels = []          # all ground-truth class ids, fed to ap_per_class
        sample_metrics = []  # list of (TP, confs, pred) tuples per image
        with torch.no_grad():
            for batch_i, (imgs, targets, labelnames) in enumerate(tqdm.tqdm(self.dataloader, desc="Detecting objects")):
                # Extract ground-truth class labels.
                labels += targets[:, 1].tolist()

                if self.config.model.checkData:
                    checkData(imgs, targets, labelnames)

                # Rescale targets from normalized xywh to pixel xyxy.
                targets[:, 2:] = xywh2xyxy(targets[:, 2:])
                targets[:, 2:] *= img_size
                # BUG FIX: follow the engine's configured device instead of
                # torch.cuda.is_available(); the original could move inputs
                # to CUDA while the model stayed on CPU. .float() preserves
                # the FloatTensor conversion the original performed.
                imgs = Variable(imgs.to(self.device).float(), requires_grad=False)
                outputs = self.model(imgs)
                outputs = non_max_suppression(outputs, conf_thres=conf_thres, nms_thres=nms_thres)
                sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=iou_thres)

            # Concatenate sample statistics.
            # NOTE(review): np.concatenate raises when sample_metrics is
            # empty (model detects nothing at all) — confirm callers accept
            # that failure mode for untrained models.
            true_positives, pred_scores, pred_labels = [np.concatenate(x, 0) for x in zip(*sample_metrics)]
            precision, recall, AP, f1, ap_class = ap_per_class(true_positives, pred_scores, pred_labels, labels)

            evaluation_metrics = [
                ("val_precision", precision.mean()),
                ("val_recall", recall.mean()),
                ("val_mAP", AP.mean()),
                ("val_f1", f1.mean()),
            ]

            # BUG FIX: logger defaults to None; the original dereferenced it
            # unconditionally and crashed when no logger was supplied.
            if self.logger is not None:
                self.logger.list_of_scalars_summary(tag_value_pairs=evaluation_metrics, step=epoch)

            # Print per-class AP table and overall mAP.
            ap_table = [["Index", "Class name", "AP"]]
            for i, c in enumerate(ap_class):
                ap_table += [[c, self.dataset.cats[c], "%.5f" % AP[i]]]
            print(AsciiTable(ap_table).table)
            print(f"---- mAP {AP.mean()}")

            return precision, recall, AP, f1, ap_class




