import torch
from torch.utils.data import Dataset, DataLoader
import os
from pathlib import Path
import cv2
from pprint import pprint
import numpy as np
import argparse
from hub.yolov8 import yolov8, yolov8_change_head, yolov8_tfhead
from dataset.dataloader import CocoDataset, SimpleDetDataset
from torchvision.ops import nms
from util.common import image_preprocess
from util.metrics import NMS, collect_simpledetdataloader_resault
from torchmetrics.detection.mean_ap import MeanAveragePrecision
from tqdm import tqdm


def predict_image_list(parse, device=torch.device('cuda')):
    """Run detection on one image (or every file in a directory) and save
    copies with the predicted boxes drawn into ``parse.save_dir``.

    Args:
        parse: argparse namespace with ``img`` (file or directory path),
            ``num_class``, ``weight`` (state-dict path) and ``save_dir``.
        device: torch device to run inference on.
    """
    images = []
    if os.path.isdir(parse.img):
        images = [os.path.join(parse.img, f) for f in os.listdir(parse.img)]
    elif os.path.exists(parse.img):
        images.append(parse.img)

    print('load img {}'.format(len(images)))
    model = yolov8.yolov8_detect(nc=parse.num_class, phi='s').to(device)
    # model = yolov8_tfhead.yolov8_detect(nc=parse.num_class, model_scale=0.33).to(device)
    if os.path.exists(parse.weight):
        print('load weight from {}'.format(parse.weight))
        pretrained_dict = torch.load(parse.weight, map_location=device)
        model.load_state_dict(pretrained_dict)
    model.eval()

    # cv2.imwrite fails silently if the target directory does not exist.
    os.makedirs(parse.save_dir, exist_ok=True)
    for file in images:
        print('{}'.format(file))
        src_img = cv2.imread(file)
        if src_img is None:
            # os.listdir returns every entry, including non-image files.
            print('skip unreadable file {}'.format(file))
            continue
        # cvtColor allocates a new array, so no explicit copy is needed.
        img = cv2.cvtColor(src_img, cv2.COLOR_BGR2RGB)
        src_img = image_preprocess(img, (640, 640), False)
        img = image_preprocess(img, (640, 640), True)
        img_tensor = torch.from_numpy(img).float().to(device)
        img_tensor = img_tensor.permute(2, 0, 1).unsqueeze(0)  # HWC -> 1CHW
        with torch.no_grad():  # inference only; skip autograd bookkeeping
            y = model(img_tensor).detach().cpu().squeeze(0)
        y = y.permute(1, 0)
        # Split raw head output into box coords and per-class scores,
        # then keep the best class per box for NMS.
        bbox, cls_pref = y.split((4, parse.num_class), 1)
        cls_conf, cls_pre = torch.max(cls_pref, 1, keepdim=True)
        dbox = torch.cat((bbox, cls_conf, cls_pre), dim=1)
        bbox, _, _ = NMS(dbox, scores_threshold=0.5, device=torch.device('cpu'))
        for box in bbox:
            # np.int was removed in NumPy 1.24; the builtin int is equivalent.
            box = box.numpy().astype(int)
            src_img = cv2.rectangle(src_img, (box[0], box[1]), (box[2], box[3]), (125, 30, 250), 2)
        cv2.imwrite(os.path.join(parse.save_dir, Path(file).name), src_img)


def eval_dataset(parse, device=torch.device('cuda')):
    """Evaluate the model on the SimpleDetDataset validation split and print
    the mAP metrics as an aligned two-row table (names, then values).

    Args:
        parse: argparse namespace with ``img`` (dataset root), ``num_class``
            and ``weight`` (state-dict path).
        device: torch device to run inference on.
    """
    batch_size = 8
    dataset_val = SimpleDetDataset(parse.img, annotation_file='val.txt',
                                  batch=batch_size,
                                  device=device
                                  # transform=transforms.Compose([Normalizer(), Resizer()])
                                  )
    dataloader_val = DataLoader(dataset_val, batch_size, collate_fn=CocoDataset.collate_fn)
    model = yolov8_change_head.yolov8_detect(nc=parse.num_class, model_scale=0.33).to(device)

    if os.path.exists(parse.weight):
        print('load weight from {}'.format(parse.weight))
        pretrained_dict = torch.load(parse.weight, map_location=device)
        model.load_state_dict(pretrained_dict)
    model.eval()
    preds, target = collect_simpledetdataloader_resault(dataloader_val, parse.num_class, model)
    metric = MeanAveragePrecision()
    metric.update(preds, target)
    metrics = metric.compute()
    header = '\r'
    value = '\r'
    max_len = 20  # column width for the printed table
    for name, val in metrics.items():
        header += name.center(max_len, ' ')
        # compute() returns a dict of tensors; entries such as 'classes' may
        # be multi-element and cannot be formatted as a single float, so fall
        # back to their plain string representation.
        try:
            cell = '{:1.5f}'.format(float(val))
        except (TypeError, ValueError, RuntimeError):
            cell = str(val)
        value += cell.center(max_len, ' ')
    print(header)
    print(value)


if __name__ == '__main__':
    # CLI entry point: parse arguments, choose a device, and run inference.
    args = argparse.ArgumentParser()
    args.add_argument('--img', help='image path', type=str, default='../dataset/aisafety/test')
    args.add_argument('--check_point', help='check point path', type=str, default='')
    # args.add_argument('--weight', help='weight path', type=str, default='weights/tfhead/best_state_dict.pt')
    args.add_argument('--weight', help='weight path', type=str, default='weights/phase_i_SYNTHIA/best_state_dict.pt')
    args.add_argument('--device', help='device', type=str, default='cuda')
    args.add_argument('--num_class', help='num of class', type=int, default=1)
    args.add_argument('--save_dir', help='save dir', type=str, default='pre/draw_boxes')

    parse = args.parse_args()

    # Fall back to CPU when CUDA is requested but not available, instead of
    # crashing at the first .to(device) call on a CPU-only machine.
    if parse.device == 'cuda' and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    predict_image_list(parse, device)
    # eval_dataset(parse, device)