import os

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from omnidet.data_loader.woodscape_loader import WoodScapeRawDataset
from omnidet.main import collect_tupperware

from omnidet.models.detection_decoder import YoloDecoder
from omnidet.models.resnet import ResnetEncoder
from omnidet.train_utils.detection_utils import *

import copy
from PIL import Image
import cv2


def inference(args):
    """Run object-detection inference over the WoodScape validation split.

    Loads pretrained encoder/decoder weights, visualizes each processed
    image with its predicted boxes, and accumulates detection statistics
    (TP/conf/pred-class per image) to compute AP metrics at the end.

    :param args: namespace-like config (from ``collect_tupperware``) with
        dataset paths, network hyper-parameters, device, and NMS thresholds.
        May optionally carry ``pretrained_weights`` (weights directory);
        otherwise the original hard-coded path is used.
    :return: tuple ``(precision, recall, AP, f1, ap_class)`` from
        ``ap_per_class``, or all-zero scalars when nothing was detected.
    """
    val_dataset = WoodScapeRawDataset(data_path=args.dataset_dir,
                                      path_file=args.val_file,
                                      is_train=False,
                                      config=args)

    # Evaluation must see every sample exactly once, in a stable order:
    # the original shuffled and dropped the last partial batch, which
    # biases the reported metrics.
    val_loader = DataLoader(val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.num_workers,
                            pin_memory=True,
                            drop_last=False,
                            collate_fn=val_dataset.collate_fn)

    print("Running: Loading model from pretrained_weights.")
    # Prefer a configured weights directory; fall back to the original
    # hard-coded location for backward compatibility.
    pre_trained_dir = getattr(args, "pretrained_weights", None) or \
        '/home/li/深度学习/鱼眼相机数据集相关资料/模型/OmniDet_Boilerplate_Weights-20250102T112055Z-001/OmniDet_Boilerplate_Weights/res18'
    encoder_path = os.path.join(pre_trained_dir, "encoder.pth")
    decoder_path = os.path.join(pre_trained_dir, "detection.pth")

    encoder = ResnetEncoder(num_layers=args.network_layers, pretrained=False).to(args.device)
    loaded_dict_enc = torch.load(encoder_path, map_location=args.device)
    # The encoder checkpoint also stores the training feed resolution.
    feed_height = loaded_dict_enc['height']
    feed_width = loaded_dict_enc['width']
    # Drop the extra checkpoint entries (height/width) before loading.
    filtered_dict_enc = {k: v for k, v in loaded_dict_enc.items() if k in encoder.state_dict()}
    encoder.load_state_dict(filtered_dict_enc)
    encoder.eval()

    decoder = YoloDecoder(encoder.num_ch_enc, args).to(args.device)
    decoder.load_state_dict(torch.load(decoder_path, map_location=args.device))
    decoder.eval()

    labels = []          # ground-truth class ids over the whole split
    sample_metrics = []  # list of (TP, confs, pred) tuples, one per image
    img_size = [feed_height, feed_width]

    # Pure inference: disable autograd so no graph/memory is accumulated.
    with torch.no_grad():
        for batch_i, inputs in enumerate(tqdm(val_loader, desc="Detecting objects")):

            for key, ipt in inputs.items():
                inputs[key] = ipt.to(args.device)

            # NOTE(review): only the FIRST image of each batch is run through
            # the network, while `targets` below covers the whole batch — the
            # metrics are only meaningful with batch_size == 1. TODO confirm.
            real_inputs = inputs["color_aug", 0, 0][0].unsqueeze(0)

            # Show the raw network input (CHW float in [0, 1] -> HWC uint8).
            real_image = real_inputs[0].detach().cpu().permute(1, 2, 0).numpy()
            real_image = np.ascontiguousarray((real_image * 255).astype(np.uint8))
            Image.fromarray(real_image).show()

            features = encoder(real_inputs)
            outputs = decoder(features, img_dim=[feed_width, feed_height])["yolo_outputs"]

            show_images = inputs["color_aug", 0, 0].detach().cpu()
            # Clone before the in-place *= below: .cpu() is a no-op view when
            # the tensors already live on the CPU, so mutating it would
            # corrupt `inputs`.
            targets = inputs[("detection_labels", 0)].detach().cpu().clone()
            labels += targets[:, 1].tolist()
            # Rescale normalized xywh targets to absolute xyxy pixel coords.
            targets[:, 2:6] = xywh2xyxy(targets[:, 2:6])
            # x-coordinates scale with the feed WIDTH, y-coordinates with the
            # HEIGHT (the original multiplied x by height and y by width,
            # which is wrong whenever the feed is not square).
            targets[:, 2] *= img_size[1]  # x1 * width
            targets[:, 3] *= img_size[0]  # y1 * height
            targets[:, 4] *= img_size[1]  # x2 * width
            targets[:, 5] *= img_size[0]  # y2 * height

            outputs = non_max_suppression(outputs,
                                          conf_thres=args.detection_conf_thres,
                                          nms_thres=args.detection_nms_thres)
            sample_metrics += get_batch_statistics(outputs, targets, iou_threshold=0.5, args=args)

            sh_img = show_images[0].permute(1, 2, 0).numpy()
            sh_img = np.ascontiguousarray((sh_img * 255).astype(np.uint8))

            # NMS conventionally yields None for images with no detections;
            # the original crashed on len(None) in that case.
            detections = outputs[0]
            if detections is not None:
                for detection in detections:
                    # Move to CPU before .numpy() — detections may be on GPU.
                    min_x, min_y, max_x, max_y = detection[:4].detach().cpu().numpy()
                    cv2.rectangle(sh_img, (int(min_x), int(min_y)),
                                  (int(max_x), int(max_y)), (0, 0, 255), 1)

            Image.fromarray(sh_img).show()

    # Handle the case of empty sample_metrics:
    if not sample_metrics:
        precision, recall, AP, f1, ap_class = 0, 0, 0, 0, 0
    else:
        # Concatenate per-image statistics into flat arrays.
        true_positives, pred_scores, pred_labels = \
            [np.concatenate(x, 0) for x in zip(*sample_metrics)]
        precision, recall, AP, f1, ap_class = ap_per_class(
            true_positives, pred_scores, pred_labels, labels)

    # The original computed the metrics and silently discarded them.
    print(f"precision={precision}, recall={recall}, AP={AP}, f1={f1}, ap_class={ap_class}")
    return precision, recall, AP, f1, ap_class


if __name__ == "__main__":
    args = collect_tupperware()
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    # os.environ values must be strings: the original assigned the int -1
    # when no device was configured, which raises TypeError at runtime.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.cuda_visible_devices or -1)
    inference(args)
