from dataset import CocoDetection
from transforms import RandomFlip, Resize, ToTensor, Normalize, Compose
from torch.utils.data import DataLoader
from backbone import Resnet50
from Presnet import PResNet
from hybrid_encoder import HybridEncoder
from transformer import RTDETRTransformer
from rtdetr import RTDETR
from transforms import RandomFlip, Resize, ToTensor, Normalize, Compose
from torchvision.ops.boxes import box_area
import numpy as np
import matplotlib.pyplot as plt
import torch
from tqdm import tqdm
from utils import collate_fn

if __name__ == '__main__':
    # Assemble the RT-DETR detector: ResNet backbone -> hybrid encoder ->
    # transformer decoder.
    backbone = PResNet()
    encoder = HybridEncoder()
    decoder = RTDETRTransformer()
    model = RTDETR(backbone, encoder, decoder)
    # Restore weights on CPU first so the checkpoint loads regardless of
    # which GPU it was saved from; the whole model is moved to CUDA below.
    model.load_state_dict(
        torch.load('output/weight_base/epoch11.pth', map_location='cpu'))

    device = torch.device('cuda')
    model = model.to(device)

    # The 80 COCO class names, in category order.
    # NOTE(review): currently unused in this script -- presumably kept for
    # labelling plotted points later; confirm before removing.
    CLASSES = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
        'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
        'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
        'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
        'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
        'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
        'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
        'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
        'chair', 'couch', 'potted plant', 'bed', 'dining table',
        'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
        'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
        'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
        'toothbrush'
    ]

    # Hard-coded COCO val2017 locations -- adjust for other machines.
    img_folder = '/mnt/sdb2/ray/mmdetection-main/data/coco/val2017/'
    ann_file = '/mnt/sdb2/ray/mmdetection-main/data/coco/annotations/instances_val2017.json'

    # Evaluation-time transforms only: fixed 640x640 resize, tensor
    # conversion, normalization. No augmentation (no RandomFlip).
    transforms = Compose([Resize((640, 640)),
                          ToTensor(),
                          Normalize()])

    dataset = CocoDetection(ann_file, img_folder, transforms)

    # shuffle=False keeps the scatter data deterministic across runs.
    dataloader = DataLoader(dataset, batch_size=16, shuffle=False,
                            num_workers=8, collate_fn=collate_fn,
                            pin_memory=True)

    def box_iou(boxes1, boxes2):
        """Pairwise IoU between two sets of xyxy boxes.

        boxes1: [N, 4] tensor, boxes2: [M, 4] tensor, both in
        (x1, y1, x2, y2) form. Returns an [N, M] IoU matrix.
        """
        # Per-box areas: width * height.
        area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
        area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])

        # Corners of every pairwise intersection rectangle, broadcast to
        # [N, M, 2] via the inserted singleton axis on boxes1.
        top_left = torch.max(boxes1[:, None, :2], boxes2[:, :2])
        bottom_right = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])

        # Clamp at zero so disjoint pairs contribute no intersection area.
        inter_wh = (bottom_right - top_left).clamp(min=0)
        intersection = inter_wh[..., 0] * inter_wh[..., 1]  # [N, M]

        union = area1[:, None] + area2 - intersection
        return intersection / union
    
    def box_cxcywh_to_xyxy(x):
        """Convert [N, 4] boxes from (cx, cy, w, h) to (x1, y1, x2, y2)."""
        cx, cy, w, h = x.unbind(1)
        half_w = 0.5 * w
        half_h = 0.5 * h
        corners = (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
        return torch.stack(corners, dim=1)
    
    def post_process(samples, targets, model, slc_iou_sco_list, slc_cls_sco_list):
        """Run the model on one batch and record, for every confident
        prediction, its best IoU against any ground-truth box together with
        its class score.

        samples: batched input images (forwarded to ``model`` unchanged).
        targets: list of per-image dicts; each must hold a 'target_box'
            tensor of normalized cxcywh boxes.
        model: detector whose output's last element carries 'pred_logits'
            and 'pred_box'.
        slc_iou_sco_list / slc_cls_sco_list: output accumulators; one numpy
            array per batch is appended to each.
        """
        dec_out = model(samples)

        # Per-query class probabilities: b x 300 x 80.
        probas = dec_out[-1]['pred_logits'].sigmoid()
        cls_scores = probas.max(-1).values  # b x 300
        # Keep only confident predictions, pooled across the whole batch.
        keep = cls_scores > 0.5  # b x 300

        # Boxes are normalized cxcywh; scale to the 640x640 network input
        # and convert to xyxy for IoU. Assumes the Resize((640, 640))
        # transform used when building the dataset.
        slc_boxes = box_cxcywh_to_xyxy(dec_out[-1]['pred_box'][keep] * 640)

        tar_boxes = torch.cat([box_cxcywh_to_xyxy(targets[k]['target_box'] * 640)
                               for k in range(len(targets))], dim=0)

        # A batch with no ground-truth boxes would make the max() below fail
        # on an empty dimension -- nothing to score, so skip the batch.
        if tar_boxes.size(0) == 0:
            return

        # Move targets onto the predictions' device rather than relying on
        # the module-level `device` global.
        ious = box_iou(slc_boxes, tar_boxes.to(slc_boxes.device))

        # Best IoU over all ground-truth boxes for each kept prediction.
        slc_iou_sco = ious.max(-1).values
        slc_cls_sco = cls_scores[keep]

        slc_iou_sco_list.append(slc_iou_sco.cpu().numpy())
        slc_cls_sco_list.append(slc_cls_sco.cpu().numpy())
    
    # Sweep the whole validation set, collecting (best-IoU, class-score)
    # pairs, then scatter-plot them to visualise how well confidence aligns
    # with localisation quality.
    model.eval()
    slc_iou_sco_list = []  # per-batch numpy arrays of best IoUs
    slc_cls_sco_list = []  # matching per-batch arrays of max class scores
    with torch.no_grad():
        # collate_fn yields (samples, targets, img_ids, img_ori_size);
        # only the first two are used here.
        for samples, targets, img_ids, img_ori_size in tqdm(dataloader, desc='plotting', ncols=100):
            samples = samples.to(device)
            post_process(samples, targets, model, slc_iou_sco_list, slc_cls_sco_list)
            # post_process(samples, targets, model_ano, slc_iou_sco_list_ano, slc_cls_sco_list_ano)

        # Flatten the per-batch arrays into one vector each for plotting.
        iou_score = np.concatenate(slc_iou_sco_list, axis=0)
        cls_score = np.concatenate(slc_cls_sco_list, axis=0)
        # iou_score_ano = np.concatenate(slc_iou_sco_list_ano, axis=0)
        # cls_score_ano = np.concatenate(slc_cls_sco_list_ano, axis=0)
        # plt.scatter(iou_score_ano, cls_score_ano, \
        #             s = 0.5, c = 'Blue', alpha = 0.1)
        # One translucent point per confident prediction.
        plt.scatter(iou_score, cls_score, \
                    s = 0.5, c = 'Green', alpha = 0.3)
        plt.title("Using focal loss", fontsize=16, color='darkblue', loc='center')
        plt.xlabel('IOU scores')
        plt.ylabel('CLASS scores')

        plt.savefig('focal loss.png')
    print('finished!')
            






    
