from backbone import Resnet50
from hybrid_encoder import HybridEncoder
from transformer import RTDETRTransformer
from rtdetr import RTDETR
from Presnet import PResNet
import torchvision.transforms as T

import torchvision.transforms as T
import torch
import matplotlib.pyplot as plt
from PIL import Image

# Inference setup: assemble RT-DETR from its three stages (backbone,
# hybrid encoder, transformer decoder) and load trained weights.
device = 'cpu'
backbone, encoder, decoder = PResNet(), HybridEncoder(), RTDETRTransformer()
model = RTDETR(backbone, encoder, decoder).to(device)
checkpoint_path = 'output/weight_presnet/epoch11.pth'
# map_location keeps loading working even when the checkpoint was saved
# from a CUDA device and this session is CPU-only (the original would
# raise "Attempting to deserialize object on a CUDA device" in that case).
checkpoint = torch.load(checkpoint_path, map_location=device)
model.load_state_dict(checkpoint)

# Validation-time preprocessing: resize to the model's 640x640 input and
# normalize with ImageNet mean/std.
val_transforms = T.Compose([T.Resize((640, 640)),
                            T.ToTensor(),
                            T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])

# The 80 COCO object-category names, indexed by the class id the model
# predicts (used by plot_results to label detections).
CLASSES = [
    'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
    'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
    'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
    'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
    'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
    'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
    'chair', 'couch', 'potted plant', 'bed', 'dining table', 
    'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
    'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 
    'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
    'toothbrush'
]

# Six base display colors as RGB triples in [0, 1]; plot_results tiles
# this palette so any number of detections gets a color.
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
          [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]

def box_cxcywh_to_xyxy(x):
    """Convert boxes from (cx, cy, w, h) to (x1, y1, x2, y2) corner format.

    Args:
        x: tensor of shape (N, 4) holding center-format boxes.

    Returns:
        Tensor of shape (N, 4) holding the same boxes in corner format.
    """
    cx, cy, w, h = x.unbind(1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
    return torch.stack(corners, dim=1)

def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to absolute corner coordinates.

    Args:
        out_bbox: (N, 4) tensor of boxes normalized to [0, 1].
        size: (width, height) of the target image, e.g. PIL's ``Image.size``.

    Returns:
        (N, 4) float tensor of (x1, y1, x2, y2) boxes in pixel coordinates.
    """
    img_w, img_h = size
    b = box_cxcywh_to_xyxy(out_bbox)
    # Build the scale on the same device as the boxes; the original
    # hard-coded a CPU tensor, which breaks as soon as the model output
    # lives on CUDA.  Behavior on CPU is unchanged.
    scale = torch.tensor([img_w, img_h, img_w, img_h],
                         dtype=torch.float32, device=b.device)
    return b * scale

def plot_results(pil_img, prob, boxes, save_root = None):
    """Draw predicted boxes with class labels on top of the image.

    Args:
        pil_img: the original PIL image to draw on.
        prob: (N, C) tensor of per-class scores for the kept queries.
        boxes: (N, 4) tensor of (x1, y1, x2, y2) boxes in pixel coordinates.
        save_root: path the figure is written to; when None the figure is
            shown interactively instead (the original crashed on None,
            because ``plt.savefig(None)`` raises).
    """
    plt.figure(figsize=(16, 10))
    plt.imshow(pil_img)
    ax = plt.gca()
    # Tile the small palette so any number of detections gets a color.
    colors = COLORS * 100
    for p, (xmin, ymin, xmax, ymax), c in zip(prob, boxes.tolist(), colors):
        ax.add_patch(plt.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin,
                                   fill=False, color=c, linewidth=3))
        # Label each box with its best class and that class's score.
        cl = p.argmax()
        text = f'{CLASSES[cl]}: {p[cl]:0.2f}'
        ax.text(xmin, ymin, text, fontsize=15,
                bbox=dict(facecolor='yellow', alpha=0.5))
    plt.axis('off')
    if save_root is not None:
        plt.savefig(save_root)
    else:
        plt.show()
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close()

# --- Run inference on a single COCO val image and visualize detections ---
im_path = '/mnt/sdb2/ray/mmdetection-main/data/coco/val2017/000000001993.jpg'
# Force 3 channels: ToTensor on a grayscale or RGBA image would otherwise
# feed the network the wrong channel count.
im = Image.open(im_path).convert('RGB')
img = val_transforms(im).unsqueeze(0).to(device)

model.eval()
# Inference only: disable autograd to avoid building a graph.
with torch.no_grad():
    outputs = model(img)
# NOTE(review): index 5 presumably selects the final decoder layer's
# prediction dict from the model's output — confirm against RTDETR.forward.
outputs = outputs[5]

# Per-class sigmoid scores; keep queries whose best score clears 0.2.
# NOTE(review): the [:-1] slice drops the last class ('toothbrush').  That
# idiom comes from softmax-DETR's trailing no-object class; a sigmoid head
# normally has no background slot, so this may discard a real class — verify
# against the model's num_classes.
probas = outputs['pred_logits'].sigmoid()[0, :, :-1]
keep = probas.max(-1).values > 0.2
bboxes_scaled = rescale_bboxes(outputs['pred_box'][0, keep], im.size)
plot_results(im, probas[keep], bboxes_scaled, 'predic')