import matplotlib.pyplot as plt
from torchvision.utils import draw_bounding_boxes, draw_segmentation_masks
import torch
from torchvision.io import read_image

from torchvision.transforms import v2 as T

def plot_box(img, boxes, infos, ax):
    """Render *img* on *ax* and overlay each xyxy box with its label text.

    Args:
        img: image in a format `ax.imshow` accepts (array / HxWxC).
        boxes: iterable of (x0, y0, x1, y1) boxes.
        infos: per-box labels; indexed in lockstep with ``boxes``.
        ax: matplotlib axes to draw onto.

    Returns:
        The same axes, for chaining.
    """
    ax.imshow(img)
    for idx, bx in enumerate(boxes):
        x0, y0 = bx[0], bx[1]
        w = bx[2] - x0
        h = bx[3] - y0
        rect = plt.Rectangle((x0, y0), w, h, color="blue", fill=False, linewidth=1)
        ax.add_patch(rect)
        ax.text(x0, y0, str(infos[idx]), bbox={'facecolor': 'blue', 'alpha': 0.5})
    return ax

def get_transform(train):
    """Build the image preprocessing pipeline.

    Args:
        train: when True, prepend a 50% random horizontal flip for
            data augmentation.

    Returns:
        A ``T.Compose`` converting images to scaled float pure tensors.
    """
    steps = [T.RandomHorizontalFlip(0.5)] if train else []
    steps += [
        T.ToDtype(torch.float, scale=True),
        T.ToPureTensor(),
    ]
    return T.Compose(steps)






def eval_model(model, device):
    """Run a detection model on the tutorial sample image and plot the result.

    Args:
        model: detection model (Mask R-CNN style) returning, per image, a dict
            with "boxes", "labels", "scores" and "masks".
        device: torch device the model expects its inputs on.

    Returns:
        The annotated uint8 image tensor of shape (3, H, W), with predicted
        boxes and segmentation masks drawn on it.
    """
    image = read_image("../_static/img/tv_tutorial/tv_image05.png")
    eval_transform = get_transform(train=False)
    model.eval()
    with torch.no_grad():
        x = eval_transform(image)
        # convert RGBA -> RGB and move to device
        x = x[:3, ...].to(device)
        predictions = model([x, ])
        pred = predictions[0]

    # Rescale to [0, 255] uint8 for the drawing utilities. Guard the
    # denominator so a constant image does not divide by zero (NaN/inf
    # would be undefined after the uint8 cast).
    lo = image.min()
    hi = image.max()
    scale = (hi - lo).clamp(min=1)
    image = (255.0 * (image - lo) / scale).to(torch.uint8)
    image = image[:3, ...]

    # Tutorial model is a single-class pedestrian detector, so only the
    # score varies per detection; the raw label id is not needed.
    pred_labels = [f"pedestrian: {score:.3f}" for score in pred["scores"]]
    pred_boxes = pred["boxes"].long()
    output_image = draw_bounding_boxes(image, pred_boxes, pred_labels, colors="red")

    # Binarize soft masks at 0.7 and drop the channel dim expected by
    # draw_segmentation_masks.
    masks = (pred["masks"] > 0.7).squeeze(1)
    output_image = draw_segmentation_masks(output_image, masks, alpha=0.5, colors="blue")

    plt.figure(figsize=(12, 12))
    plt.imshow(output_image.permute(1, 2, 0))
    return output_image