# Silence all Python warnings up front (e.g. torchvision pretrained-weight
# deprecation chatter) before the heavy imports below.
import warnings

warnings.filterwarnings('ignore')
warnings.simplefilter('ignore')
import os
import random
import cv2
import numpy as np
import torch
import torchvision
# pytorch-grad-cam: CAM variants plus Faster R-CNN-specific target/reshape helpers.
from pytorch_grad_cam import AblationCAM, EigenCAM
from pytorch_grad_cam.ablation_layer import AblationLayerFasterRCNN
from pytorch_grad_cam.utils.model_targets import FasterRCNNBoxScoreTarget
from pytorch_grad_cam.utils.reshape_transforms import fasterrcnn_reshape_transform
from pytorch_grad_cam.utils.image import show_cam_on_image, scale_accross_batch_and_channels, scale_cam_image

def predict(input_tensor, model, device, detection_threshold):
    """Run the detector and keep detections scoring above the threshold.

    Args:
        input_tensor: batched image tensor (1, C, H, W), already on ``device``.
        model: torchvision detection model in eval mode; returns a list of
            dicts with 'labels', 'scores' and 'boxes' tensors.
        device: kept for interface compatibility; the tensor is expected to
            already be on the right device (unused here).
        detection_threshold: minimum confidence score to keep a detection.

    Returns:
        (boxes, classes, labels, indices): int32 ndarray of [x1, y1, x2, y2]
        boxes, COCO class-name strings, raw class ids, and the positions of
        the kept detections in the model output.
    """
    # Inference only — no autograd graph is needed to extract boxes, and
    # disabling it avoids keeping activations alive (detach() also becomes
    # unnecessary).
    with torch.no_grad():
        outputs = model(input_tensor)

    pred_labels = outputs[0]['labels'].cpu().numpy()
    pred_scores = outputs[0]['scores'].cpu().numpy()
    pred_bboxes = outputs[0]['boxes'].cpu().numpy()
    # NOTE: relies on the module-level coco_names list defined below in this file.
    pred_classes = [coco_names[i] for i in pred_labels]

    # Keep only detections at or above the confidence threshold.
    indices = [i for i, score in enumerate(pred_scores) if score >= detection_threshold]
    classes = [pred_classes[i] for i in indices]
    labels = [pred_labels[i] for i in indices]
    boxes = np.int32([pred_bboxes[i] for i in indices])
    return boxes, classes, labels, indices


def draw_boxes(boxes, labels, classes, image):
    """Overlay one rectangle per detection on *image* and return it.

    Example inputs::

        boxes   [[ 842  583  978  667]
                 [  77  331  164  436]
                 [1512  419 1690  508]
                 [ 590  354  754  454]]
        classes ['bird', 'bird', 'bird', 'bird']
        labels  [16, 16, 16, 16]

    Args:
        boxes: array-like of [x1, y1, x2, y2] corners.
        labels: class ids, used to index the module-level COLORS table.
        classes: class-name strings; currently unused because the text
            overlay below is disabled, kept for interface stability.
        image: HxWx3 ndarray, drawn on in place.

    Returns:
        The same image array with the rectangles drawn.
    """
    for box, label in zip(boxes, labels):
        x1, y1, x2, y2 = (int(coord) for coord in box)
        cv2.rectangle(image, (x1, y1), (x2, y2), COLORS[label], 5)
        # Class-name text overlay, intentionally disabled:
        # cv2.putText(image, classes[i], (x1, y1 - 5),
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.8, COLORS[label], 2,
        #             lineType=cv2.LINE_AA)
    return image

# The 91-entry COCO category list used by torchvision detection models;
# 'N/A' entries are category ids unused by the dataset, and index 0 is the
# background class.
coco_names = ['__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', \
              'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 
              'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 
              'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella',
              'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
              'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
              'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass', 'cup', 'fork',
              'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
              'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
              'potted plant', 'bed', 'N/A', 'dining table', 'N/A', 'N/A', 'toilet',
              'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
              'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', 'clock', 'vase',
              'scissors', 'teddy bear', 'hair drier', 'toothbrush']


# This will help us create a different color for each class
# (one random BGR/RGB triple per class id; resampled on every run).
COLORS = np.random.uniform(0, 255, size=(len(coco_names), 3))


import requests
import torchvision
from PIL import Image

# Remote example image from the grad-cam repo, kept for reference:
# image_url = "https://raw.githubusercontent.com/jacobgil/pytorch-grad-cam/master/examples/both.png"
# image = np.array(Image.open(requests.get(image_url, stream=True).raw))

# Local frame to analyze — despite the name, this is a filesystem path.
image_url = '/DATA3_DB7/data/weixionglin/AVQA/boxing/Boxing_QP16/Boxing_QP16-010.jpeg'
# image_url = '/DATA3_DB7/data/weixionglin/AVQA/car/Car_QP16/Car_QP16-001.jpeg'
# image_url = '/DATA3_DB7/data/weixionglin/AVQA/goose/Goose_QP16/Goose_QP16-001.jpeg'

image = np.array(Image.open(image_url))

# Float copy scaled to [0, 1] for blending with the CAM heatmap later.
image_float_np = np.float32(image) / 255
# define the torchvision image transforms
transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor(),
])

input_tensor = transform(image)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_tensor = input_tensor.to(device)
# Add a batch dimension:
input_tensor = input_tensor.unsqueeze(0)

# COCO-pretrained Faster R-CNN detector, inference mode.
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
model.eval().to(device)

# Run the model and display the detections
boxes, classes, labels, indices = predict(input_tensor, model, device, 0.9)
print('boxes', type(boxes), boxes)
print('classes', type(classes), classes)
print('labels', type(labels), labels)
print('indices', type(indices), indices)


def generate_boxes_random(num=None, img_size=(1920, 1080), box_width=224):
    """Generate uniformly-random square boxes fully inside the image.

    Args:
        num: number of boxes; defaults to ``width // 200`` when None.
            (Previously this parameter existed but was silently ignored —
            it is now honored when passed explicitly.)
        img_size: (width, height) of the target image in pixels.
        box_width: side length of each square box in pixels.

    Returns:
        (boxes, labels, classes, indices): int32 boxes [x1, y1, x2, y2],
        dummy 'bird' labels/class names, and running indices.
    """
    width, height = img_size
    if num is None:
        num = width // 200
    labels = [16] * num
    classes = ['bird'] * num
    indices = list(range(num))
    half_box_width = box_width // 2
    # Sample centers so that every box stays fully inside the image:
    # center x in [half, width - half], center y in [half, height - half].
    centers = np.random.rand(num, 2)
    centers[:, 0] = half_box_width + centers[:, 0] * (width - box_width)
    centers[:, 1] = half_box_width + centers[:, 1] * (height - box_width)
    boxes = np.zeros((num, 4))
    boxes[:, 0] = centers[:, 0] - half_box_width
    boxes[:, 1] = centers[:, 1] - half_box_width
    boxes[:, 2] = centers[:, 0] + half_box_width
    boxes[:, 3] = centers[:, 1] + half_box_width
    boxes = boxes.astype(np.int32)
    print('boxes', boxes)
    return boxes, labels, classes, indices


# Draw random boxes
# boxes, labels, classes, indices = generate_boxes_random()
# Draw the detector's boxes on the original frame (modifies it in place).
image = draw_boxes(boxes, labels, classes, image)

# Show the image:
img = Image.fromarray(image)
img.save('random_boxes.png')

# raise os.error

# EigenCAM over the whole FPN backbone; the reshape transform collapses the
# multi-scale FPN feature maps into a single spatial activation map.
target_layers = [model.backbone]
targets = [FasterRCNNBoxScoreTarget(labels=labels, bounding_boxes=boxes)]
cam = EigenCAM(model,
               target_layers, 
               use_cuda=torch.cuda.is_available(),
               reshape_transform=fasterrcnn_reshape_transform)

grayscale_cam = cam(input_tensor, targets=targets)
print('grayscale_cam', type(grayscale_cam), grayscale_cam)
# NOTE(review): grayscale_cam appears to be float; Image.fromarray on a
# float32 array yields a mode-'F' image, so values are not rounded to uint8
# before convert/save — confirm the saved PNG looks as intended (an
# astype(np.uint8) may be wanted here).
img = Image.fromarray(grayscale_cam[0, :] * 255)
img = img.convert('RGB')
img.save('grayscale_cam.png')


# Take the first image in the batch:
grayscale_cam = grayscale_cam[0, :]
cam_image = show_cam_on_image(image_float_np, grayscale_cam, use_rgb=True)


def generate_boxes_attention(grayscale_cam, box_width=224, num=None):
    """Generate square candidate boxes over the CAM, pinning the first few.

    Args:
        grayscale_cam: 2-D (height, width) attention map; only its shape is
            used here.
        box_width: side length of each square box in pixels.
        num: number of boxes; defaults to ``width // 200`` when None.
            (Previously this parameter existed but was silently ignored.)

    Returns:
        (boxes, labels, classes, indices): int32 boxes [x1, y1, x2, y2],
        dummy 'bird' labels/class names, and running indices.
    """
    height, width = grayscale_cam.shape
    if num is None:
        num = width // 200
    labels = [16] * num
    classes = ['bird'] * num
    indices = list(range(num))
    half_box_width = box_width // 2
    # Random centers keep every box fully inside the map...
    centers = np.random.rand(num, 2)
    centers[:, 0] = half_box_width + centers[:, 0] * (width - box_width)
    centers[:, 1] = half_box_width + centers[:, 1] * (height - box_width)
    # ...then the first few are pinned to hand-picked positions.
    # NOTE(review): these look tuned for a ~1920x1080 map and may fall
    # outside smaller CAMs — no clipping is applied (same as before).
    fixed_centers = [[1250, 550], [750, 500], [950, 300],
                     [200, 900], [200, 400], [750, 900]]
    # Slice guards against num < 6, which previously raised IndexError.
    for i, center in enumerate(fixed_centers[:num]):
        centers[i] = center
    boxes = np.zeros((num, 4))
    boxes[:, 0] = centers[:, 0] - half_box_width
    boxes[:, 1] = centers[:, 1] - half_box_width
    boxes[:, 2] = centers[:, 0] + half_box_width
    boxes[:, 3] = centers[:, 1] + half_box_width
    boxes = boxes.astype(np.int32)
    print('grayscale_cam.shape', grayscale_cam.shape)
    print('boxes', boxes)
    return boxes, labels, classes, indices


'''
generate bboxes
'''
# Alternative box sources (random or attention-guided) — currently disabled;
# the detector boxes from predict() above are used instead.
# boxes, labels, classes, indices = generate_boxes_random()
# boxes, labels, classes, indices = generate_boxes_attention(grayscale_cam=grayscale_cam)


# And lets draw the boxes again:
# image_with_bounding_boxes = draw_boxes(boxes, labels, classes, cam_image)
image_with_bounding_boxes = cam_image

img = Image.fromarray(image_with_bounding_boxes)
img.save('detection.png')
