# -*- coding: utf-8 -*-
import os
import numpy as np
import cv2
import random
import argparse
from pathlib import Path

from PIL import Image
import util.misc as utils

import torch
from torch import nn
import torchvision.transforms as T
torch.set_grad_enabled(False)

import torch.nn.functional as F
                                
def get_args_parser():
    """Build the argument parser holding all model/training/dataset flags.

    Returns an ``argparse.ArgumentParser`` created with ``add_help=False`` so
    it can be used as a parent parser (it is consumed that way by the script
    parser defined later in this file).
    """
    parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_backbone', default=1e-5, type=float)
    parser.add_argument('--batch_size', default=2, type=int)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--epochs', default=90, type=int)
    parser.add_argument('--lr_drop', default=60, type=int)
    parser.add_argument('--clip_max_norm', default=0.1, type=float,
                        help='gradient clipping max norm')

    # Model parameters
    parser.add_argument('--frozen_weights', type=str, default=None,
                        help="Path to the pretrained model. If set, only the mask head will be trained")
    # * Backbone
    parser.add_argument('--backbone', default='resnet50', type=str,
                        help="Name of the convolutional backbone to use")
    parser.add_argument('--dilation', action='store_true',
                        help="If true, we replace stride with dilation in the last convolutional block (DC5)")
    parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
                        help="Type of positional embedding to use on top of the image features")
    parser.add_argument('--fpn', action='store_true')

    # * Transformer
    parser.add_argument('--enc_layers', default=6, type=int,
                        help="Number of encoding layers in the transformer")
    parser.add_argument('--dec_layers_hopd', default=3, type=int,
                        help="Number of hopd decoding layers in the transformer")
    parser.add_argument('--dec_layers_interaction', default=3, type=int,
                        help="Number of interaction decoding layers in the transformer")
    parser.add_argument('--dim_feedforward', default=2048, type=int,
                        help="Intermediate size of the feedforward layers in the transformer blocks")
    parser.add_argument('--hidden_dim', default=256, type=int,
                        help="Size of the embeddings (dimension of the transformer)")
    parser.add_argument('--dropout', default=0.1, type=float,
                        help="Dropout applied in the transformer")
    parser.add_argument('--nheads', default=8, type=int,
                        help="Number of attention heads inside the transformer's attentions")
    parser.add_argument('--num_queries', default=100, type=int,
                        help="Number of query slots")
    parser.add_argument('--pre_norm', action='store_true')

    # * Segmentation
    parser.add_argument('--masks', action='store_true',
                        help="Train segmentation head if the flag is provided")

    # HOI
    parser.add_argument('--num_obj_classes', type=int, default=80,
                        help="Number of object classes")
    parser.add_argument('--num_verb_classes', type=int, default=117,
                        help="Number of verb classes")
    parser.add_argument('--pretrained', type=str, default='',
                        help='Pretrained model path')
    parser.add_argument('--subject_category_id', default=0, type=int)
    parser.add_argument('--verb_loss_type', type=str, default='focal',
                        help='Loss type for the verb classification')

    # Loss
    # NOTE: these two flags are negated stores -- passing --no_aux_loss sets
    # args.aux_loss=False; by default aux_loss/use_matching are True.
    parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
                        help="Disables auxiliary decoding losses (loss at each layer)")
    parser.add_argument('--no_use_matching', dest='use_matching', action='store_false')
    # parser.add_argument('--use_matching', action='store_true',
    #                     help="Use obj/sub matching 2class loss in first decoder, default not use")
    parser.add_argument('--hoi_thres', default=0.2, type=float)

    # * Matcher
    parser.add_argument('--set_cost_class', default=1, type=float,
                        help="Class coefficient in the matching cost")
    parser.add_argument('--set_cost_bbox', default=2.5, type=float,
                        help="L1 box coefficient in the matching cost")
    parser.add_argument('--set_cost_giou', default=1, type=float,
                        help="giou box coefficient in the matching cost")
    parser.add_argument('--set_cost_obj_class', default=1, type=float,
                        help="Object class coefficient in the matching cost")
    parser.add_argument('--set_cost_verb_class', default=1, type=float,
                        help="Verb class coefficient in the matching cost")
    parser.add_argument('--set_cost_matching', default=1, type=float,
                        help="Sub and obj box matching coefficient in the matching cost")

    # * Loss coefficients
    parser.add_argument('--mask_loss_coef', default=1, type=float)
    parser.add_argument('--dice_loss_coef', default=1, type=float)
    parser.add_argument('--bbox_loss_coef', default=2.5, type=float)
    parser.add_argument('--giou_loss_coef', default=1, type=float)
    parser.add_argument('--obj_loss_coef', default=1, type=float)
    parser.add_argument('--verb_loss_coef', default=2, type=float)
    parser.add_argument('--focal_alpha', default=0.5, type=float, help='focal loss alpha')
    parser.add_argument('--alpha', default=0.5, type=float, help='focal loss alpha')
    parser.add_argument('--matching_loss_coef', default=1, type=float)
    parser.add_argument('--eos_coef', default=0.1, type=float,
                        help="Relative classification weight of the no-object class")

    # dataset parameters
    parser.add_argument('--dataset_file', default='coco')
    parser.add_argument('--source_dir',default="")
    parser.add_argument('--coco_path', type=str)
    parser.add_argument('--coco_panoptic_path', type=str)
    parser.add_argument('--remove_difficult', action='store_true')
    parser.add_argument('--hoi_path', type=str)

    parser.add_argument('--output_dir', default='',
                        help='path where to save, empty for no saving')
    parser.add_argument('--device', default='cuda',
                        help='device to use for training / testing')
    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--num_workers', default=2, type=int)

    # distributed training parameters
    parser.add_argument('--world_size', default=1, type=int,
                        help='number of distributed processes')
    parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')


    # decoupling training parameters
    parser.add_argument('--freeze_mode', default=0, type=int)
    parser.add_argument('--obj_reweight', action='store_true')
    parser.add_argument('--verb_reweight', action='store_true')
    parser.add_argument('--use_static_weights', action='store_true', 
                        help='use static weights or dynamic weights, default use dynamic')
    parser.add_argument('--queue_size', default=4704*1.0, type=float,
                        help='Maxsize of queue for obj and verb reweighting, default 1 epoch')
    parser.add_argument('--p_obj', default=0.7, type=float,
                        help='Reweighting parameter for obj')
    parser.add_argument('--p_verb', default=0.7, type=float,
                        help='Reweighting parameter for verb')
    # --top_k: number of highest-scoring queries visualized per image;
    # --NUM: number of random images sampled from --source_dir.
    parser.add_argument('--top_k', type=int, default=3)
    parser.add_argument('--NUM', type=int, default=10)    
    return parser


# COCO classes
# Index corresponds to the model's object-class id; 'N/A' entries are
# placeholder slots (presumably COCO category ids absent from the 80-class
# label set -- TODO confirm against the dataset mapping used in training).
CLASSES = [
    'N/A', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A',
    'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
    'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack',
    'umbrella', 'N/A', 'N/A', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
    'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
    'skateboard', 'surfboard', 'tennis racket', 'bottle', 'N/A', 'wine glass',
    'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
    'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
    'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', 'N/A',
    'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard',
    'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A',
    'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier',
    'toothbrush'
]

# colors for visualization (RGB triples in [0, 1])
COLORS = [[0.000, 0.447, 0.741], [0.850, 0.325, 0.098], [0.929, 0.694, 0.125],
          [0.494, 0.184, 0.556], [0.466, 0.674, 0.188], [0.301, 0.745, 0.933]]

# standard PyTorch mean-std input image normalization
# (resize shorter edge to 800 px, convert to tensor, ImageNet mean/std)
transform = T.Compose([
    T.Resize(800),
    T.ToTensor(),
    T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

def getJetColor(v, vmin, vmax):
    """Map a scalar value to a BGR "jet"-colormap color.

    ``v`` is clamped to ``[vmin, vmax]``, normalized to ``[0, 1]``, and run
    through the classic jet piecewise-linear ramps. Returns a numpy array of
    three floats (B, G, R) in ``[0, 255]``.

    Fixes over the original: the color ramps now honor an arbitrary
    (vmin, vmax) range (the thresholds previously used vmin/dv but the ramp
    values hard-coded the [0, 1] case), channel values are clipped to 255
    (the ramps could previously reach ~255.9), and vmin == vmax no longer
    divides by zero. For the (0, 1) range used elsewhere in this file the
    in-gamut results are unchanged.
    """
    c = np.zeros(3)
    # Clamp into range, then normalize to t in [0, 1].
    v = min(max(v, vmin), vmax)
    dv = vmax - vmin
    t = (v - vmin) / dv if dv > 0 else 0.0
    if t < 0.125:
        c[0] = 256 * (0.5 + t * 4)       # B: 0.5 -> 1
    elif t < 0.375:
        c[0] = 255
        c[1] = 256 * (t - 0.125) * 4     # G: 0 -> 1
    elif t < 0.625:
        c[0] = 256 * (-4 * t + 2.5)      # B: 1 -> 0
        c[1] = 255
        c[2] = 256 * (4 * (t - 0.375))   # R: 0 -> 1
    elif t < 0.875:
        c[1] = 256 * (-4 * t + 3.5)      # G: 1 -> 0
        c[2] = 255
    else:
        c[2] = 256 * (-4 * t + 4.5)      # R: 1 -> 0.5
    # Keep channels in valid 8-bit range (ramps can slightly exceed 255).
    return np.clip(c, 0, 255)

def colorize(gray_img):
    """Apply the jet colormap to a grayscale map (values assumed in [0, 1])."""
    colored = np.zeros(gray_img.shape + (3,))
    n_rows, n_cols = colored.shape[0], colored.shape[1]
    for row in range(n_rows):
        for col in range(n_cols):
            colored[row, col, :] = getJetColor(gray_img[row, col], 0, 1)
    return colored

# for output bounding box post-processing
def box_cxcywh_to_xyxy(x):
    """Convert (N, 4) boxes from (cx, cy, w, h) to corner (x1, y1, x2, y2)."""
    cx, cy, w, h = x.unbind(1)
    half_w = 0.5 * w
    half_h = 0.5 * h
    corners = (cx - half_w, cy - half_h, cx + half_w, cy + half_h)
    return torch.stack(corners, dim=1)

def rescale_bboxes(out_bbox, size):
    """Scale normalized (cx, cy, w, h) boxes to absolute xyxy pixel coords.

    `size` is a (width, height) pair, e.g. PIL's ``Image.size``.
    """
    img_w, img_h = size
    scale = torch.tensor([img_w, img_h, img_w, img_h], dtype=torch.float32)
    return box_cxcywh_to_xyxy(out_bbox) * scale


from models import build_model

parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()

model, criterion, postprocessors = build_model(args)


# Load pretrained weights if a checkpoint path was given. The original code
# branched on args.eval but both branches were identical, so the branch is
# removed.
if args.pretrained:
    checkpoint = torch.load(args.pretrained, map_location='cpu')
    print(args.pretrained)
    model = utils.load_model(model, checkpoint['model'])


model.eval()

# Sample NUM random images from the source directory.
NUM = args.NUM
source_dir = args.source_dir
imgpaths = [os.path.join(source_dir, item) for item in os.listdir(source_dir)]
random.shuffle(imgpaths)
imgpaths = imgpaths[:NUM]

output_dir = Path(args.output_dir)
try:
    os.makedirs(output_dir, exist_ok=True)
    print("Directory created successfully")
except OSError as error:
    # Report the actual failure instead of swallowing it silently.
    print("Directory can not be created: {}".format(error))

topN = args.top_k
# Main visualization loop: for each sampled image, pick the topN queries by
# verb confidence, re-run the model with forward hooks to capture attention
# maps, and save one heat-map figure per decoder stage.
for num_id, imgpath in enumerate(imgpaths, start=1):
    print("{} is start".format(num_id))
    im = Image.open(imgpath)

    img = transform(im).unsqueeze(0)

    # First pass: plain forward to score the queries.
    outputs = model(img)

    out_obj_logits = outputs['pred_obj_logits']
    out_verb_logits = outputs['pred_verb_logits']

    # Per-query best object-class probability (background column dropped)
    # and per-query verb probabilities.
    obj_scores = out_obj_logits.softmax(-1)[0, :, :-1].max(-1).values
    verb_scores = out_verb_logits.sigmoid()[0]

    # Rank queries by their strongest verb score instead of the object score.
    for index, verb_score in enumerate(verb_scores):
        obj_scores[index] = torch.max(verb_score)

    # Keep the queries scoring strictly above the (topN+1)-th best score.
    thres = np.sort(obj_scores.numpy())[::-1][topN]
    keep = obj_scores > thres

    out_sub_boxes = outputs['pred_sub_boxes']
    out_obj_boxes = outputs['pred_obj_boxes']

    # convert boxes from [0; 1] to image scales
    sub_boxes = rescale_bboxes(out_sub_boxes[0, keep], im.size)
    obj_boxes = rescale_bboxes(out_obj_boxes[0, keep], im.size)

    # use lists to store the outputs via up-values
    conv_features, enc_attn_weights, dec_attn_weights = [], [], []
    dec_step2_attn_weights, dec_step3_attn_weights = [], []
    hooks = [
        model.backbone[-2].register_forward_hook(
            lambda self, input, output: conv_features.append(output)
        ),
        model.transformer.encoder.layers[-1].self_attn.register_forward_hook(
            lambda self, input, output: enc_attn_weights.append(output[1])
        ),
        model.transformer.decoder.layers[-1].multihead_attn.register_forward_hook(
            lambda self, input, output: dec_attn_weights.append(output[1])
        ),
        model.transformer.human_decoder.layers[-1].multihead_attn.register_forward_hook(
            lambda self, input, output: dec_step3_attn_weights.append(output[1])
        ),
        model.transformer.interaction_decoder.layers[-1].multihead_attn.register_forward_hook(
            lambda self, input, output: dec_step2_attn_weights.append(output[1])
        ),
    ]

    # Second pass over the same input; the hooks capture the attention maps.
    outputs = model(img)

    for hook in hooks:
        hook.remove()

    # Unwrap the single captured value from each list.
    conv_features = conv_features[0]
    enc_attn_weights = enc_attn_weights[0]
    dec_attn_weights = dec_attn_weights[0]
    dec_step2_attn_weights = dec_step2_attn_weights[0]
    dec_step3_attn_weights = dec_step3_attn_weights[0]

    # Backbone feature-map resolution; attention rows are reshaped to (h, w).
    h, w = conv_features['0'].tensors.shape[-2:]

    img_cv = cv2.imread(imgpath)
    height, width, _ = img_cv.shape

    stem = imgpath.split('/')[-1][:-4]

    # One figure per decoder stage. The three stages differ only in which
    # attention weights are drawn and the output suffix, so they are rendered
    # by a single loop (the original duplicated this code three times).
    for attn_weights, step_name in (
            (dec_attn_weights, 'step1'),        # human-object pair decoder
            (dec_step2_attn_weights, 'step2'),  # interaction decoder
            (dec_step3_attn_weights, 'step3'),  # human decoder
    ):
        panels = []
        for idx, (sx1, sy1, sx2, sy2), (ox1, oy1, ox2, oy2) in zip(
                keep.nonzero(as_tuple=False), sub_boxes, obj_boxes):
            img_copy = img_cv.copy()

            # Normalize attention to [0, 255], upsample to the image size,
            # renormalize to [0, 1], then apply the jet colormap.
            feature_map = attn_weights[0, idx].view(h, w).numpy()
            feature_map = ((feature_map - feature_map.min()) / (feature_map.max() - feature_map.min())) * 255
            feature_map = cv2.resize(feature_map, (width, height), interpolation=cv2.INTER_CUBIC)
            feature_map = (feature_map - feature_map.min()) / (feature_map.max() - feature_map.min())
            feature_map = colorize(feature_map)

            # Blend heat map with the image; subject box in green, object
            # box in red (OpenCV BGR order).
            img_copy = img_copy * 0.5 + feature_map * 0.5
            cv2.rectangle(img_copy, (int(sx1), int(sy1)), (int(sx2), int(sy2)), (0, 255, 0), 2)
            cv2.rectangle(img_copy, (int(ox1), int(oy1)), (int(ox2), int(oy2)), (0, 0, 255), 2)

            panels.append(img_copy)

        # One kept pair -> single panel; otherwise tile panels horizontally.
        out_img = panels[0] if len(panels) == 1 else np.concatenate(tuple(panels), axis=1)
        cv2.imwrite('{}/{}_{}.jpg'.format(output_dir, stem, step_name), out_img)