#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import torch
import os
import csv
import numpy as np
import matplotlib.pyplot as plt
import math

from detectron2.data import DatasetCatalog, MetadataCatalog, build_detection_test_loader
from detectron2.modeling.meta_arch.retinanet import permute_to_N_HWA_K
from detectron2.structures import Boxes, pairwise_iou, Instances
from detectron2.layers import cat, nonzero_tuple, batched_nms

from .dataset_utils import get_det_dicts
from .detector_utils import PTLDatasetMapper
from pathlib import Path
from tqdm import tqdm
from typing import List


def compute_angle(a, b):
    """Return the angle in degrees whose tangent is a/b.

    Note: uses atan (not atan2), so the result lies in (-90, 90) degrees
    and b must be non-zero.
    """
    ratio = a / b
    return math.degrees(math.atan(ratio))


def compute_distance(a, b):
    """Return the Euclidean distance sqrt(a**2 + b**2).

    Uses math.hypot, which matches sqrt(a**2 + b**2) for ordinary inputs
    while avoiding intermediate overflow/underflow for extreme magnitudes.
    """
    return math.hypot(a, b)


def dist2prob(dist, tau=0.0):
    """Turn an array of distances into a probability distribution.

    With tau == 0.0 (sentinel) the weights are inverse distances; otherwise a
    softmax over -dist/tau is used. Smaller distances get larger probability.
    """
    if tau == 0.0:
        weights = 1 / dist
    else:
        weights = np.exp(-dist / tau)
    return weights / np.sum(weights)


def calc_md(feature, cov_inv):
    """Return the squared Mahalanobis-style distance f @ cov_inv @ f."""
    projected = np.matmul(feature, cov_inv)
    return np.dot(projected, feature)


def plot_md(df, output_path, display_cdf=True, md_clip=100):
    """Plot a (cumulative) histogram of Mahalanobis distances and dump a CSV.

    Writes "cdf.jpg" (histogram of the clipped distances) and "mdistance.csv"
    (raw, unclipped rows from *df*) into *output_path*.

    Args:
        df: DataFrame with "img_name", "score" and "mdistance" columns.
        output_path: existing directory receiving the two output files.
        display_cdf: plot the cumulative distribution when True.
        md_clip: distances above this value are clipped for plotting only.
    """
    df_sorted = df.sort_values(by=["mdistance"])
    # Clip only the "mdistance" column. The previous row-mask assignment
    # (df_sorted[mask] = md_clip) overwrote EVERY column of matching rows,
    # turning img_name/score into the clip value as well.
    df_sorted.loc[df_sorted["mdistance"] > md_clip, "mdistance"] = md_clip
    ax = df_sorted.hist(column=["mdistance"], bins=100, figsize=(12, 6), cumulative=display_cdf)

    ax[0][0].set_title("Mdistance_CDF")
    ax[0][0].set_ylabel("# of accumulated insts")
    # Label reflects the actual clip parameter instead of a hardcoded 100.
    ax[0][0].set_xlabel(f"mdistance (clipped at {md_clip})")

    fig = ax[0][0].get_figure()
    fig.savefig(os.path.join(output_path, "cdf.jpg"))

    # The CSV keeps the raw (unclipped) values from the original frame.
    with open(os.path.join(output_path, "mdistance.csv"), "w", newline="") as f:
        w = csv.writer(f, delimiter=',')
        for index, row in df.iterrows():
            w.writerow([row["img_name"], row["score"], row["mdistance"]])
            

def plot_hist_v2(df, output_path, name, font_size=60):
    """Save a large-font histogram of df["mdistance"] as <name>.jpg."""
    axes = df.hist(column=["mdistance"], bins=range(0, 100, 1), figsize=(12, 8),
                   xlabelsize=font_size, ylabelsize=font_size)
    axis = axes[0][0]

    axis.set_xticks(np.arange(0, 101, 100))
    axis.set_yticks(np.arange(0, 701, 100))
    axis.set_xlabel("domain gap", fontsize=font_size)

    # Hide the intermediate y tick labels, keeping only the extremes visible.
    plt.setp(axis.get_yticklabels()[1:7], visible=False)

    figure = axis.get_figure()
    figure.gca().set_title("")
    figure.savefig(os.path.join(output_path, name + ".jpg"), bbox_inches='tight')


def plot_hist(df, output_path, name):
    """Save a histogram of df["mdistance"] (bins of width 1) as <name>.jpg."""
    axes = df.hist(column=["mdistance"], bins=range(0, 100, 1), figsize=(12, 6))
    axis = axes[0][0]

    axis.set_title("Sampled SynPerson")
    axis.set_ylabel("# of selected imgs")
    axis.set_xlabel("mdistance")

    axis.get_figure().savefig(os.path.join(output_path, name + ".jpg"))
    

def plot_hr_map(data, data_name, output_path):
    """Render a height-vs-radius heat map with per-cell value annotations.

    Assumes *data* is a 10x6 array (height bins x radius bins) — the extent
    maps columns to 0-30m radius and rows to 0-50m height.
    """
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(111)
    im = ax.imshow(data, cmap='jet', interpolation='none', aspect='auto', extent=[0, 30, 50, 0])

    # Each cell is 5m x 5m; offset by half a cell to annotate at the centre.
    half_w, half_h = 2.5, 2.5
    xs = np.linspace(start=0, stop=30, num=6, endpoint=False)
    ys = np.linspace(start=0, stop=50, num=10, endpoint=False)

    for row, y0 in enumerate(ys):
        for col, x0 in enumerate(xs):
            ax.text(x0 + half_w, y0 + half_h, np.round(data[row, col], 2),
                    color='black', ha='center', va='center')

    ax.set_xlabel("radius (m)")
    ax.set_ylabel("height (m)")
    ax.set_title(data_name)

    fig.colorbar(im)
    plt.savefig(os.path.join(output_path, data_name + ".jpg"))
    
    
def plot_hr_map_v2(data, data_name, output_path, max_value=1.0, font_size=60):
    """Render a sum-normalized height-vs-radius heat map with corner labels."""
    fig = plt.figure(figsize=(14, 12))
    ax = fig.add_subplot(111)
    ax.imshow(data / np.sum(data), cmap='Blues', interpolation='none',
              aspect='auto', vmin=0, vmax=max_value)

    # Label only the first and last bins on each axis.
    ax.set_xticks([0, 5])
    ax.set_yticks([0, 9])
    ax.set_xticklabels(['5m', '30m'], fontsize=font_size)
    ax.set_yticklabels(['5m', '50m'], fontsize=font_size)

    plt.savefig(os.path.join(output_path, data_name + ".jpg"), bbox_inches='tight')
    
    
def plot_dv_map(data, data_name, output_path):
    """Render a distance-vs-viewing-angle heat map with per-cell annotations.

    Assumes *data* is a 3x6 array (distance bins x angle bins) — the extent
    maps columns to 0-90 degrees and rows to 0-60m distance.
    """
    fig = plt.figure(figsize=(12, 8))
    ax = fig.add_subplot(111)
    im = ax.imshow(data, cmap='jet', interpolation='none', aspect='equal', extent=[0, 90, 60, 0])

    # Cells are 15 degrees wide and 20m tall; annotate at each cell centre.
    half_w, half_h = 7.5, 10
    xs = np.linspace(start=0, stop=90, num=6, endpoint=False)
    ys = np.linspace(start=0, stop=60, num=3, endpoint=False)

    for row, y0 in enumerate(ys):
        for col, x0 in enumerate(xs):
            ax.text(x0 + half_w, y0 + half_h, np.round(data[row, col], 2),
                    color='black', ha='center', va='center')

    ax.set_xlabel("viewing angle (degree)")
    ax.set_ylabel("distance (m)")
    ax.set_title(data_name)

    fig.colorbar(im)
    plt.savefig(os.path.join(output_path, data_name + ".jpg"))
            
            
def permute_to_N_HWA_L(tensor):
    """Flatten an (N, C, H, W) feature map into (N, H*W, C) feature vectors."""
    N, C, H, W = tensor.shape
    # Move channels last, then collapse the spatial grid into a single axis.
    channels_last = tensor.permute(0, 2, 3, 1)
    return channels_last.reshape(N, H * W, C)  # Size=(N, HW, feature_length)


def extr_conv_features(feature):
    """Stack each pixel's 3x3 neighbourhood along the channel axis.

    Input (N, C, Hi, Wi) -> output (N, 9*C, Hi, Wi). Neighbour channel order:
    top-left, top, top-right, left, centre, right, bottom-left, bottom,
    bottom-right, with zeros past the border (3x3 conv style padding).
    """
    padded = torch.nn.ZeroPad2d(1)(feature)  # zero border emulates 3x3 padding

    # Rolling the padded map by (dy, dx) aligns each neighbour with the centre.
    offsets = [(1, 1), (1, 0), (1, -1),
               (0, 1), (0, 0), (0, -1),
               (-1, 1), (-1, 0), (-1, -1)]
    shifted = [torch.roll(padded, shift, (2, 3)) for shift in offsets]
    stacked = torch.cat(shifted, dim=1)

    N, C, H, W = padded.shape
    # Crop the padding back off.
    return stacked[:, :, 1:(H - 1), 1:(W - 1)]  # Size=(N, 9*C, Hi, Wi)


def extr_features_fromDet(cfg, model, dataset_name, dataset_root, output_path, extr_mode):
    """Extract cls-subnet features of detections matched to "person" GT boxes.

    Runs a RetinaNet-style detectron2 model over the dataset, manually decodes
    its raw outputs (score threshold -> top-k -> NMS, mirroring RetinaNet
    inference), keeps class-0 ("person") detections whose IoU with a
    ground-truth person box exceeds 0.5, and appends one row per matched
    detection to <output_path>/features.csv as [image_id, score, feature...].

    NOTE(review): registers `dataset_name` in detectron2's DatasetCatalog as a
    side effect; calling this twice with the same name will raise.
    NOTE(review): only d[0] / results[0] are consumed per batch, which assumes
    the test loader yields batches of size 1 — confirm.

    Args:
        cfg: detectron2 config used to build PTLDatasetMapper.
        model: RetinaNet-like model exposing backbone, head, anchor_generator,
            box2box_transform and the test-time thresholds used below.
        dataset_name: name to register the dataset under.
        dataset_root: dataset root passed to get_det_dicts().
        output_path: output directory (created if missing).
        extr_mode: "conv" to stack 3x3 neighbourhood features, otherwise the
            raw cls_subnet output is used.

    Returns:
        int: total number of matched human instances written to the CSV.
    """
    # Setup dataloader
    DatasetCatalog.register(dataset_name, lambda: get_det_dicts(dataset_path=dataset_root)) 
    MetadataCatalog.get(dataset_name).thing_classes = ["person"]
    dataset_dicts = DatasetCatalog.get(dataset_name)
    data_loader = build_detection_test_loader(dataset_dicts, mapper=PTLDatasetMapper(cfg, is_train=True))
    
    # Setup output path
    Path(output_path).mkdir(parents=True, exist_ok=True)

    # feature extraction
    num_human_insts = 0
    with open(os.path.join(output_path, "features.csv"), "w") as f:
        w = csv.writer(f, delimiter = ',')
        with torch.no_grad():
            for idx, d in enumerate(tqdm(data_loader)):
                ## Load gt humans (class id 0 == "person")
                gt_instances = [x["instances"].to(model.device) for x in d]
                gt_human_boxes = gt_instances[0].gt_boxes[gt_instances[0].gt_classes == 0]
            
                ## Check if any human exists; skip images without person GTs
                if len(gt_human_boxes) == 0:
                    continue
            
                ## preprocess
                images = model.preprocess_image(d)
        
                ## backbone and neck
                features = model.backbone(images.tensor)
                features = [features[f] for f in model.head_in_features]
        
                ## anchors
                anchors = model.anchor_generator(features)
                
                ## Extract the features per FPN level
                extr_features = []
                for feature in features:
                    temp_feature = model.head.cls_subnet(feature)
                    ## Extract 3x3 features if needed
                    if extr_mode == "conv":
                        extr_feature = extr_conv_features(temp_feature)
                    else:
                        extr_feature = temp_feature
                    extr_features.append(extr_feature) # Size=(N, Dim, Hi, Wi)    
                extr_features = [permute_to_N_HWA_L(x) for x in extr_features]
            
                ## head
                pred_logits, pred_anchor_deltas = model.head(features)
                pred_logits = [permute_to_N_HWA_K(x, model.num_classes) for x in pred_logits]
                pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
            
                ## Generate the prediction results
                results: List[Instances] = []
                for img_idx, image_size in enumerate(images.image_sizes): # per image in the batch
                    pred_logits_per_image = [x[img_idx] for x in pred_logits]
                    deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
                    features_per_image = [x[img_idx] for x in extr_features]
                
                    boxes_all = []
                    scores_all = []
                    class_idxs_all = []
                    features_all = []
                
                    for box_cls_i, box_reg_i, anchors_i, features_i in zip(pred_logits_per_image, 
                                                                           deltas_per_image, 
                                                                           anchors, 
                                                                           features_per_image): # per level
                        ## Get the scores [HxWxAxK]
                        ## NOTE(review): sigmoid_ is in-place and mutates pred_logits;
                        ## safe here because each slice is consumed exactly once.
                        predicted_prob = box_cls_i.flatten().sigmoid_()

                        ## Check if the scores > thresh
                        keep_idxs = predicted_prob > model.test_score_thresh
                        predicted_prob = predicted_prob[keep_idxs]
                        topk_idxs = nonzero_tuple(keep_idxs)[0]
                    
                        ## Select topk
                        num_topk = min(model.test_topk_candidates, topk_idxs.size(0))
                        predicted_prob, idxs = predicted_prob.sort(descending=True)
                        predicted_prob = predicted_prob[:num_topk]
                        topk_idxs = topk_idxs[idxs[:num_topk]]

                        anchor_idxs = topk_idxs // model.num_classes # HxWxA
                        classes_idxs = topk_idxs % model.num_classes # K
                        features_idxs = anchor_idxs // 9 # HxW; assumes 9 anchors per cell — TODO confirm vs anchor_generator

                        box_reg_i = box_reg_i[anchor_idxs]
                        anchors_i = anchors_i[anchor_idxs]
                        features_i = features_i[features_idxs]

                        ## Decode the regression deltas into absolute boxes
                        predicted_boxes = model.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor)

                        boxes_all.append(predicted_boxes)
                        scores_all.append(predicted_prob)
                        class_idxs_all.append(classes_idxs)
                        features_all.append(features_i)
                    
                    ## Concatenate all levels, then class-aware NMS
                    boxes_all, scores_all, class_idxs_all, features_all = [
                        cat(x) for x in [boxes_all, scores_all, class_idxs_all, features_all]
                    ]
                    keep = batched_nms(boxes_all, scores_all, class_idxs_all, model.test_nms_thresh)
                    keep = keep[:model.max_detections_per_image]
                
                    ## max detection per img
                    boxes_all = boxes_all[keep]
                    scores_all = scores_all[keep]
                    class_idxs_all = class_idxs_all[keep]
                    features_all = features_all[keep]
                
                    ## Consider only the human class (class id 0)
                    boxes_all = boxes_all[class_idxs_all == 0]
                    scores_all = scores_all[class_idxs_all == 0]
                    features_all = features_all[class_idxs_all == 0]
                    class_idxs_all = class_idxs_all[class_idxs_all == 0]

                    results_per_image = Instances(image_size)
                    results_per_image.pred_boxes = Boxes(boxes_all)
                    results_per_image.scores = scores_all
                    results_per_image.pred_classes = class_idxs_all
                    results_per_image.features = features_all
                    results.append(results_per_image)

                ## Check if any human is detected
                pred_human_boxes = results[0].pred_boxes
                if len(pred_human_boxes) > 0:
                    ## Check if the detected human match any gt human
                    ## (best prediction per GT box, kept only if IoU > 0.5)
                    iou = pairwise_iou(gt_human_boxes, pred_human_boxes)
                    values, indices = torch.max(iou, 1)
                    indices_iou = indices[values > 0.5]
                    
                    ## According to the # of gts
                    extr_human_features = results[0].features[indices_iou].tolist()
                    extr_human_scores = results[0].scores[indices_iou].tolist()
            
                    for i in range(len(extr_human_features)):
                        w.writerow([d[0]['image_id'], extr_human_scores[i]] + extr_human_features[i])
                
                    num_human_insts += len(extr_human_features)
          
    return num_human_insts


def batched_extr_features_fromDet(cfg, model, dataset_name, dataset_root, output_path, extr_mode, batch_size=1):
    """Batched counterpart of extr_features_fromDet.

    Same pipeline (manual RetinaNet-style decode: score threshold -> top-k ->
    NMS, match class-0 detections to "person" GT boxes at IoU > 0.5, write
    [image_id, score, feature...] rows to <output_path>/features.csv), but
    handles batches of arbitrary size by looping over images in the batch and
    skipping GT-less images per image rather than per batch.

    NOTE(review): registers `dataset_name` in detectron2's DatasetCatalog as a
    side effect; calling this twice with the same name will raise.

    Args:
        cfg: detectron2 config used to build PTLDatasetMapper.
        model: RetinaNet-like model exposing backbone, head, anchor_generator,
            box2box_transform and the test-time thresholds used below.
        dataset_name: name to register the dataset under.
        dataset_root: dataset root passed to get_det_dicts().
        output_path: output directory (created if missing).
        extr_mode: "conv" to stack 3x3 neighbourhood features, otherwise the
            raw cls_subnet output is used.
        batch_size: images per batch for the test loader.

    Returns:
        int: total number of matched human instances written to the CSV.
    """
    # Setup dataloader
    DatasetCatalog.register(dataset_name, lambda: get_det_dicts(dataset_path=dataset_root)) 
    MetadataCatalog.get(dataset_name).thing_classes = ["person"]
    dataset_dicts = DatasetCatalog.get(dataset_name)
    data_loader = build_detection_test_loader(dataset_dicts, mapper=PTLDatasetMapper(cfg, is_train=True), batch_size=batch_size)
    
    # Setup output path
    Path(output_path).mkdir(parents=True, exist_ok=True)

    # feature extraction
    num_human_insts = 0
    with open(os.path.join(output_path, "features.csv"), "w") as f:
        w = csv.writer(f, delimiter = ',')
        with torch.no_grad():
            for d in tqdm(data_loader):
                # Network forward
                images = model.preprocess_image(d)
                features = model.backbone(images.tensor)
                features = [features[f] for f in model.head_in_features]
                anchors = model.anchor_generator(features)
                
                # Extract features per FPN level
                extr_features = []
                for feature in features:
                    temp_feature = model.head.cls_subnet(feature)
                    # Extract 3x3 features if needed
                    if extr_mode == "conv":
                        extr_feature = extr_conv_features(temp_feature)
                    else:
                        extr_feature = temp_feature
                    extr_features.append(extr_feature) # Size=(N, Dim, Hi, Wi)    
                extr_features = [permute_to_N_HWA_L(x) for x in extr_features]
            
                # head
                pred_logits, pred_anchor_deltas = model.head(features)
                pred_logits = [permute_to_N_HWA_K(x, model.num_classes) for x in pred_logits]
                pred_anchor_deltas = [permute_to_N_HWA_K(x, 4) for x in pred_anchor_deltas]
            
                # Generate the prediction results
                # per image
                for img_idx, image_size in enumerate(images.image_sizes):
                    # Load gt humans (class id 0 == "person")
                    gt_instances = d[img_idx]["instances"].to(model.device)
                    gt_human_boxes = gt_instances.gt_boxes[gt_instances.gt_classes == 0]
                    
                    # Check if any human exists; skip GT-less images
                    if len(gt_human_boxes) == 0:
                        continue
                    
                    pred_logits_per_image = [x[img_idx] for x in pred_logits]
                    deltas_per_image = [x[img_idx] for x in pred_anchor_deltas]
                    features_per_image = [x[img_idx] for x in extr_features]
                
                    boxes_all = []
                    scores_all = []
                    class_idxs_all = []
                    features_all = []
                    # per level
                    for box_cls_i, box_reg_i, anchors_i, features_i in zip(pred_logits_per_image, 
                                                                           deltas_per_image, 
                                                                           anchors, 
                                                                           features_per_image):
                        # Get the scores [HxWxAxK]
                        # NOTE(review): sigmoid_ is in-place and mutates pred_logits;
                        # safe here because each slice is consumed exactly once.
                        predicted_prob = box_cls_i.flatten().sigmoid_()

                        # Check if the scores > thresh
                        keep_idxs = predicted_prob > model.test_score_thresh
                        predicted_prob = predicted_prob[keep_idxs]
                        topk_idxs = nonzero_tuple(keep_idxs)[0]
                    
                        # Select topk
                        num_topk = min(model.test_topk_candidates, topk_idxs.size(0))
                        predicted_prob, idxs = predicted_prob.sort(descending=True)
                        predicted_prob = predicted_prob[:num_topk]
                        topk_idxs = topk_idxs[idxs[:num_topk]]

                        anchor_idxs = topk_idxs // model.num_classes # HxWxA
                        classes_idxs = topk_idxs % model.num_classes # K
                        features_idxs = anchor_idxs // 9 # HxW; assumes 9 anchors per cell — TODO confirm vs anchor_generator

                        box_reg_i = box_reg_i[anchor_idxs]
                        anchors_i = anchors_i[anchor_idxs]
                        features_i = features_i[features_idxs]

                        # Decode the regression deltas into absolute boxes
                        predicted_boxes = model.box2box_transform.apply_deltas(box_reg_i, anchors_i.tensor)

                        boxes_all.append(predicted_boxes)
                        scores_all.append(predicted_prob)
                        class_idxs_all.append(classes_idxs)
                        features_all.append(features_i)
                    
                    # Concatenate all levels, then class-aware NMS
                    boxes_all, scores_all, class_idxs_all, features_all = [
                        cat(x) for x in [boxes_all, scores_all, class_idxs_all, features_all]
                    ]
                    keep = batched_nms(boxes_all, scores_all, class_idxs_all, model.test_nms_thresh)
                    keep = keep[:model.max_detections_per_image]
                
                    # max detection per img
                    boxes_all = boxes_all[keep]
                    scores_all = scores_all[keep]
                    class_idxs_all = class_idxs_all[keep]
                    features_all = features_all[keep]
                
                    # Consider only the human class (class id 0)
                    boxes_all = boxes_all[class_idxs_all == 0]
                    scores_all = scores_all[class_idxs_all == 0]
                    features_all = features_all[class_idxs_all == 0]

                    # Check if any human is detected
                    pred_human_boxes = Boxes(boxes_all)
                    if len(pred_human_boxes) > 0:
                        # Check if the detected human match any gt human
                        # (best prediction per GT box, kept only if IoU > 0.5)
                        iou = pairwise_iou(gt_human_boxes, pred_human_boxes)
                        values, indices = torch.max(iou, 1)
                        indices_iou = indices[values > 0.5]
                    
                        # According to the # of gts
                        extr_human_features = features_all[indices_iou].tolist()
                        extr_human_scores = scores_all[indices_iou].tolist()
            
                        for i in range(len(extr_human_features)):
                            w.writerow([d[img_idx]['image_id'], extr_human_scores[i]] + extr_human_features[i])
                
                        num_human_insts += len(extr_human_features)
          
    return num_human_insts