#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import random
import pickle as pk
import pandas as pd
import cv2
import json
import copy

from detectron2.structures import BoxMode
from tqdm import tqdm
from pathlib import Path
   

def gen_img_crop(dicts, output_root, output_name, min_bbox_hw=10, target_hw=256):
    """Crop every sufficiently large annotation out of each image, resize it
    to fit a target_hw x target_hw square (aspect ratio preserved, zero
    padding), and write each crop to both the train and test output folders.

    Args:
        dicts: dataset dicts in detectron2 style ("file_name", "image_id",
            "annotations" whose "bbox" entries are read as [x, y, w, h]).
        output_root: root folder under which the crop folders are created.
        output_name: suffix appended to the "train"/"test" folder names.
        min_bbox_hw: boxes with width or height below this are skipped.
        target_hw: side length (px) of the square output crops.
    """
    output_train_path = os.path.join(output_root, "train" + output_name)
    output_test_path = os.path.join(output_root, "test" + output_name)
    Path(output_train_path).mkdir(parents=True, exist_ok=True)
    Path(output_test_path).mkdir(parents=True, exist_ok=True)

    for data in tqdm(dicts):
        img = cv2.imread(data["file_name"])
        img_id = data["image_id"]
        for idx, ann in enumerate(data["annotations"]):
            x, y, w, h = ann["bbox"]

            if w < min_bbox_hw or h < min_bbox_hw:
                continue

            # Make sure that all the coordinates are integer
            x, y, w, h = round(x), round(y), round(w), round(h)

            crop = img[y:y+h, x:x+w]
            # A box lying entirely outside the image yields an empty slice,
            # and cv2.resize raises on empty input — skip instead of crashing.
            if crop.size == 0:
                continue

            # Scale so that the longer side becomes exactly target_hw
            if h > w:
                scale = target_hw / h
                new_bbox_h, new_bbox_w = target_hw, round(scale * w)
            else:
                scale = target_hw / w
                new_bbox_h, new_bbox_w = round(scale * h), target_hw

            crop = cv2.resize(crop, (new_bbox_w, new_bbox_h), interpolation=cv2.INTER_LINEAR)

            # Zero-pad the shorter side so the result is target_hw x target_hw
            delta_w = target_hw - new_bbox_w
            delta_h = target_hw - new_bbox_h
            top, bottom = delta_h//2, delta_h-(delta_h//2)
            left, right = delta_w//2, delta_w-(delta_w//2)
            crop = cv2.copyMakeBorder(crop, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])

            # First crop keeps the bare image id; later crops get an index
            # suffix. The same crop goes to both the train and test folders.
            crop_name = img_id + ".jpg" if idx == 0 else img_id + "_" + str(idx) + ".jpg"
            cv2.imwrite(os.path.join(output_train_path, crop_name), crop)
            cv2.imwrite(os.path.join(output_test_path, crop_name), crop)
                
                
def count_dicts(dicts, prefix, min_bbox_hw=10):
    """Print a one-line summary of a dataset: image count, annotation count,
    and how many annotations have both bbox sides >= min_bbox_hw.

    Args:
        dicts: iterable of image dicts, each with an "annotations" list whose
            entries carry a "bbox" read as [x, y, w, h].
        prefix: label printed in front of the counts.
        min_bbox_hw: minimum width AND height for an annotation to count as valid.
    """
    n_imgs = 0
    n_insts = 0
    n_valid = 0
    for entry in dicts:
        n_imgs += 1
        anns = entry["annotations"]
        n_insts += len(anns)
        n_valid += sum(
            1 for a in anns
            if a["bbox"][2] >= min_bbox_hw and a["bbox"][3] >= min_bbox_hw
        )

    print("%s: %d imgs; %d insts; %d valid insts.\n" % (prefix, n_imgs, n_insts, n_valid))


def dump_dicts(dicts, output_path, name):
    """Serialize dataset dicts as JSON to <output_path>/<name>."""
    target = os.path.join(output_path, name)
    with open(target, "w") as fp:
        json.dump(dicts, fp)
        

def get_det_dicts(dataset_path, name="dicts.json"):
    """Load dataset dicts from the JSON file <dataset_path>/<name>."""
    json_path = os.path.join(dataset_path, name)
    with open(json_path, "r") as fp:
        dicts = json.load(fp)
    return dicts
        

def get_icg_dicts(dataset_root, mode="Train", scaling_ratio=4):
    """Build detectron2-style dataset dicts for the ICG drone dataset.

    Images with id <= 475 belong to the Train split, the rest to Test.
    Boxes are emitted in XYXY_ABS mode, read from a pickled
    {image_id: bbox_array} mapping where each box is indexed as two
    (x, y) points.

    Args:
        dataset_root: root folder of the ICG dataset.
        mode: "Train" or "Test" split selection.
        scaling_ratio: downscale factor applied to the native 6000x4000 size.
    """
    width = int(6000 / scaling_ratio)
    height = int(4000 / scaling_ratio)

    set_dir = "training_set_%dx%d" % (width, height)
    annotation_path = os.path.join(dataset_root, set_dir, "gt", "bounding_box", "bounding_boxes", "person", "imgIdToBBoxArray.p")
    image_path = os.path.join(dataset_root, set_dir, "images")

    # The annotations live in a single pickle file
    with open(annotation_path, "rb") as pickleFile:
        annotation_dict = pk.load(pickleFile)

    dataset_dicts = []
    for image_id, boxes in annotation_dict.items():
        # Split by numeric image id: <= 475 -> Train, > 475 -> Test
        if mode == "Train" and int(image_id) > 475:
            continue
        if mode == "Test" and int(image_id) <= 475:
            continue

        ann_dicts = [
            {
                "category_id": 0,
                "bbox_mode": BoxMode.XYXY_ABS,
                "bbox": [box[0, 0], box[0, 1], box[1, 0], box[1, 1]],
            }
            for box in boxes
        ]

        dataset_dicts.append({
            "file_name": os.path.join(image_path, image_id + ".jpg"),
            "height": height,
            "width": width,
            "image_id": str(image_id),
            "type": "uav",
            "annotations": ann_dicts,
        })

    return dataset_dicts


def get_oku_dicts(dataset_root, mode="Test-Set", 
                  sampling_rate=10, scaling_ratio=3, ignore_occ=False, ignore_lost=True):
    """Build detectron2-style dataset dicts for the Okutama-Action dataset.

    Frames are subsampled by `sampling_rate`; multiples of 180 (except frame
    0) are dropped to work around duplicated bboxes in the labels. Boxes are
    rescaled from the native 3840x2160 down to the extracted frame size and
    emitted in XYWH_ABS mode.

    Args:
        dataset_root: root folder of the dataset.
        mode: split folder name, e.g. "Train-Set" or "Test-Set".
        sampling_rate: keep only frames whose index is a multiple of this.
        scaling_ratio: downscale factor applied to the native resolution.
        ignore_occ: drop annotations flagged as occluded.
        ignore_lost: drop annotations flagged as lost (not visible).
    """
    drone_dirs = ["Drone1", "Drone2"]
    time_dirs = ["Morning", "Noon"]
    width = int(3840 / scaling_ratio)
    height = int(2160 / scaling_ratio)

    dataset_path = os.path.join(dataset_root, mode)

    # One label file per video clip
    label_path = os.path.join(dataset_path, "Labels/SingleActionLabels/3840x2160/")

    dataset_dicts = []
    for label_name in os.listdir(label_path):
        # Space-separated track rows, one per (track, frame)
        table = pd.read_csv(os.path.join(label_path, label_name), sep=" ", header=None,
                            names=["Track_ID", "xmin", "ymin", "xmax", "ymax", "frame",
                                   "lost", "occluded", "generated", "label", "actions"])
        frames_gp = table.groupby(["frame"])

        # The label file name encodes drone id, time of day and scene id
        drone_id, time_id, scene_id = label_name.split(".")[:3]
        video_id = drone_id + "_" + time_id + "_" + scene_id
        img_dir = os.path.join(dataset_path, drone_dirs[int(drone_id) - 1], time_dirs[int(time_id) - 1],
                               "Extracted-Frames-" + str(width) + "x" + str(height),
                               os.path.splitext(label_name)[0])

        for frame_file in os.listdir(img_dir):
            frame_no = int(os.path.splitext(frame_file)[0])
            # Subsample frames
            if frame_no % sampling_rate != 0:
                continue
            # Skip frames with duplicated bboxes (every 180th frame except 0)
            if frame_no % 180 == 0 and frame_no != 0:
                continue

            img_dict = {
                "file_name": os.path.join(img_dir, frame_file),
                "height": height,
                "width": width,
                "image_id": video_id + "_" + str(frame_no),
                "type": "uav",
            }

            # A frame may legitimately have no annotation rows at all
            ann_dicts = []
            if frame_no in frames_gp.groups.keys():
                for _, row in frames_gp.get_group(frame_no).iterrows():
                    # Drop invisible / occluded annotations when requested
                    if row["lost"] == 1 and ignore_lost:
                        continue
                    if row["occluded"] == 1 and ignore_occ:
                        continue
                    ann_dicts.append({
                        "category_id": 0,
                        "bbox_mode": BoxMode.XYWH_ABS,
                        # XYXY at full resolution -> XYWH at the scaled size
                        "bbox": [row["xmin"] / scaling_ratio,
                                 row["ymin"] / scaling_ratio,
                                 (row["xmax"] - row["xmin"]) / scaling_ratio,
                                 (row["ymax"] - row["ymin"]) / scaling_ratio],
                    })
            img_dict["annotations"] = ann_dicts
            dataset_dicts.append(img_dict)

    return dataset_dicts


def get_vis_dicts(dataset_root, mode="test-dev", cat=None):
    """Build detectron2-style dataset dicts for VisDrone2019-DET.

    Args:
        dataset_root: folder containing the VisDrone2019-DET-<mode> dirs.
        mode: split suffix ("train", "val", "test-dev", ...).
        cat: "ped" to keep only pedestrians, "peo" to keep only people;
            anything else keeps both human classes.

    Returns:
        List of image dicts with XYWH_ABS person annotations (category_id 0).
    """
    # Visdrone class:
    # [ignore, pedestrian, people, bicycle, car, van, truck, tricycle, awning-tricycle, bus, motor, others]
    if cat == "ped":
        print("\nConsider only pedestrian...")
        human_classes = [1]
    elif cat == "peo":
        print("\nConsider only people...")
        human_classes = [2]
    else:
        human_classes = [1, 2]

    # List all label files; both paths are split-invariant, so build them once
    label_path = os.path.join(dataset_root, "VisDrone2019-DET-" + mode, "annotations")
    img_path = os.path.join(dataset_root, "VisDrone2019-DET-" + mode, "images")

    dataset_dicts = []
    for label_name in os.listdir(label_path):  # For each label file
        img_name = label_name.replace('txt', 'jpg')

        # Read the image only to obtain its true height/width
        img = cv2.imread(os.path.join(img_path, img_name))
        H, W, _ = img.shape

        # Create an img dict
        data = {}
        data["file_name"] = os.path.join(img_path, img_name)
        data["height"] = H
        data["width"] = W
        data["image_id"] = os.path.splitext(img_name)[0]
        data["type"] = "uav"

        ann_dicts = []
        with open(os.path.join(label_path, label_name)) as f:
            for label in f.readlines():  # For each annotation

                fields = label.strip().split(",")
                x = int(fields[0])
                y = int(fields[1])
                w = int(fields[2])
                h = int(fields[3])
                score = float(fields[4])
                # BUGFIX: this variable was previously also named `cat`,
                # shadowing the function parameter; renamed for clarity.
                class_id = int(fields[5])
                trun = float(fields[6])
                occu = float(fields[7])

                # Consider only the human classes
                if class_id not in human_classes:
                    continue

                # score == 0 marks regions that evaluation ignores — skip them
                if score == 0:
                    continue

                ann_dict = {}
                ann_dict["category_id"] = 0
                ann_dict["bbox_mode"] = BoxMode.XYWH_ABS
                ann_dict["bbox"] = [x, y, w, h]
                ann_dict["score"] = score
                ann_dict["truncation"] = trun
                ann_dict["occlusion"] = occu
                ann_dicts.append(ann_dict)

        data["annotations"] = ann_dicts
        dataset_dicts.append(data)

    return dataset_dicts


def get_syn_dicts(dataset_root, used_chars, used_poses, sampling_rate_angle=10, min_wh=10):
    """Build detectron2-style dataset dicts for the Archangel-Synthetic data.

    Loads one "desert_<char>[_<pose>]" dataset per character/pose pair,
    subsamples views by camera angle, converts center-based boxes to
    top-left XYWH_ABS, and drops images with no valid annotation.

    Args:
        dataset_root: root folder holding the per-variant dataset folders.
        used_chars: character names to include.
        used_poses: pose names to include ("" means no pose suffix).
        sampling_rate_angle: keep only angles divisible by this value.
        min_wh: boxes must be strictly larger than this in both dimensions.
    """
    # Build the per-character/pose dataset variant names
    variants = []
    for char in used_chars:
        for pose in used_poses:
            variants.append("%s_%s" % (char, pose) if pose != "" else "%s" % char)

    syn_dicts = []
    for variant in variants:
        dataset_name = "_".join(["desert", variant])
        json_file = os.path.join(dataset_root, dataset_name, "synthdata.json")

        with open(json_file) as f_in:
            raw_dicts = json.load(f_in)

        for data in raw_dicts:
            trial_id, img_name = data["image"].split("/")[:2]

            # NOTE(review): "scott" filenames appear to use a 1-based angle
            # index (hence the -1) — confirm against the generator.
            angle = str(int(img_name.split("_")[5]) - 1) if variant == "scott" else img_name.split("_")[5]
            if int(angle) % sampling_rate_angle != 0:
                continue

            ann_dicts = []
            for ant in data["annotations"]:
                box_w = ant["coordinates"]["width"]
                box_h = ant["coordinates"]["height"]
                # bbox must be larger than a certain size
                if box_w <= min_wh or box_h <= min_wh:
                    continue
                ann_dicts.append({
                    "category_id": 0,
                    "bbox_mode": BoxMode.XYWH_ABS,
                    # center-based (x, y) -> top-left corner XYWH
                    "bbox": [ant["coordinates"]["x"] - box_w/2,
                             ant["coordinates"]["y"] - box_h/2,
                             box_w, box_h],
                })

            # ignore the image if there is no valid annotation
            if not ann_dicts:
                continue

            syn_dicts.append({
                "file_name": os.path.join(dataset_root, dataset_name, trial_id, img_name),
                "height": 512,
                "width": 512,
                "image_id": trial_id + "_" + img_name.replace(".jpg", "") + "_" + variant,
                "type": "syn",
                "annotations": ann_dicts,
            })

    return syn_dicts


def init_dataset(opt):
    """Build and persist the iteration-0 datasets for the training loop.

    Loads the real dataset selected by opt.real_dataset_format, optionally
    subsamples it, loads the Archangel-Synthetic dicts, and writes all JSON
    dict files under ./datasets/<exp_id>/{det,md}/iter_0.
    """
    # Fixed seed so the subsampling below is reproducible
    random.seed(5271)

    # Output locations for detector (det) and missing-detection (md) dicts
    det_output_path = os.path.join("./datasets", opt.exp_id, "det", "iter_0")
    md_output_path = os.path.join("./datasets", opt.exp_id, "md", "iter_0")
    os.makedirs(det_output_path, exist_ok=True)
    os.makedirs(md_output_path, exist_ok=True)

    # Load the real dataset in the requested format
    print("Processing %s...\n" % opt.real_dataset_format)
    if opt.real_dataset_format == "icg":
        full_dicts = get_icg_dicts(dataset_root=opt.real_dataset_root, mode="Train")
    elif opt.real_dataset_format == "oku":
        full_dicts = get_oku_dicts(dataset_root=opt.real_dataset_root, mode="Train-Set",
                                   sampling_rate=10, scaling_ratio=3, ignore_occ=False, ignore_lost=True)
    else:
        full_dicts = get_vis_dicts(dataset_root=opt.real_dataset_root, mode="train")

    # Subsample by percentage ("per"), absolute count ("abs"), or keep all
    if opt.real_dataset_spl_type == "per":
        kept_dicts = random.sample(full_dicts, int(len(full_dicts) * 0.01 * opt.real_dataset_spl_num))
    elif opt.real_dataset_spl_type == "abs":
        kept_dicts = random.sample(full_dicts, opt.real_dataset_spl_num)
    else:
        kept_dicts = full_dicts

    # Report dataset statistics before/after subsampling
    count_dicts(full_dicts, "total")
    count_dicts(kept_dicts, "selected")

    # Archangel-Synthetic
    print("Processing Archangel-Synthetic...\n")
    syn_dataset_dicts = get_syn_dicts(dataset_root=opt.syn_dataset_root,
                                      used_chars=opt.syn_chars, used_poses=opt.syn_poses,
                                      sampling_rate_angle=opt.syn_smp_angles, min_wh=opt.syn_min_wh)

    # Persist the iteration-0 dictionaries
    dump_dicts(syn_dataset_dicts, md_output_path, "dicts.json")
    dump_dicts(kept_dicts, det_output_path, "dicts.json")
    dump_dicts(kept_dicts, det_output_path, "dicts_no_gan.json")
    dump_dicts(syn_dataset_dicts, det_output_path, "dicts_syn.json")
    dump_dicts(kept_dicts + syn_dataset_dicts, det_output_path, "dicts_real_syn.json")
    

def update_dataset(opt):
    # Setup paths
    det_dataset_root = os.path.join("./datasets", opt.exp_id, "det")
    gan_dataset_root = os.path.join("./datasets", opt.exp_id, "gan")
    gan_results_root = os.path.join("./results", opt.exp_id, "gan")
    md_dataset_root = os.path.join("./datasets", opt.exp_id, "md")

    # Update Rt
    ## Crop2Image transformation
    ### Setup input path
    cur_trainA_path = os.path.join(gan_dataset_root, opt.cur_ptl_iter)
    cur_gan_results_path = os.path.join(gan_results_root, opt.cur_ptl_iter, "A")

    ### Setup output path
    output_img_path = os.path.join(gan_results_root, opt.cur_ptl_iter, "A_bg")
    Path(output_img_path).mkdir(parents=True, exist_ok=True)

    ### Crop2Image
    cur_trainA_valid = []
    cur_trainA = get_det_dicts(dataset_path=cur_trainA_path, name="trainA.json")
    for data in tqdm(cur_trainA):    
        # Make sure that size of the bbox is larger than min_bbox_hw
        x, y, w, h = data["annotations"][0]["bbox"]
        if w<opt.min_bbox_hw_gan or h<opt.min_bbox_hw_gan:
            continue
    
        cur_trainA_valid.append(data)
    
        img_path, img_name = os.path.split(data["file_name"])
        img_path = os.path.split(img_path)[0]
    
        # Load the image, mask and gan result
        img = cv2.imread(data["file_name"])
        mask = cv2.imread(os.path.join(img_path, "mask", img_name.replace("img", "id")), 0)
        transformed_crop = cv2.imread(os.path.join(cur_gan_results_path, data["image_id"] + ".jpg"))
    
        # Filter the mask
        mask[mask<50] = 0
    
        # Make sure that all the coordinates are integer
        x, y, w, h = round(x), round(y), round(w), round(h)
    
        # scale to the target height/width
        if h>w:
            scale = opt.target_hw_gan / h
            new_bbox_h, new_bbox_w = opt.target_hw_gan, round(scale * w)
        else:
            scale = opt.target_hw_gan / w
            new_bbox_h, new_bbox_w = round(scale * h), opt.target_hw_gan 
    
        # padding
        delta_w = opt.target_hw_gan - new_bbox_w
        delta_h = opt.target_hw_gan - new_bbox_h
        top, bottom = delta_h//2, delta_h-(delta_h//2)
        left, right = delta_w//2, delta_w-(delta_w//2)
    
        # Remove the paddings
        transformed_crop = transformed_crop[top:opt.target_hw_gan-bottom, left:opt.target_hw_gan-right]
    
        # Crop the image and mask
        img_crop = img[y:y+h, x:x+w]
        mask_crop = mask[y:y+h, x:x+w]
    
        # Resize the transformed crop
        transformed_crop = cv2.resize(transformed_crop, (w, h), interpolation=cv2.INTER_LINEAR)
    
        # Fusion
        transformed_crop[mask_crop==0] = img_crop[mask_crop==0]
        img[y:y+h, x:x+w] = transformed_crop
    
        cv2.imwrite(os.path.join(output_img_path, data["image_id"] + ".jpg"), img)

    ## Update Rt
    ### Setup input path
    cur_det_dataset_path = os.path.join(det_dataset_root, opt.cur_ptl_iter)
    cur_det_dataset = get_det_dicts(dataset_path=cur_det_dataset_path)
    
    cur_trainA_no_gan = copy.deepcopy(cur_trainA_valid)
    cur_det_dataset_no_gan = get_det_dicts(dataset_path=cur_det_dataset_path, name="dicts_no_gan.json")
    next_det_dataset_no_gan = cur_det_dataset_no_gan + cur_trainA_no_gan

    ### Update file path
    for data in tqdm(cur_trainA_valid):
        data["file_name"] = os.path.join("./results/" + opt.exp_id + "/gan", opt.cur_ptl_iter, "A_bg", data["image_id"] + ".jpg")

    ### Next det dataset
    next_det_dataset = cur_det_dataset + cur_trainA_valid

    ### Output
    next_iter = "iter_" + str([int(i) + 1 for i in opt.cur_ptl_iter.split("_") if i.isdigit()][0])
    output_det_dataset_path = os.path.join(det_dataset_root, next_iter)
    Path(output_det_dataset_path).mkdir(parents=True, exist_ok=True)

    with open(os.path.join(output_det_dataset_path, "dicts.json"), "w") as f:
        json.dump(next_det_dataset, f)
    
    with open(os.path.join(output_det_dataset_path, "dicts_no_gan.json"), "w") as f:
        json.dump(next_det_dataset_no_gan, f)
        
    # Update Vt
    ## Load Vt
    cur_md_dataset_path = os.path.join(md_dataset_root, opt.cur_ptl_iter)
    cur_md_dataset = get_det_dicts(dataset_path=cur_md_dataset_path)

    ## Load Cvt
    cur_trainA = get_det_dicts(dataset_path=cur_trainA_path, name="trainA.json")
    
    ## Next md dataset
    nxt_md_dataset = [x for x in cur_md_dataset if x not in cur_trainA]
    
    ## Output
    output_md_path = os.path.join(md_dataset_root, next_iter)
    Path(output_md_path).mkdir(parents=True, exist_ok=True)
    dump_dicts(nxt_md_dataset, output_md_path, "dicts.json")