import torch
import numpy as np
import argparse
import os
from tqdm import tqdm
import pickle
from PIL import Image

from data.dataset import build_dataset
from utils.boxes import postprocess
from utils.evaluate import get_area, get_intersection_area_matrix

def parse_args():
    """Parse the command-line options for negative-sample generation."""
    ap = argparse.ArgumentParser(description='evaluate net')
    ap.add_argument("--model", type=str, help='model name', required=True)
    ap.add_argument("--datasets", type=str, help='train datasets name', required=True)
    ap.add_argument("--val-datasets", type=str, help='val datasets name', required=True)
    ap.add_argument("--test-datasets", type=str, help='datasets for sampling', required=True)
    ap.add_argument("--checkpoint", type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')
    ap.add_argument("--threshold", type=float, help='threshold', default=0.5)
    ap.add_argument("--nms-threshold", type=float, help='nms threshold', default=0.5)
    ap.add_argument("--gt-boxes-cover-threshold", type=float, help='iou cover threshold', default=0.7)
    return ap.parse_args()

class PredictDictLoader:
    """Loads every pickle file in a directory and merges them into one dict.

    Each file under ``predict_dict_dir`` is expected to unpickle to a dict;
    later files overwrite duplicate keys from earlier ones.
    """

    def __init__(self, predict_dict_dir):
        self.predict_dict_dir = predict_dict_dir
        self.load()

    def load(self):
        """(Re)build ``self.predict_dict`` from the pickle files on disk."""
        merged = {}
        for entry in os.listdir(self.predict_dict_dir):
            full_path = os.path.join(self.predict_dict_dir, entry)
            with open(full_path, 'rb') as handle:
                merged.update(pickle.load(handle))
        self.predict_dict = merged

    def __getitem__(self, idx):
        # Dict-style access; raises KeyError for unknown keys.
        return self.predict_dict[idx]

class PredictionDataset:
    """Pairs each image's ground-truth boxes with its stored predictions.

    Iterating yields ``(filename, gt_boxes, bboxes, classification)`` tuples,
    where ``gt_boxes`` is an ``(N, 4)`` float array built from the dataset
    annotations and the last two entries come from ``predict_dict``.
    """

    def __init__(self, dataset_names, predict_dict):
        self.dataset_dicts = build_dataset(*dataset_names)
        self.predict_dict = predict_dict

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        filename = record['file_name']
        # reshape(-1, 4) keeps the shape (0, 4) even when there are no annotations
        gt_boxes = np.array([obj['bbox'] for obj in record['annotations']]).reshape(-1, 4)
        prediction = self.predict_dict[filename]
        return filename, gt_boxes, prediction['bboxes'], prediction['classification']

    def __iter__(self):
        return (self[i] for i in range(len(self)))


def generate_negative_sample(dataset, output_dir, **kwargs):
    """Crop and save false-positive proposals as negative-sample images.

    For each image, the raw predictions are post-processed (score threshold
    + NMS). A proposal counts as a "hit" when its intersection covers more
    than ``gt_boxes_cover_threshold`` of some ground-truth box's area; all
    remaining proposals are treated as negatives and their crops are written
    to ``output_dir`` with zero-padded sequential filenames.

    Args:
        dataset: iterable of ``(filename, gt_boxes, bboxes, classification)``
            tuples (see ``PredictionDataset``).
        output_dir: existing directory the cropped negatives are written into.
        **kwargs: must contain ``threshold``, ``nms_threshold`` and
            ``gt_boxes_cover_threshold``.
    """
    count = 0
    for filename, gt_boxes, bboxes, classification in tqdm(dataset, desc="threshold={:.4f}".format(kwargs['threshold']), leave=False):
        data = postprocess(bboxes, classification, kwargs['threshold'], kwargs['nms_threshold'])
        proposals = data['rois']

        # Fraction of each gt box's area covered by each proposal.
        gt_boxes_area = get_area(gt_boxes)
        intersection_area_matrix = get_intersection_area_matrix(proposals, gt_boxes)
        # Zero-area gt boxes would trigger divide-by-zero warnings; the
        # resulting NaN/inf entries still compare sensibly below (NaN > t
        # is False), so just silence the warning for this expression.
        with np.errstate(divide='ignore', invalid='ignore'):
            cover_matrix = intersection_area_matrix / gt_boxes_area.reshape(1, -1)
        proposals_hit = (cover_matrix > kwargs['gt_boxes_cover_threshold']).any(axis=1)

        negative_boxes = proposals[~proposals_hit]
        if len(negative_boxes) == 0:
            continue
        # Context manager guarantees the image file handle is released even
        # when a crop or save fails (the original leaked open handles).
        with Image.open(filename) as image:
            for box in negative_boxes:
                output_path = os.path.join(output_dir, '{:0>8}{}'.format(count, os.path.splitext(filename)[-1]))
                try:
                    image.crop(box).save(output_path)
                except Exception:
                    # Best-effort: skip boxes that cannot be cropped/saved
                    # (e.g. invalid coordinates). Narrowed from the original
                    # bare `except`, which also swallowed KeyboardInterrupt.
                    continue
                # Only advance the counter on a successful save so the
                # output filenames stay gap-free.
                count += 1

if __name__ == '__main__':
    args = parse_args()

    def _expand(names, suffix):
        # "a,b" with suffix "train" -> ["a-train", "b-train"]
        return ['{}-{}'.format(name, suffix) for name in names.split(',')]

    train_datasets = _expand(args.datasets, 'train')
    val_datasets = _expand(args.val_datasets, 'test')
    test_datasets = _expand(args.test_datasets, 'test')

    # Shared run identifier used for both the prediction input directory
    # and the negative-sample output directory.
    run_tag = '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}'.format(
        args.model,
        '&'.join(train_datasets),
        '&'.join(val_datasets),
        '&'.join(test_datasets),
        args.checkpoint,
    )

    print("loading predict dict, it may take a few minutes ... ", end='', flush=True)
    predict_dict = PredictDictLoader(os.path.join('output', 'predict', run_tag))
    print("load done", flush=True)

    dataset = PredictionDataset(test_datasets, predict_dict)

    output_dir = os.path.join('output', 'negative_sample', run_tag)
    os.makedirs(output_dir, exist_ok=True)

    generate_negative_sample(
        dataset,
        output_dir,
        threshold=args.threshold,
        nms_threshold=args.nms_threshold,
        gt_boxes_cover_threshold=args.gt_boxes_cover_threshold,
    )

    print("negative samples save at {}".format(output_dir))
    print("done")




