import torch
import numpy as np
import os
from tqdm import tqdm
import argparse
import pickle
import shutil
import copy
from pprint import pprint
import matplotlib.pyplot as plt
from pathlib import Path

from data.utils import read_image
from data.dataset import build_dataset
from engine import Predictor
from utils.config import get_test_cfg
from utils.boxes import postprocess
from utils.evaluate import get_area, get_intersection_area_matrix, evaluate_matrix2, get_precision, get_recall


def parse_args():
    """Build and parse the command-line arguments for the prediction run.

    Returns:
        argparse.Namespace: parsed arguments (model/dataset names, checkpoint
        selector, GPU ids, batch size, score and NMS thresholds).
    """
    arg_specs = [
        ("--model", dict(type=str, help='model name', required=True)),
        ("--datasets", dict(type=str, help='train datasets name', required=True)),
        ("--val-datasets", dict(type=str, help='val datasets name', required=True)),
        ("--test-datasets", dict(type=str, help='datasets for sampling', required=True)),
        ("--checkpoint", dict(type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')),
        ("--gpus", dict(type=str, help="use gpus id", default='0')),
        ("--batch-size", dict(type=int, help='batch size', default=16)),
        ("--threshold", dict(type=float, help='threshold', default=0.2)),
        ("--nms-threshold", dict(type=float, help='nms threshold', default=0.5)),
    ]
    parser = argparse.ArgumentParser(description="predict net")
    for flag, options in arg_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args()

class FullDataset:
    """Indexable view over dataset records produced by ``build_dataset``.

    Each item yields the image file name, the decoded image, the ground-truth
    boxes (N x 4 ndarray) and their category ids (uint8 ndarray).
    """

    def __init__(self, dataset_names):
        # build_dataset takes dataset names as positional arguments.
        self.dataset_dicts = build_dataset(*dataset_names)

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        filename = record['file_name']
        annotations = record['annotations']
        # reshape(-1, 4) keeps a valid (0, 4) array even with no annotations.
        boxes = np.array([ann['bbox'] for ann in annotations]).reshape(-1, 4)
        labels = np.array([ann['category_id'] for ann in annotations], dtype=np.uint8)
        image = read_image(filename, preprocess_method=record['preprocess'], params=record['params'])
        return filename, image, boxes, labels

class BatchDataset:
    """Groups an indexable dataset into fixed-size batches.

    Each batch is the per-field transpose of its samples: iterating a batch
    yields one tuple per field, each holding that field's values for every
    sample in the batch.
    """

    def __init__(self, dataset, batch_size, drop_last=False):
        self.dataset = dataset
        self.drop_last = drop_last
        self.batch_size = batch_size
        # An empty batching is a configuration error, fail fast.
        assert len(self) > 0

    def __len__(self):
        full_batches, remainder = divmod(len(self.dataset), self.batch_size)
        # Keep the trailing partial batch unless drop_last was requested.
        if remainder and not self.drop_last:
            full_batches += 1
        return full_batches

    def __getitem__(self, index):
        window = slice(index * self.batch_size, (index + 1) * self.batch_size)
        lo, hi, stride = window.indices(len(self.dataset))
        samples = (self.dataset[i] for i in range(lo, hi, stride))
        return zip(*samples)

    def __iter__(self):
        return (self[i] for i in range(len(self)))

class Saver:
    """Writes per-image detections and ground-truth boxes to two text files.

    Creates ``detection.txt`` and ``groundtruth.txt`` under ``save_dir``.
    Each line is ``<image_id> <entry>;<entry>;...`` where a detection entry is
    ``x1,x2,x3,x4,score`` and a ground-truth entry is ``x1,x2,x3,x4``.

    Fix: the two files were only closed by an explicit ``close()`` call, so
    they leaked if the prediction loop raised. The class is now also a context
    manager (``with Saver(d) as saver: ...``); ``close()`` is kept and remains
    safe to call multiple times.
    """

    def __init__(self, save_dir):
        os.makedirs(save_dir, exist_ok=True)
        self.groundtruth_path = os.path.join(save_dir, 'groundtruth.txt')
        self.detection_path = os.path.join(save_dir, 'detection.txt')

        self.detection_file = open(self.detection_path, 'w', encoding='utf-8')
        self.groundtruth_file = open(self.groundtruth_path, 'w', encoding='utf-8')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Always release the file handles; do not suppress exceptions.
        self.close()
        return False

    def save(self, image_id, detection_boxes, detection_scores, groundtruth_boxes):
        """Append one image's detections and ground truth to the output files.

        Args:
            image_id (str): identifier written as the line prefix.
            detection_boxes (np.ndarray): N x 4 predicted boxes.
            detection_scores (np.ndarray): N confidence scores.
            groundtruth_boxes (np.ndarray): M x 4 ground-truth boxes.
        """
        data = ';'.join(["{},{},{},{},{}".format(*detection_boxes[i], detection_scores[i]) for i in range(detection_scores.shape[0])])
        self.detection_file.write("{} {}\n".format(image_id, data))

        data = ';'.join(["{},{},{},{}".format(*groundtruth_boxes[i]) for i in range(groundtruth_boxes.shape[0])])
        self.groundtruth_file.write("{} {}\n".format(image_id, data))

    def close(self):
        """Close both output files (idempotent)."""
        self.detection_file.close()
        self.groundtruth_file.close()


def predict_and_save(predictor, dataset, saver, **kwargs):
    """Run the predictor over every batch and persist per-image results.

    For each image, predictions are post-processed with the score and NMS
    thresholds from ``kwargs``, filtered to the person class, and written to
    ``saver`` together with the person-class ground-truth boxes.

    Args:
        predictor: callable mapping a list of images to a dict with
            ``'bboxes'`` and ``'classification'`` entries.
        dataset: iterable of (filenames, images, gt_boxes, gt_labels) batches.
        saver (Saver): sink receiving (image_id, boxes, scores, gt_boxes).
        **kwargs: must contain ``'threshold'`` and ``'nms_threshold'``.
    """
    person_class_id = 0  # only person
    score_threshold = kwargs['threshold']
    nms_threshold = kwargs['nms_threshold']

    for filenames, images, gt_boxes_batch, gt_labels_batch in tqdm(dataset):
        predictions = predictor(images)

        per_image = zip(filenames, gt_boxes_batch, gt_labels_batch,
                        predictions['bboxes'], predictions['classification'])
        for filename, all_gt_boxes, gt_labels, raw_boxes, raw_scores in per_image:
            detections = postprocess(raw_boxes, raw_scores, score_threshold, nms_threshold)

            keep = detections['class_ids'] == person_class_id
            saver.save(filename,
                       detections['rois'][keep],
                       detections['scores'][keep],
                       all_gt_boxes[gt_labels == person_class_id])


if __name__ == '__main__':
    args = parse_args()

    # Load the test-time config for the requested model.
    cfg = get_test_cfg(f'configs/model/{args.model}.yaml')

    # Dataset names are comma-separated; train uses the "-train" split,
    # val/test use the "-test" split.
    train_datasets = [f'{name}-train' for name in args.datasets.split(',')]
    val_datasets = [f'{name}-test' for name in args.val_datasets.split(',')]
    test_datasets = [f'{name}-test' for name in args.test_datasets.split(',')]

    train_tag = '&'.join(train_datasets)
    val_tag = '&'.join(val_datasets)
    test_tag = '&'.join(test_datasets)

    # Checkpoints live under a directory keyed by model + train/val datasets.
    cfg.OUTPUT_DIR = os.path.join(
        cfg.OUTPUT_DIR, "checkpoint",
        f'{args.model}/[train]-{train_tag}/[val]-{val_tag}')

    # Resolve the weights file; 'last' means let the predictor pick them up.
    if args.checkpoint == 'best':
        weights_name = 'model_best.pth'
    elif args.checkpoint == 'last':
        weights_name = None
    else:
        weights_name = f'model_{args.checkpoint:0>8}.pth'
    cfg.MODEL.WEIGHTS = None if weights_name is None else os.path.join(cfg.OUTPUT_DIR, weights_name)

    cfg.MODEL.DEVICES = [f'cuda:{gpu}' for gpu in args.gpus.split(',')]

    predictor = Predictor(cfg)

    dataset = BatchDataset(FullDataset(test_datasets), batch_size=args.batch_size, drop_last=False)

    # Result directory encodes the full experiment configuration in its path.
    result_tag = (f'{args.model}/[train]-{train_tag}/[val]-{val_tag}/[test]-{test_tag}'
                  f'/[checkpoint]-{args.checkpoint}-[thres]-{args.threshold}-[nms]-{args.nms_threshold}')
    output_dir = os.path.join('output', 'result_for_matlab_lamr', result_tag)
    os.makedirs(output_dir, exist_ok=True)

    saver = Saver(output_dir)

    predict_and_save(predictor, dataset, saver,
                     threshold=args.threshold,
                     nms_threshold=args.nms_threshold)

    saver.close()

    print(f'save evaluate dict at {output_dir}')
    print("done")
