import torch
import numpy as np
from PIL import Image
import time
import os
from tqdm import tqdm
import random
from pprint import pprint
import argparse

from data.dataset import build_dataset
from data.preprocess import preprocess_with_params
from engine import Predictor
from utils.boxes import postprocess
from utils.config import get_test_cfg

def parse_args(argv=None):
    """Parse command-line options for the speed test.

    Args:
        argv: optional list of argument strings. Defaults to ``None``,
            in which case argparse falls back to ``sys.argv[1:]`` —
            identical to the previous behavior. Passing an explicit list
            makes the function usable (and testable) without mutating
            ``sys.argv``.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description="speed test")
    parser.add_argument("--model", type=str, help='model name', required=True)
    parser.add_argument("--datasets", type=str, help='train datasets name', required=True)
    parser.add_argument("--val-datasets", type=str, help='val datasets name', required=True)
    parser.add_argument("--test-datasets", type=str, help='datasets for sampling', required=True)
    parser.add_argument('-n', "--num", type=int, help='number of images', default=1000)
    parser.add_argument("--checkpoint", type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')
    parser.add_argument("--gpus", type=str, help="use gpus id", default='0')
    parser.add_argument("--threshold", type=float, help='threshold', default=0.5)
    parser.add_argument("--nms-threshold", type=float, help='nms threshold', default=0.5)
    parser.add_argument("--batch-size", type=int, help='batch size', default=16)
    args = parser.parse_args(argv)
    return args

class TinyDataset:
    """A random subset of a built dataset.

    Each item is ``(PIL image, preprocess method, preprocess params)``
    taken from the record dicts produced by ``build_dataset``.
    """

    def __init__(self, dataset_names, max_size):
        """
        Args:
            dataset_names: iterable of dataset names, forwarded to
                ``build_dataset`` as positional arguments.
            max_size: maximum number of records to keep.
        """
        self.dataset_dicts = build_dataset(*dataset_names)
        # Clamp the sample size: random.sample raises ValueError when asked
        # for more elements than the population holds (e.g. --num larger
        # than the dataset).
        sample_size = min(max_size, len(self.dataset_dicts))
        self.dataset_dicts = random.sample(self.dataset_dicts, sample_size)

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        """Return ``(image, preprocess_method, params)`` for one record.

        NOTE(review): Image.open is lazy — pixel data is decoded later,
        inside the preprocess step, so decode time lands in the
        'preprocess_time' bucket of the benchmark.
        """
        record = self.dataset_dicts[index]
        image = Image.open(record['file_name'])
        return image, record['preprocess'], record['params']

class BatchDataset:
    """Group an indexable dataset into fixed-size batches.

    Each batch is the per-field transpose (``zip``) of the underlying
    items: a ``zip`` iterator of columns rather than a list of rows, so a
    dataset of ``(image, method, params)`` tuples yields
    ``(images, methods, params_list)`` per batch.
    """

    def __init__(self, dataset, batch_size, drop_last=False):
        """
        Args:
            dataset: any object supporting ``len()`` and integer indexing.
            batch_size: number of items per batch.
            drop_last: if True, discard a trailing partial batch.

        Raises:
            ValueError: if the configuration yields zero batches.
        """
        self.dataset = dataset
        self.drop_last = drop_last
        self.batch_size = batch_size

        # Explicit validation instead of `assert`, which is stripped under
        # `python -O` and gives no diagnostic message.
        if len(self) == 0:
            raise ValueError(
                "BatchDataset produced no batches: {} item(s), batch_size={}, "
                "drop_last={}".format(len(dataset), batch_size, drop_last))

    def __len__(self):
        full_batches, remainder = divmod(len(self.dataset), self.batch_size)
        if remainder and not self.drop_last:
            full_batches += 1
        return full_batches

    def __getitem__(self, index):
        # slice.indices clamps the stop bound to the dataset length, which
        # handles the final partial batch when drop_last is False.
        start, stop, step = slice(index * self.batch_size,
                                  (index + 1) * self.batch_size).indices(len(self.dataset))
        return zip(*[self.dataset[i] for i in range(start, stop, step)])

    def __iter__(self):
        for i in range(len(self)):
            yield self[i]


if __name__ == '__main__':
    args = parse_args()
    config_path = 'configs/model/{}.yaml'.format(args.model)
    cfg = get_test_cfg(config_path)

    # select dataset
    train_datasets = ["{}-train".format(dataset) for dataset in args.datasets.split(',')]
    val_datasets = ["{}-test".format(dataset) for dataset in args.val_datasets.split(',')]
    test_datasets = ["{}-test".format(dataset) for dataset in args.test_datasets.split(',')]

    # select checkpoint
    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, "checkpoint", "{}/[train]-{}/[val]-{}".format(args.model, '&'.join(train_datasets), '&'.join(val_datasets)))
    
    if args.checkpoint == 'best':
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_best.pth')
    elif args.checkpoint == 'last':
        cfg.MODEL.WEIGHTS = None
    else:
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_{:0>8}.pth'.format(args.checkpoint))
    
    # set device
    cfg.MODEL.DEVICES = ["cuda:{}".format(gpu) for gpu in args.gpus.split(',')]

    # set predictor
    predictor = Predictor(cfg)
    
    # generate batch dataset
    dataset = BatchDataset(TinyDataset(test_datasets, args.num), batch_size=args.batch_size, drop_last=True)

    # prepare time record
    record = {
        'preprocess_time': 0,
        'model_time': 0,
        'postprocess_time': 0,
        'total_time': 0,
        'count': 0,
        'batch_size': args.batch_size,
        'gpus': cfg.MODEL.DEVICES,
    }

    # predict the dataset
    for image_list, preprocess_method_list, params_list in tqdm(dataset):        
        time1 = time.time()
        
        image_list = [preprocess_with_params(image, preprocess_method=preprocess_method, params=params) for image, preprocess_method, params in zip(image_list, preprocess_method_list, params_list)]
        
        time2 = time.time()

        data_dict = predictor(image_list)

        time3 = time.time()

        data_list = [postprocess(boxes, classification, args.threshold, args.nms_threshold) for boxes, classification in zip(data_dict['bboxes'], data_dict['classification'])]

        time4 = time.time()

        record['preprocess_time'] += time2 - time1
        record['model_time'] += time3 - time2
        record['postprocess_time'] += time4 - time3
        record['total_time'] += time4 - time1
        record['count'] += len(image_list)

    for time_name in ['preprocess_time', 'model_time', 'postprocess_time', 'total_time']:
        record[time_name] /= record['count']

    pprint(record)
