import torch
import numpy as np
import argparse
import os
import cv2
from tqdm import tqdm
import random
import shutil

from data.utils import read_image
from data.dataset import build_dataset
from engine import Predictor
from utils.config import get_test_cfg
from utils.boxes import postprocess


def parse_args():
    """Parse command-line arguments for the box-drawing demo.

    Returns:
        argparse.Namespace with the model name, dataset name lists,
        sample count, checkpoint selector, GPU ids, score/NMS thresholds,
        batch size, and the --clear flag.
    """
    def str2bool(value):
        # BUG FIX: the previous `type=bool` is an argparse trap — any
        # non-empty string (including "False") is truthy, so --clear could
        # never be disabled from the CLI.  Parse common spellings explicitly.
        if isinstance(value, bool):
            return value
        lowered = value.lower()
        if lowered in ('yes', 'true', 't', '1'):
            return True
        if lowered in ('no', 'false', 'f', '0'):
            return False
        raise argparse.ArgumentTypeError("boolean value expected, got {!r}".format(value))

    parser = argparse.ArgumentParser(description="demo")
    parser.add_argument("--model", type=str, help='model name', required=True)
    parser.add_argument("--datasets", type=str, help='train datasets name', required=True)
    parser.add_argument("--val-datasets", type=str, help='val datasets name', required=True)
    parser.add_argument("--test-datasets", type=str, help='datasets for sampling', required=True)
    parser.add_argument('-n', "--num", type=int, help='number of images', required=True)
    parser.add_argument("--checkpoint", type=str, help="checkpoint iter, can be 'last', 'best', iternumber", default='best')
    parser.add_argument("--gpus", type=str, help="use gpus id", default='0')
    parser.add_argument("--threshold", type=float, help='threshold', default=0.5)
    parser.add_argument("--nms-threshold", type=float, help='nms threshold', default=0.5)
    parser.add_argument("--batch-size", type=int, help='batch size', default=16)
    parser.add_argument("--clear", type=str2bool, help='clear last generation', default=True)
    args = parser.parse_args()
    return args

class TinyDataset:
    """A randomly sampled subset of one or more datasets.

    Items are (filename, preprocessed image, ground-truth boxes) tuples.
    """

    def __init__(self, dataset_names, max_size):
        # Build the combined dataset, then keep at most `max_size` random
        # records.  BUG FIX: clamp the sample size — random.sample raises
        # ValueError when k exceeds the population size, which would crash
        # the demo whenever -n is larger than the dataset.
        self.dataset_dicts = build_dataset(*dataset_names)
        sample_size = min(max_size, len(self.dataset_dicts))
        self.dataset_dicts = random.sample(self.dataset_dicts, sample_size)

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        """Return (filename, image, gt_boxes) for the sampled record at `index`."""
        record = self.dataset_dicts[index]
        filename = record['file_name']
        # NOTE(review): read_image is project code; presumably applies the
        # per-record preprocess method with its params — confirm upstream.
        image = read_image(filename, preprocess_method=record['preprocess'], params=record['params'])
        gt_boxes = [obj['bbox'] for obj in record['annotations']]
        return filename, image, gt_boxes

class BatchDataset:
    """Serve an indexable dataset in fixed-size, column-wise batches.

    Each batch is the transpose of up to `batch_size` consecutive items:
    iterating yields zip(*rows), i.e. one tuple per field rather than one
    tuple per record.
    """

    def __init__(self, dataset, batch_size, drop_last=False):
        self.dataset = dataset
        self.drop_last = drop_last
        self.batch_size = batch_size

        # Fail fast if the dataset cannot fill even one batch.
        assert len(self) > 0

    def __len__(self):
        full_batches, remainder = divmod(len(self.dataset), self.batch_size)
        if remainder and not self.drop_last:
            # A trailing partial batch counts unless drop_last is set.
            full_batches += 1
        return full_batches

    def __getitem__(self, index):
        # slice.indices clamps the window to the dataset bounds, so the
        # final partial batch comes out the right size automatically.
        window = slice(index * self.batch_size, (index + 1) * self.batch_size)
        lo, hi, stride = window.indices(len(self.dataset))
        rows = [self.dataset[pos] for pos in range(lo, hi, stride)]
        return zip(*rows)

    def __iter__(self):
        for batch_index in range(len(self)):
            yield self[batch_index]

def draw_image(img, gt_boxes, proposals, class_ids):
    """Draw ground-truth and predicted boxes onto `img` in place.

    Args:
        img: image array as loaded by cv2.imread (BGR channel order);
            modified in place.
        gt_boxes: iterable of [x1, y1, x2, y2] ground-truth boxes, drawn
            in green.
        proposals: iterable of [x1, y1, x2, y2] predicted boxes, drawn in
            (255, 0, 0) — blue in OpenCV's BGR order.
        class_ids: per-proposal class identifiers; assumed the same length
            as `proposals`.

    Returns:
        The same `img` array, for chaining convenience.
    """
    for box in gt_boxes:
        x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)

    for box, class_id in zip(proposals, class_ids):
        x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
        cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 3)
        # Place the class label just above the box's top-left corner.
        cv2.putText(img, str(class_id), (x1, y1 - 6), cv2.FONT_HERSHEY_COMPLEX, 0.5, (255, 0, 0), 2)

    return img
    

if __name__ == '__main__':
    args = parse_args()

    # Resolve the model config and build the test-time configuration.
    config_path = 'configs/model/{}.yaml'.format(args.model)
    cfg = get_test_cfg(config_path)

    # Train/val names locate the checkpoint directory; test names choose
    # which datasets the drawn samples come from.
    train_datasets = ["{}-train".format(dataset) for dataset in args.datasets.split(',')]
    val_datasets = ["{}-test".format(dataset) for dataset in args.val_datasets.split(',')]
    test_datasets = ["{}-test".format(dataset) for dataset in args.test_datasets.split(',')]

    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, "checkpoint", "{}/[train]-{}/[val]-{}".format(args.model, '&'.join(train_datasets), '&'.join(val_datasets)))

    # Select the checkpoint: 'best'/'last' keywords, or an explicit
    # iteration number (zero-padded to 8 digits to match the dump format).
    if args.checkpoint == 'best':
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_best.pth')
    elif args.checkpoint == 'last':
        # NOTE(review): None presumably makes Predictor load the newest
        # checkpoint in OUTPUT_DIR — confirm in engine.Predictor.
        cfg.MODEL.WEIGHTS = None
    else:
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_{:0>8}.pth'.format(args.checkpoint))

    cfg.MODEL.DEVICES = ["cuda:{}".format(gpu) for gpu in args.gpus.split(',')]

    predictor = Predictor(cfg)

    dataset = BatchDataset(TinyDataset(test_datasets, args.num), batch_size=args.batch_size, drop_last=False)

    output_dir = os.path.join('output', 'draw', '{}/[train]-{}/[val]-{}/[test]-{}'.format(args.model, '&'.join(train_datasets), '&'.join(val_datasets), '&'.join(test_datasets)))
    # BUG FIX: --clear was parsed but never consulted — the previous run's
    # output was always deleted.  Only wipe the directory when requested.
    if args.clear and os.path.exists(output_dir):
        shutil.rmtree(output_dir)
    os.makedirs(output_dir, exist_ok=True)

    count = 0
    for filename_list, image_list, gt_boxes_list in tqdm(dataset):
        # Run inference on the whole batch, then apply score threshold and
        # NMS per image.
        data_dict = predictor(image_list)
        data_list = [postprocess(boxes, classification, args.threshold, args.nms_threshold) for boxes, classification in zip(data_dict['bboxes'], data_dict['classification'])]

        for filename, gt_boxes, data in zip(filename_list, gt_boxes_list, data_list):
            count += 1
            image = cv2.imread(filename)
            if image is None:
                # cv2.imread returns None instead of raising on unreadable
                # files; skip the image rather than crash in draw_image.
                print("warning: could not read {}, skipping".format(filename))
                continue
            image = draw_image(image, gt_boxes, data['rois'], data['class_ids'])
            # Sequential 8-digit names keep drawings in sampling order while
            # preserving each source file's extension.
            output_path = os.path.join(output_dir, '{:0>8}{}'.format(count, os.path.splitext(filename)[-1]))
            cv2.imwrite(output_path, image)

    print("save drawing at {}".format(output_dir))
        
        
        
        

    




