import torch
import numpy as np
import os
from tqdm import tqdm
import argparse
import pickle
import shutil

from data.utils import read_image
from data.dataset import build_dataset
from engine import Predictor
from utils.config import get_test_cfg

def parse_args():
    """Parse and return the command-line options for the prediction run.

    Required: --model, --datasets, --val-datasets, --test-datasets.
    Optional: --checkpoint (default 'best'), --gpus (default '0'),
    --batch-size (default 16).
    """
    parser = argparse.ArgumentParser(description="predict net")
    parser.add_argument("--model", type=str, required=True, help='model name')
    parser.add_argument("--datasets", type=str, required=True, help='train datasets name')
    parser.add_argument("--val-datasets", type=str, required=True, help='val datasets name')
    parser.add_argument("--test-datasets", type=str, required=True, help='datasets for sampling')
    parser.add_argument("--checkpoint", type=str, default='best',
                        help="checkpoint iter, can be 'last', 'best', iternumber")
    parser.add_argument("--gpus", type=str, default='0', help="use gpus id")
    parser.add_argument("--batch-size", type=int, default=16, help='batch size')
    return parser.parse_args()

class FullDataset:
    """Indexable dataset yielding ``(filename, image)`` pairs.

    Records come from ``build_dataset`` and each is expected to carry
    'file_name', 'preprocess' and 'params' keys (consumed by ``read_image``).
    """

    def __init__(self, dataset_names):
        # Materialize every record dict for the named datasets up front.
        self.dataset_dicts = build_dataset(*dataset_names)

    def __len__(self):
        return len(self.dataset_dicts)

    def __getitem__(self, index):
        record = self.dataset_dicts[index]
        filename = record['file_name']
        image = read_image(
            filename,
            preprocess_method=record['preprocess'],
            params=record['params'],
        )
        return filename, image

class BatchDataset:
    """Serve fixed-size mini-batches from an indexable dataset.

    Each item of the wrapped dataset is a tuple; a batch is the transposed
    ``zip`` of ``batch_size`` consecutive items, so e.g. a dataset of
    ``(filename, image)`` pairs yields ``(filenames, images)`` per batch.
    When ``drop_last`` is set, a trailing partial batch is discarded.
    """

    def __init__(self, dataset, batch_size, drop_last=False):
        self.dataset = dataset
        self.drop_last = drop_last
        self.batch_size = batch_size
        # At least one batch must exist for iteration to make sense.
        assert len(self) > 0

    def __len__(self):
        full, remainder = divmod(len(self.dataset), self.batch_size)
        # A leftover partial batch counts unless drop_last is requested.
        if remainder and not self.drop_last:
            full += 1
        return full

    def __getitem__(self, index):
        window = slice(index * self.batch_size, (index + 1) * self.batch_size)
        # slice.indices clamps the window to the dataset length.
        start, stop, step = window.indices(len(self.dataset))
        items = [self.dataset[i] for i in range(start, stop, step)]
        return zip(*items)

    def __iter__(self):
        return (self[i] for i in range(len(self)))

class Saver:
    """Iterative save helper for avoiding saving too much data with pickle.

    Accumulates prediction dicts and flushes them to numbered pickle chunks
    (``00000000.pkl``, ``00000001.pkl``, ...) every ``save_length`` updates,
    keeping the in-memory buffer and each pickle file bounded.

    Note: the constructor DELETES any existing ``output_dir`` so chunks from
    a previous run cannot be mixed with the new ones.
    """

    def __init__(self, output_dir, save_length):
        self.output_dir = output_dir
        # BUGFIX: clamp to at least 1. Callers may compute save_length by
        # integer division (e.g. 10000 // batch_size), which rounds down to 0
        # for large batch sizes and would make update() raise
        # ZeroDivisionError on the modulo below.
        self.save_length = max(1, save_length)
        self.length = 0       # number of update() calls so far
        self.iter = 0         # index of the next chunk file to write
        self.save_dict = {}   # entries accumulated since the last flush
        self.clear()

    def clear(self):
        """Remove any previous output directory so chunks start from scratch."""
        if os.path.exists(self.output_dir):
            shutil.rmtree(self.output_dir)

    def update(self, input_dict):
        """Merge ``input_dict`` into the pending chunk; flush when full."""
        self.save_dict.update(input_dict)
        self.length += 1
        if self.length % self.save_length == 0:
            self._save()

    def _save(self):
        """Write the pending chunk as ``<iter>.pkl`` and reset the buffer."""
        output_path = os.path.join(self.output_dir, '{:0>8}.pkl'.format(self.iter))
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        with open(output_path, 'wb') as f:
            pickle.dump(self.save_dict, f)

        # Drop the reference before rebinding so the dumped data can be
        # garbage-collected immediately.
        del self.save_dict
        self.save_dict = {}
        self.iter += 1

    def done(self):
        """Flush any trailing entries that did not fill a whole chunk."""
        if self.length % self.save_length != 0:
            self._save()

def predict(predictor, dataset, saver):
    """Run ``predictor`` over every batch of ``dataset``, streaming results.

    For each batch, moves per-image 'bboxes' and 'classification' tensors to
    CPU and hands them to ``saver`` keyed by filename; ``saver.done()`` at
    the end flushes any partially filled chunk.
    """
    for filenames, images in tqdm(dataset):
        outputs = predictor(images)
        per_image = zip(filenames, outputs['bboxes'], outputs['classification'])
        batch_results = {
            name: {
                'bboxes': boxes.cpu(),
                'classification': scores.cpu(),
            }
            for name, boxes, scores in per_image
        }
        saver.update(batch_results)
    saver.done()


if __name__ == '__main__':
    args = parse_args()

    # Resolve the model config for the requested architecture.
    config_path = 'configs/model/{}.yaml'.format(args.model)
    cfg = get_test_cfg(config_path)

    # Dataset name lists carry the split suffix expected by build_dataset.
    train_datasets = ["{}-train".format(dataset) for dataset in args.datasets.split(',')]
    val_datasets = ["{}-test".format(dataset) for dataset in args.val_datasets.split(',')]
    test_datasets = ["{}-test".format(dataset) for dataset in args.test_datasets.split(',')]

    cfg.OUTPUT_DIR = os.path.join(cfg.OUTPUT_DIR, "checkpoint", "{}/[train]-{}/[val]-{}".format(args.model, '&'.join(train_datasets), '&'.join(val_datasets)))

    # Select checkpoint weights: 'best', 'last' (None — presumably Predictor
    # then resolves its latest checkpoint; verify in engine.Predictor), or an
    # explicit iteration number zero-padded to 8 digits.
    if args.checkpoint == 'best':
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_best.pth')
    elif args.checkpoint == 'last':
        cfg.MODEL.WEIGHTS = None
    else:
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_{:0>8}.pth'.format(args.checkpoint))

    cfg.MODEL.DEVICES = ["cuda:{}".format(gpu) for gpu in args.gpus.split(',')]

    predictor = Predictor(cfg)

    dataset = BatchDataset(FullDataset(test_datasets), batch_size=args.batch_size, drop_last=False)

    output_dir = os.path.join('output', 'predict', '{}/[train]-{}/[val]-{}/[test]-{}-[checkpoint]-{}'.format(args.model, '&'.join(train_datasets), '&'.join(val_datasets), '&'.join(test_datasets), args.checkpoint))
    # BUGFIX: 10000 // batch_size rounds down to 0 when batch_size > 10000,
    # which would make Saver.update raise ZeroDivisionError. Clamp to at
    # least 1 batch per chunk (target is ~10000 images per pickle chunk).
    saver = Saver(output_dir, save_length=max(1, 10000 // args.batch_size))

    predict(predictor, dataset, saver)

    print("save prediction at {}".format(output_dir))
    print("done")
        

