import gc
from pathlib import Path
import shutil
from tempfile import TemporaryDirectory

import tqdm
import torch
from ais_bench.infer.interface import InferSession
import onnxruntime as ort

from mmdet.evaluation import get_classes
from mmdet.structures import DetDataSample
from mmdet.utils import setup_cache_size_limit_of_dynamo
from mmengine.config import Config
from mmengine.evaluator import Evaluator
from mmengine.registry import EVALUATOR, init_default_scope
from mmengine.runner import Runner, autocast
from tqdm import tqdm

from ..postprocess import PostProcessor
from ..preprocess import ImagePreprocessor
from ..utils import DataHelper, create_data_sample
from .base_inferencer import BaseInferencer


def build_offline_evaluator(cfg):
    """Build an mmengine evaluator for scoring offline inference results.

    Args:
        cfg: mmengine ``Config`` loaded from the model config; must contain
            ``test_evaluator`` and ``test_dataloader.dataset.type``.

    Returns:
        Evaluator: evaluator whose ``dataset_meta['classes']`` is filled in
        from the dataset type name (e.g. ``'CocoDataset'`` -> the COCO
        class list, with underscores replaced by spaces).
    """
    init_default_scope(cfg.get('default_scope'))
    evaluator_cfg = cfg.test_evaluator
    if 'metrics' in evaluator_cfg:
        # Composite evaluator config: let the registry build it, defaulting
        # the wrapper type to the plain 'Evaluator'.
        evaluator_cfg.setdefault('type', 'Evaluator')
        evaluator = EVALUATOR.build(evaluator_cfg)
    else:
        evaluator = Evaluator(evaluator_cfg)

    dataset_type = cfg.test_dataloader.dataset.type
    # BUG FIX: the original used dataset_type.strip('Dataset'), but str.strip
    # removes any of the characters {'D','a','t','s','e'} from both ends, not
    # the literal suffix — e.g. 'CityscapesDataset'.strip('Dataset') yields
    # 'Cityscap'. Remove only the trailing 'Dataset' suffix instead.
    suffix = 'Dataset'
    if dataset_type.endswith(suffix):
        dataset_type = dataset_type[:-len(suffix)]
    evaluator.dataset_meta = {
        'classes': tuple(
            name.replace('_', ' ')
            for name in get_classes(dataset_type.lower()))
    }
    return evaluator


def offline_evaluate(config_path, om_path):
    """Run inference with a compiled OM model and score the predictions.

    Args:
        config_path: Path to the mmdet config file.
        om_path: Path to the compiled OM model file.

    Returns:
        The metrics dict produced by the evaluator (also printed to stdout).
    """
    cfg = Config.fromfile(config_path)
    inferencer = BaseInferencer(cfg, om_path, device=0)
    evaluator = build_offline_evaluator(cfg)
    cfg.test_dataloader.batch_size = inferencer.batch_size

    dataset_cfg = cfg.test_dataloader.dataset
    image_dir = Path(dataset_cfg.data_root) / dataset_cfg.data_prefix['img']
    data_helper = DataHelper(str(image_dir), inferencer.batch_size)

    progress = tqdm(data_helper.batch_list, desc='Inference',
                    total=data_helper.num_batches)
    for image_batch in progress:
        samples = inferencer.predict(image_batch)
        for sample in samples:
            # The image id is taken from the numeric file stem of img_path.
            sample.set_metainfo({'img_id': int(Path(sample.img_path).stem)})
        evaluator.process(data_samples=samples,
                          data_batch={'data_samples': samples})

    metrics = evaluator.evaluate(data_helper.num_images)
    print(metrics)
    return metrics


def online_evaluate(config_path, ckpt_path, batch_size=1):
    """Evaluate a checkpoint with the standard (online) mmdet test pipeline.

    Builds a Runner from the config, runs the test dataloader through the
    model under ``torch.no_grad()`` while firing the usual test hooks, and
    returns the evaluator's metrics.

    Args:
        config_path: Path to the mmdet config file.
        ckpt_path: Path to the checkpoint to load.
        batch_size: Test dataloader batch size. Defaults to 1.

    Returns:
        The metrics dict produced by the evaluator.
    """
    setup_cache_size_limit_of_dynamo()
    cfg = Config.fromfile(config_path)
    cfg.test_dataloader.batch_size = batch_size
    cfg.visualizer = None
    cfg.launcher = 'none'
    # FIX: use the context manager so the scratch work_dir is removed even
    # when inference raises — the original only called cleanup() on success.
    with TemporaryDirectory() as work_dir:
        cfg.work_dir = work_dir
        cfg.load_from = ckpt_path
        runner = Runner.from_cfg(cfg)
        test_loop = runner.build_test_loop(runner._test_loop)
        evaluator = runner.test_evaluator

        runner.call_hook('before_run')
        runner.load_or_resume()
        runner.call_hook('before_test')
        runner.call_hook('before_test_epoch')

        model = runner.model
        model.eval()
        with torch.no_grad():
            # FIX: enumerate(tqdm(...)) instead of tqdm(enumerate(...)) so
            # the progress bar can infer the total from len(dataloader).
            for idx, data_batch in enumerate(tqdm(test_loop.dataloader)):
                runner.call_hook('before_test_iter', batch_idx=idx,
                                 data_batch=data_batch)
                with autocast(enabled=test_loop.fp16):
                    data_batch = model.data_preprocessor(data_batch, False)
                    results = model(**data_batch, mode='predict')
                evaluator.process(data_samples=results, data_batch=data_batch)
                runner.call_hook('after_test_iter', batch_idx=idx,
                                 data_batch=data_batch, outputs=results)
        metrics = evaluator.evaluate(len(test_loop.dataloader.dataset))

        runner.call_hook('after_test_epoch', metrics=metrics)
        runner.call_hook('after_test')
        runner.call_hook('after_run')

    return metrics


def online_evaluate_with_offline_data(config_path, ckpt_path,
                                      batch_size=1,
                                      input_shape=(800, 1088)):
    """Evaluate a checkpoint, but feed it data from the offline preprocessor.

    Instead of the config's test dataloader, images are read and preprocessed
    by :class:`ImagePreprocessor` (the same path used for offline/OM
    inference) and then run through the PyTorch model for prediction.

    Args:
        config_path: Path to the mmdet config file.
        ckpt_path: Path to the checkpoint to load.
        batch_size: Number of images per inference batch. Defaults to 1.
        input_shape: (height, width) the preprocessor resizes/pads images to.
            Defaults to (800, 1088).

    Returns:
        The metrics dict produced by the evaluator.
    """
    setup_cache_size_limit_of_dynamo()
    cfg = Config.fromfile(config_path)
    cfg.test_dataloader.batch_size = batch_size
    cfg.visualizer = None
    cfg.launcher = 'none'
    # FIX: use the context manager so the scratch work_dir is removed even
    # when inference raises — the original only called cleanup() on success.
    # This also keeps the function consistent with online_evaluate.
    with TemporaryDirectory() as work_dir:
        cfg.work_dir = work_dir
        cfg.load_from = ckpt_path

        runner = Runner.from_cfg(cfg)
        test_loop = runner.build_test_loop(runner._test_loop)
        evaluator = runner.test_evaluator

        runner.call_hook('before_run')
        runner.load_or_resume()
        runner.call_hook('before_test')
        runner.call_hook('before_test_epoch')

        model = runner.model
        model.eval()
        preprocessor = ImagePreprocessor(cfg, input_shape)
        data_root = cfg.test_dataloader.dataset.data_root
        data_dir = (Path(data_root)
                    / cfg.test_dataloader.dataset.data_prefix['img'])
        data_helper = DataHelper(str(data_dir), batch_size)

        with torch.no_grad():
            for batch_images in tqdm(data_helper.batch_list, desc='Inference',
                                     total=data_helper.num_batches):
                inputs, img_metas = preprocessor.preprocess_batch(batch_images)
                # The image id is taken from the numeric file stem.
                data_samples = [
                    create_data_sample(
                        meta, img_id=int(Path(meta['img_path']).stem))
                    for meta in img_metas
                ]
                data_batch = dict(inputs=torch.from_numpy(inputs),
                                  data_samples=data_samples)
                with autocast(enabled=test_loop.fp16):
                    results = model(**data_batch, mode='predict')
                evaluator.process(data_samples=results, data_batch=data_batch)
        metrics = evaluator.evaluate(data_helper.num_images)

        runner.call_hook('after_test_epoch', metrics=metrics)
        runner.call_hook('after_test')
        runner.call_hook('after_run')

    return metrics
