from abc import ABC, abstractmethod
from ais.core import *
import sys
import logging
from pathlib import Path
import torch
import numpy as np
from ais.utils import setup_logger


class BaseInfer(ABC):
    """Abstract base class for torch model inference pipelines.

    Subclasses implement :meth:`pre_process` and :meth:`post_process`;
    :meth:`process` is also abstract but ships a default template that
    subclasses may reuse via ``super().process(image)``.
    """

    def __init__(self, model,
                 device,
                 gpus,
                 **kwargs):
        """
        :param model: ``torch.nn.Module`` to run inference with
        :param device: ``torch.device`` (or device string) inputs are moved to
        :param gpus: GPU id(s); stored as-is for subclasses to use
        :param kwargs: extra attributes copied verbatim onto the instance
        """
        super(BaseInfer, self).__init__()
        self._module: torch.nn.Module = model
        self.gpus = gpus
        self.device: torch.device = device
        # Copies arbitrary kwargs straight into the instance namespace;
        # note this can silently overwrite the attributes set above.
        vars(self).update(kwargs)

    def inference(self, batch: TensorImage):
        """Run the wrapped module on ``batch`` in eval mode, gradient-free.

        :param batch: input tensor; moved to ``self.device`` before the call
        :return: raw module output
        """
        self._module.eval()
        with torch.no_grad():
            batch = batch.to(self.device)
            output: TensorImage = self._module(batch)
            return output

    @abstractmethod
    def pre_process(self, batch):
        """Convert a raw input into a model-ready tensor.

        :param batch: raw input (e.g. a numpy image)
        :return: tensor suitable for :meth:`inference`
        """

    @abstractmethod
    def post_process(self, batch, input_shape):
        """Convert raw model output into the final result.

        Signature fixed to match how :meth:`process` actually invokes it:
        with the model output AND the original spatial shape.

        :param batch: raw output from :meth:`inference`
        :param input_shape: (height, width) of the original input image
        :return: post-processed tensor (squeezable to a uint8 mask)
        """

    @abstractmethod
    def process(self, image: NPImage):
        """End-to-end inference: preprocess, forward pass, postprocess.

        Kept abstract so subclasses must declare it explicitly, but this
        default template can be reused via ``super().process(image)``.

        :param image: numpy image; ``shape[:2]`` is taken as (height, width)
        :return: uint8 numpy array with singleton dimensions squeezed out
        """
        input_shape = image.shape[:2]
        input_tensor = self.pre_process(image)
        output_dict = self.inference(input_tensor)
        mask = self.post_process(output_dict, input_shape)
        return mask.squeeze().cpu().numpy().astype(np.uint8)

    def init(self, checkpoint_dir, logger_file):
        """Load model weights from a checkpoint file and set up logging.

        Exits the process via ``sys.exit(1)`` if the checkpoint is missing
        or cannot be loaded.

        :param checkpoint_dir: path to the ``.pt`` checkpoint file
        :param logger_file: path of the log file to write to
        """
        self.logger = setup_logger(self.__class__.__name__, level=logging.INFO, filepath=logger_file)
        f_params = Path(checkpoint_dir)
        if not f_params.exists():
            self.logger.error("{} does not exist!".format(str(f_params)))
            sys.exit(1)

        try:
            map_location = torch.device(self.device)
            # SECURITY NOTE(review): torch.load unpickles arbitrary objects;
            # only load checkpoints from trusted sources (consider
            # weights_only=True on torch versions that support it).
            state_dict = torch.load(str(f_params), map_location=map_location)
            # Checkpoint is expected to wrap the module state under 'module_dict'.
            self._module.load_state_dict(state_dict['module_dict'])
            self.logger.info("load checkpoint succeeded: {}".format(str(f_params)))
        except Exception as e:
            self.logger.error("load checkpoint failed: {}".format(str(e)))
            sys.exit(1)

