from ais.core import *
import sys
import logging
from pathlib import Path
import torch
import torch.nn.functional as F
import numpy as np
from ais.infer import BaseInfer
from ais.image import cv_resize_shape, remove_small_objects


class SegInfer(BaseInfer):
    def __init__(self, module, device, gpus, scale, stats, **kwargs):
        """
        Semantic segmentation inference wrapper.

        :param module: network model (torch.nn.Module)
        :param device: target device the model runs on
        :param gpus: number of GPUs available
        :param scale: network input shape used when resizing images
        :param stats: normalization statistics forwarded to image_to_tensor
        :param kwargs: extra keyword arguments (currently unused)
        """
        super().__init__(module, device, gpus)
        self.device = device
        self.gpus = gpus
        # Move the model to the target device once, at construction time.
        self._module: torch.nn.Module = module.to(self.device)
        self.logger = None
        self.scale = scale
        self.stats = stats

    def pre_process(self, image: NPImage):
        """
        Pre-processing:
        1) resize to the network input shape
        2) scale pixel values to [0, 1]
        3) convert to a normalized tensor with a leading batch dimension

        :param image: input image as a numpy array
        :return: float tensor of shape (1, C, H, W)
        """
        resized = cv_resize_shape(image, self.scale, 2)
        normalized = resized.astype(np.float32) / 255.0
        tensor = image_to_tensor(normalized, self.stats)
        return tensor.unsqueeze_(0)

    def post_process(self, output: TensorImage, input_shape):
        """
        Post-processing: upsample the network logits back to the original
        image size and take the per-pixel argmax over the class dimension.

        :param output: network output logits of shape (N, C, H, W)
        :param input_shape: (height, width) of the original input image
        :return: tensor of predicted class indices, shape (N, H, W)
        """
        # align_corners=False is the effective framework default; stating it
        # explicitly pins the behavior and silences the per-call UserWarning.
        output = F.interpolate(output, size=input_shape, mode='bilinear',
                               align_corners=False)
        # Public API only: the private _stacklevel kwarg is dropped (it only
        # controlled warning attribution, never the computed values).
        # softmax is monotonic per pixel, so the argmax below is unchanged;
        # it is kept for parity with the original pipeline.
        output = F.softmax(output, dim=1)
        predicted_mask = torch.argmax(output, dim=1)
        return predicted_mask

    def process(self, image: NPImage):
        """
        Segmentation entry point: pre-process, run inference, post-process.

        :param image: input image as a numpy array
        :return: uint8 numpy mask with the same spatial size as the input
        """
        input_shape = image.shape[:2]
        input_tensor = self.pre_process(image)
        output_dict = self.inference(input_tensor)
        mask = self.post_process(output_dict, input_shape)
        return mask.squeeze().cpu().numpy().astype(np.uint8)
