from ais.core import *
import sys
import logging
from pathlib import Path
import torch
import numpy as np
from ais.infer import BaseInfer
from ais.model import LandMarkVertebraDecoder
from ais.image import cv_resize_shape
from ais.data import sort_any_curve_points

class KeypointInfer(BaseInfer):
    """Heatmap-based keypoint (vertebra landmark) inference pipeline.

    Wraps a trained network: resizes/normalizes an input image, runs the
    forward pass, decodes per-vertebra corner points, and maps the points
    back to the original image resolution.
    """

    def __init__(self, module, device, gpus, scale, **kwargs):
        """
        Keypoint inference.
        :param module: net model (torch.nn.Module)
        :param device: target device for the model
        :param gpus: number of GPUs
        :param scale: network input shape; consumed by cv_resize_shape and
            reversed as self.scale[::-1] in process() — presumably
            (height, width); verify against cv_resize_shape's contract
        :param kwargs: must contain 'down_ratio', the network output stride
            used to rescale decoded points back to network-input coordinates
        """
        super().__init__(module, device, gpus)
        # Move the model to the target device once; avoids the original
        # pattern of assigning self._module twice.
        self._module: torch.nn.Module = module.to(device)
        self.device = device
        self.gpus = gpus
        # Decoder is fixed to 17 vertebrae with a 0.2 confidence threshold.
        self.decoder = LandMarkVertebraDecoder(num=17, conf_thresh=0.2)
        self.down_ratio = kwargs['down_ratio']
        self.logger = None
        self.scale = scale

    def pre_process(self, image: NPImage):
        """
        Preprocess an image for the network:
        1) resize to the network input shape
        2) normalize to [-0.5, 0.5]
        3) convert to a CHW float tensor with a batch dimension
        :param image: HWC uint8 image (assumes 3 channels — the transpose
            below requires a channel axis; TODO confirm)
        :return: torch.FloatTensor of shape (1, C, H, W)
        """
        resized = cv_resize_shape(image, self.scale)
        normalized = resized.astype(np.float32) / 255. - 0.5
        chw = normalized.transpose(2, 0, 1)
        return torch.from_numpy(chw).unsqueeze_(0)

    def post_process(self, output):
        """
        Decode network output into sorted landmark points.
        :param output: dict with 'hm' (heatmap), 'wh' (size), 'reg' (offset)
            heads, as produced by the forward pass
        :return: points sorted along the curve by sort_any_curve_points
        """
        out = self.decoder.det_decode(output['hm'], output['wh'], output['reg'])
        # Columns 0-1 appear to be the box center, 2-9 the four corner
        # points (x, y) — NOTE(review): inferred from slicing; confirm
        # against LandMarkVertebraDecoder.det_decode.
        pts = np.asarray(out[:, :10].copy(), np.float32)
        pts *= self.down_ratio  # map from feature-map to input coordinates
        # Sort detections top-to-bottom by the y coordinate (column 1).
        pts = pts[np.argsort(pts[:, 1])]
        landmark = pts[:, 2:10]

        # Group the 8 values per detection into four (x, y) points,
        # keyed 1..N for the curve sorter.
        point_dicts = {
            i + 1: [landmark[i, j * 2:(j + 1) * 2] for j in range(4)]
            for i in range(landmark.shape[0])
        }
        return sort_any_curve_points(point_dicts)

    def process(self, image: NPImage):
        """
        Keypoint detection entry point.
        :param image: HWC input image at its original resolution
        :return: landmark points mapped back to the original image scale
        """
        input_shape = image.shape[:2]
        input_tensor = self.pre_process(image)
        output_dict = self.inference(input_tensor)
        landmark = self.post_process(output_dict)

        # `np.float` was removed from NumPy (deprecated 1.20, removed 1.24);
        # the builtin float is the documented replacement.
        # ratio = (net_w, net_h) / (orig_w, orig_h); dividing the landmarks
        # by it rescales them to the original image. Assumes the points'
        # last axis is (x, y) so the 2-vector broadcasts — TODO confirm
        # against sort_any_curve_points' return shape.
        ratio = (np.array(self.scale[::-1]).astype(float)
                 / np.array(input_shape[::-1]).astype(float))
        return landmark / ratio
