# encoding=utf-8
import torch
import torch_npu
import torch.nn as nn
from torchvision import models
import time
import cv2
import numpy


class DensenetGrade:
    """DenseNet-121 classifier with a 5-way head, running on an Ascend NPU.

    Loads fine-tuned weights from a checkpoint, applies a circular-crop +
    local-contrast preprocessing pipeline to a single image, and returns
    class probabilities plus the argmax index.
    """

    def __init__(self, pth_path: str):
        """Build the model, load the checkpoint, move it to the NPU in eval mode.

        :param pth_path: path to a full state dict for the 5-class model.
        """
        # pretrained=False: load_state_dict below is strict, so the checkpoint
        # must contain every parameter — downloading ImageNet weights only to
        # overwrite them immediately is wasted work.
        self.model = models.densenet121(pretrained=False)
        self.model.classifier = nn.Linear(self.model.classifier.in_features, 5)
        self.model.load_state_dict(torch.load(pth_path, map_location=torch.device('cpu')))
        self.model = self.model.npu()
        self.model.eval()

    def softmax(self, x):
        """Numerically stable softmax over all elements of ``x``."""
        # Subtracting the max before exp avoids overflow for large logits;
        # the result is mathematically unchanged.
        e_x = numpy.exp(x - numpy.max(x))
        return e_x / numpy.sum(e_x)

    @torch.no_grad()
    def infer(self, image_path: str):
        """Classify one image.

        :param image_path: path to the input image file.
        :return: ``{'index': argmax class, 'pie': list of 5 probabilities}``
        """
        batch = self._preprocess(image_path)
        logits = self.model(batch.npu()).cpu().numpy()
        # logits has shape (1, 5): drop the batch axis before softmax so
        # _postprocess receives a flat probability list. (Previously the
        # nested [[p0..p4]] list made 'index' always evaluate to 0.)
        probs = self.softmax(logits[0]).tolist()
        return self._postprocess(probs)

    def _postprocess(self, pred_arr):
        """Wrap a flat probability list into the response payload."""
        return {
            'index': pred_arr.index(max(pred_arr)),
            'pie': pred_arr
        }

    def _preprocess(self, data_in: str):
        """Load, crop, and convert an image to a (1, 3, 224, 224) float32 tensor."""
        img_arr = self.circle_crop(data_in) / 255.0
        # NOTE(review): circle_crop ends with a BGR->RGB conversion, and the
        # ::-1 below flips the channels back to BGR before HWC->CHW.
        # Presumably this matches how the checkpoint was trained — confirm,
        # since torchvision models conventionally expect RGB input.
        x = numpy.expand_dims(img_arr[:, :, ::-1].astype(numpy.float32).transpose((2, 0, 1)), axis=0)
        return torch.from_numpy(x)

    def circle_crop(self, image_src: str):
        """Crop the circular region of interest, then enhance local contrast."""
        cropped = self.crop_image_from_mask(image_src)
        return self.crop_image_with_gaussian(cropped)

    def crop_image_from_mask(self, image_src: str):
        """Crop a centered square around the bright foreground and zero out
        everything outside its inscribed circle."""
        image = cv2.imread(image_src)

        # Threshold a grayscale copy so near-black background pixels become 0.
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, binary_image = cv2.threshold(gray_image, 7, 255, cv2.THRESH_BINARY)

        # Bounding rect of the foreground; the inscribed-circle radius is
        # half the shorter side.
        x, y, w, h = cv2.boundingRect(binary_image)
        center = (w // 2), (h // 2)
        radius = min(center)
        y = y + center[1] - radius
        x = x + center[0] - radius
        copy_image = image[y: y + 2 * radius, x: x + 2 * radius]

        # Filled circle of ones; multiplying zeroes the square's corners.
        mask = numpy.zeros_like(copy_image)
        cv2.circle(mask, (radius, radius), radius, (1, 1, 1), -1)

        return copy_image * mask

    def crop_image_with_gaussian(self, data_in: numpy.ndarray):
        """Resize to 224x224 and boost local contrast: 4*(image - blur) + 128."""
        ori_image = cv2.resize(data_in, (224, 224)).astype(numpy.float32)
        blurred = cv2.GaussianBlur(ori_image, (11, 11), 10.0)
        exposure = cv2.addWeighted(ori_image, 4, blurred, -4, 128)
        # Clip before the uint8 cast to avoid wrap-around on out-of-range values.
        exposure = numpy.clip(exposure, 0, 255).astype(numpy.uint8)
        return cv2.cvtColor(exposure, cv2.COLOR_BGR2RGB)


if __name__ == '__main__':
    # AI Core tuning notes:
    #   Query:            npu-smi info -t cpu-num-cfg -i 0 -c 0
    #   Set 3 AI cores:   sudo npu-smi set -t cpu-num-cfg -i 0 -c 0 -v 3:1:0
    #
    # torch-npu observations:
    #   On the 310B, Ascend's PyTorch API first JIT-compiles the model for the
    #   device. Limited by this device's CPU, that step takes a very long time
    #   and finally produces a kernel_meta cache folder plus
    #   fusion_result.json and ge_check_op.json. Very little work actually
    #   lands on the NPU, so this device should not run torch_npu directly.
    #
    #   On this edge device, convert the model on x86 first, then develop on
    #   the board with the ACL Python API (or the Ascend C++ API).
    #
    # Pure-CPU timings (-v 0:4:0):
    #   init cost:  0.7281102529999544 s
    #   infer cost: 0.7388063439999542 s
    # Under torch_npu the driver is so inefficient that runs take roughly
    # 10 minutes, so those numbers are not recorded.
    start = time.perf_counter()
    grade = DensenetGrade("state.pth")
    print("init cost", time.perf_counter() - start)

    # time cost
    start = time.perf_counter()
    print(grade.infer("1.jpg"))
    print("infer cost", time.perf_counter() - start)
