#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import acl
import numpy as np
sys.path.append("..")
from base.model import AscendModel
from base.dvpp import DvppInfo
from common import const
from common.log import Log, check_ret
from resource.resource import ascend_resource


class Inference():
    """Run batched model inference on DVPP-preprocessed YUV images.

    On construction this helper loads the offline model, validates that
    the model's expected input resolution matches the configured one,
    allocates device memory for the model's two inputs (image data and
    image info), and uploads the static image-info tensor once.
    """

    def __init__(self, context, stream, model_path,
                model_input_height, model_input_width,
                image_height, image_width, thread_arrange):
        """Initialize the inference helper and prepare both model inputs.

        Args:
            context: ACL context handle.
            stream: ACL stream handle.
            model_path: path to the offline (.om) model file.
            model_input_height: input height the model expects (NCHW dim 2).
            model_input_width: input width the model expects (NCHW dim 3).
            image_height: source image height.
            image_width: source image width.
            thread_arrange: number of images batched per inference run.
        """
        self.class_name = self.__class__.__name__
        self.context = context
        self.stream = stream
        self.model_path = model_path
        self.model_input_height = model_input_height
        self.model_input_width = model_input_width
        self.image_height = image_height
        self.image_width = image_width
        self.thread_arrange = thread_arrange
        self.memory_pool = ascend_resource.memory_pool
        # filled in by init_inference()
        self.model = None
        self.input_data = None
        self.input_info_data = None
        self.input_size_first = 0
        self.input_size_sec = 0
        self.init_inference()
        self.send_info_to_model()

    def init_inference(self):
        """Load the model, validate its input shape and allocate both
        device-side input buffers.
        """
        self.model = AscendModel(self.context, self.stream, self.model_path)
        # check model input width and height against the configured values
        dims, ret = acl.mdl.get_input_dims(self.model.model_desc, 0)
        check_ret(self.class_name, 'init_inference', 'fail to get dims', ret)
        if dims["dims"][2] != self.model_input_height or\
           dims["dims"][3] != self.model_input_width:
            # bug fix: the original passed model_input_height twice and
            # misspelled "width" in the message; the width slot now gets
            # model_input_width.
            # NOTE(review): ret is 0 (success) at this point, so if
            # check_ret only acts on non-zero codes this mismatch may go
            # unreported — confirm check_ret's contract.
            check_ret(self.class_name, 'init_inference',
                'model input height[{}] and width[{}] != {} and {}'.\
                format(self.model_input_height, self.model_input_width,
                dims["dims"][2], dims["dims"][3]), ret)
        # prepare model input place for image (input index 0)
        self.input_size_first = \
            acl.mdl.get_input_size_by_index(self.model.model_desc, 0)
        self.input_data = \
            self.memory_pool.memory_allocation_(self.input_size_first,
                                                const.DEVICE)
        # for image info (input index 1)
        self.input_size_sec = \
            acl.mdl.get_input_size_by_index(self.model.model_desc, 1)
        self.input_info_data = \
            self.memory_pool.memory_allocation_(self.input_size_sec,
                                                const.DEVICE)

    def send_info_to_model(self):
        """Upload the static image-info tensor to the device.

        Only valid for static batch and resolution mode: the same
        [height, width, height, width] record is repeated once per
        batched image.
        """
        data = [self.model_input_height, self.model_input_width,
                self.model_input_height, self.model_input_width]
        # one info record per image in the batch
        batch_data = data * self.thread_arrange
        np_data = np.asarray(batch_data, dtype=np.float32)
        ptr = acl.util.numpy_to_ptr(np_data)
        size = np_data.size * np_data.itemsize
        ret = acl.rt.memcpy(self.input_info_data, size, ptr, size,
                        const.ACL_MEMCPY_HOST_TO_DEVICE)
        # bug fix: failure was previously logged under 'inference'
        check_ret(self.class_name, 'send_info_to_model',
                'fail to memcpy from host to device', ret)

    def send_image_to_model(self, yuv_dev: DvppInfo):
        """Copy each preprocessed YUV image into the model input buffer
        and release its DVPP source buffer.

        Args:
            yuv_dev: per-image (device_ptr, byte_size) pairs, indexable
                by batch position.
        """
        for j in range(0, self.thread_arrange):
            # NOTE(review): the destination offset j * yuv_dev[j][1]
            # assumes every image buffer has the same size — confirm
            # this holds for the static-resolution pipeline.
            ret = acl.rt.memcpy(self.input_data + j * yuv_dev[j][1],
                                yuv_dev[j][1],
                                yuv_dev[j][0],
                                yuv_dev[j][1],
                                const.ACL_MEMCPY_DEVICE_TO_DEVICE)
            # bug fix: failure was previously logged under 'inference'
            check_ret(self.class_name, 'send_image_to_model',
                'fail to memcpy from device to device', ret)
            # free yuv_device
            self.memory_pool.release_memory(yuv_dev[j][0], const.DVPP)

    def get_model(self):
        """Return the wrapped AscendModel instance."""
        return self.model

    def run_inference(self, yuv_dev: DvppInfo):
        """Copy the batch of images to the device and run the model.

        Args:
            yuv_dev: per-image (device_ptr, byte_size) pairs.

        Returns:
            Whatever AscendModel.run returns for the inference result.
        """
        # send image to model
        self.send_image_to_model(yuv_dev)
        # infer
        infer_result = self.model.run(const.STATIC_MODE,
                [self.input_data, self.input_info_data],
                [self.input_size_first, self.input_size_sec])
        return infer_result

    def release_inference(self):
        """Release the model and both device input buffers (idempotent
        guards: only frees what was actually allocated).
        """
        if self.model:
            self.model.release_model_resource(self.context)
        if self.input_data:
            self.memory_pool.release_memory(self.input_data, const.DEVICE)
        if self.input_info_data:
            self.memory_pool.release_memory(self.input_info_data, const.DEVICE)
