# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================

import argparse
import glob
import json
import os
from contextlib import ExitStack
from PIL import Image
import numpy as np
from StreamManagerApi import StreamManagerApi
from StreamManagerApi import MxDataInput
from StreamManagerApi import InProtobufVector
from StreamManagerApi import MxProtobufIn
import MxpiDataType_pb2 as MxpiDataType


def resize(img, size, interpolation=Image.BILINEAR):
    """Resize a PIL image, mimicking torchvision.transforms.Resize.

    If ``size`` is an int, the shorter edge is scaled to ``size`` while
    preserving the aspect ratio; if it is a (h, w) sequence, the image is
    resized to exactly that size.
    """
    if not isinstance(size, int):
        # Sequence is (h, w) but PIL expects (w, h), hence the reversal.
        return img.resize(size[::-1], interpolation)

    w, h = img.size
    if (w <= h and w == size) or (h <= w and h == size):
        # Shorter edge already matches the target; return unchanged.
        return img
    if w < h:
        new_w, new_h = size, int(size * h / w)
    else:
        new_w, new_h = int(size * w / h), size
    return img.resize((new_w, new_h), interpolation)


def center_crop(img, out_height, out_width):
    """Crop the central (out_height, out_width) window from an HWC array."""
    height, width, _ = img.shape
    # int(.../2) truncation (not floor division) kept deliberately so the
    # slice bounds match the original torchvision-style behavior exactly.
    rows = slice(int((height - out_height) / 2), int((height + out_height) / 2))
    cols = slice(int((width - out_width) / 2), int((width + out_width) / 2))
    return img[rows, cols]


def tranform(in_file):
    """Preprocess one image file into a 1x3x224x224 float32 NCHW tensor.

    Mirrors the torchvision pipeline: Resize(256) -> CenterCrop(224) ->
    ToTensor -> Normalize with ImageNet mean/std.

    NOTE: the name keeps the original 'tranform' spelling so existing
    callers keep working.
    """
    short_edge = 256
    crop_size = 224
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)

    image = Image.open(in_file).convert('RGB')
    image = resize(image, short_edge)  # transforms.Resize(256)
    arr = np.array(image, dtype=np.float32)
    print("img.shape=", arr.shape)
    arr = center_crop(arr, crop_size, crop_size)  # transforms.CenterCrop(224)
    arr = arr / 255.  # transforms.ToTensor()
    # Normalize: broadcasting over the trailing channel axis is equivalent
    # to the per-channel (x - mean) / std assignments.
    arr = (arr - mean) / std

    arr = arr.transpose(2, 0, 1)  # HWC -> CHW
    return np.expand_dims(arr, 0)


class GlobDataLoader():
    """Iterator yielding (file_name, label, raw_bytes) for files matching a glob.

    The label is always None here (ground truth is not known at load time);
    ``limit`` caps how many items are produced.
    """

    def __init__(self, glob_pattern, limit=None):
        self.glob_pattern = glob_pattern
        self.limit = limit
        self.file_list = self.get_file_list()
        self.cur_index = 0

    def get_file_list(self):
        # Lazy iterator: avoids materializing huge file lists up front.
        return glob.iglob(self.glob_pattern)

    def __iter__(self):
        return self

    def __next__(self):
        if self.cur_index == self.limit:
            raise StopIteration()
        path = next(self.file_list)
        with open(path, 'rb') as handle:
            raw = handle.read()
        self.cur_index += 1
        return get_file_name(path), None, raw


class Predictor():
    """Context manager that owns a MindX SDK stream and runs inference.

    Usage:
        with Predictor(pipeline_conf, stream_name) as p:
            for name, result in p.predict(dataset):
                ...
    """

    def __init__(self, pipeline_conf, stream_name):
        # pipeline_conf: path to the .pipeline JSON config file.
        # stream_name: bytes name of the stream declared in that config.
        self.pipeline_conf = pipeline_conf
        self.stream_name = stream_name

    def __enter__(self):
        """Initialize the stream manager and create the configured streams."""
        self.stream_manager_api = StreamManagerApi()
        ret = self.stream_manager_api.InitManager()
        if ret != 0:
            raise Exception(f"Failed to init Stream manager, ret={ret}")

        # create streams by pipeline config file
        with open(self.pipeline_conf, 'rb') as f:
            pipeline_str = f.read()
        ret = self.stream_manager_api.CreateMultipleStreams(pipeline_str)
        if ret != 0:
            raise Exception(f"Failed to create Stream, ret={ret}")
        self.data_input = MxDataInput()

        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always tear the streams down, even if inference raised.
        self.stream_manager_api.DestroyAllStreams()

    def predict(self, dataset):
        """Yield (file_name, parsed_json_result) for every item in dataset."""
        print("Start predict........")
        print('>' * 30)
        for name, _, data in dataset:
            self.data_input.data = data
            yield self._predict(name, self.data_input)
        print("predict end.")
        print('<' * 30)

    def _predict(self, name, data):
        """Run one sample through the stream and decode the JSON result."""
        protobuf_data = self._predict_gen_protobuf(name)
        self._predict_send_protobuf(self.stream_name, 0, protobuf_data)
        result = self._predict_get_result(self.stream_name, 0)
        return name, json.loads(result.data.decode())

    def _predict_gen_protobuf(self, name):
        """Preprocess the image named `name` and wrap it in an MxpiVisionList protobuf."""
        # NOTE(review): this re-parses sys.argv on every call; it works, but
        # the glob argument could be parsed once and cached by the caller.
        args = parse_args()
        file_path = f"{args.glob}" + name + ".JPEG"

        img_np = tranform(file_path)
        print("*********file_path is: ", file_path)

        vision_list = MxpiDataType.MxpiVisionList()
        vision_vec = vision_list.visionVec.add()
        vision_vec.visionInfo.format = 0
        vision_vec.visionInfo.width = 224
        vision_vec.visionInfo.height = 224
        vision_vec.visionInfo.widthAligned = 224
        vision_vec.visionInfo.heightAligned = 224
        vision_vec.visionData.memType = 0
        vision_vec.visionData.dataStr = img_np.tobytes()
        # BUG FIX: len(img_np) is the first-axis length (1 for the NCHW
        # tensor), not the payload size; nbytes matches len(dataStr).
        vision_vec.visionData.dataSize = img_np.nbytes
        protobuf = MxProtobufIn()
        protobuf.key = b"appsrc0"
        protobuf.type = b'MxTools.MxpiVisionList'
        protobuf.protobuf = vision_list.SerializeToString()
        protobuf_vec = InProtobufVector()
        protobuf_vec.push_back(protobuf)
        return protobuf_vec

    def _predict_send_protobuf(self, stream_name, in_plugin_id, data):
        # Check the return code instead of silently dropping failures,
        # consistent with _predict_send_data below.
        ret = self.stream_manager_api.SendProtobuf(stream_name, in_plugin_id,
                                                   data)
        if ret != 0:
            raise Exception(f"Failed to send protobuf to stream, ret={ret}")

    def _predict_send_data(self, stream_name, in_plugin_id, data_input):
        unique_id = self.stream_manager_api.SendData(stream_name, in_plugin_id,
                                                     data_input)
        if unique_id < 0:
            raise Exception("Failed to send data to stream")
        return unique_id

    def _predict_get_result(self, stream_name, unique_id):
        result = self.stream_manager_api.GetResult(stream_name, unique_id)
        if result.errorCode != 0:
            raise Exception(
                f"GetResultWithUniqueId error."
                f"errorCode={result.errorCode}, msg={result.data.decode()}")
        return result


def get_file_name(file_path):
    """Return the basename of file_path without extension (trailing '/' stripped)."""
    base = os.path.basename(file_path.rstrip('/'))
    stem, _ = os.path.splitext(base)
    return stem


def result_encode(file_name, result):
    """Format one prediction dict as 'file_name id1,id2,...\\n'."""
    class_entries = result.get("MxpiClass", [])
    pred_class_ids = ','.join(str(entry.get('classId')) for entry in class_entries)
    print("pred_class_ids=", pred_class_ids)
    return f"{file_name} {pred_class_ids}\n"


def parse_args():
    """Parse CLI arguments: the image glob pattern and the result directory."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Two positional arguments, declared data-driven to keep them in one place.
    for arg_name, arg_help in (('glob', 'img pth glob pattern.'),
                               ('result_file', 'result file')):
        parser.add_argument(arg_name, help=arg_help)
    return parser.parse_args()


def main():
    """Run inference over every file matched by the glob and write per-image results."""
    pipeline_conf = "../data/config/densenet161.pipeline"
    stream_name = b'im_densenet161'
    args = parse_args()
    result_dir = args.result_file
    # Create the output directory if needed (idempotent).
    os.makedirs(result_dir, exist_ok=True)
    dataset = GlobDataLoader(args.glob + "/*", limit=50000)
    # A plain `with` replaces the single-manager ExitStack: same semantics.
    with Predictor(pipeline_conf, stream_name) as predictor:
        for fname, pred_result in predictor.predict(dataset):
            out_path = os.path.join(result_dir, '{}_1.txt'.format(fname))
            with open(out_path, 'w') as f_write:
                res_vec = pred_result.get('MxpiClass')
                f_write.writelines(str(item.get("classId")) + " " for item in res_vec)
                f_write.write('\n')
    print(f"success, result in {result_dir}")


# Script entry point.
if __name__ == "__main__":
    main()
