#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright 2020 Huawei Technologies Co., Ltd
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import time
import copy
from queue import Queue
import cv2
import acl
import numpy as np
from .pre_process import PreProcess
from .inference import Inference
from .pos_process import PostProcess
sys.path.append("..")
from resource.resource import ascend_resource
from base.dvpp import DvppInfo
from common.log import Log, check_ret
from base.stream_puller import AscendStreamPuller,\
                                run_stream,\
                                del_stream
from common.const import PIXEL_FORMAT_YUV_SEMIPLANAR_420, H264_BASELINE_LEVEL,\
                        DVPP, DEVICE, HOST,\
                        INFO, WARNING, ERROR


class WorkFlow():
    """Multi-channel video analysis pipeline for Ascend devices.

    Stages, each running on its own pooled host thread:
      1. stream pulling   -> queue['puller']     (one thread total)
      2. preprocess       -> queue['preprocess'] (VDEC decode + VPC resize,
                                                  one thread per channel)
      3. batch assembly   -> q_batch             (one thread per device)
      4. inference        -> queue['infer']      (one thread per device)
      5. postprocess                             (one thread per device)

    Channels are spread evenly over the configured devices. DVPP/host
    buffers travel between stages by pointer; they are released explicitly
    whenever a frame is dropped (full queue) or a queue is drained during
    shutdown.
    """

    def __init__(self, command_dict, config_info_dict):
        """Cache command-line / config values and process-wide resources.

        Args:
            command_dict: device list, model path, batch size and model
                input width/height.
            config_info_dict: channel layout, rtsp addresses, frame
                geometry and the stream encode type.
        """
        self.class_name = self.__class__.__name__
        # command-line options
        self.device = command_dict['device']
        self.model_path = command_dict['model_path']
        self.batch = command_dict['batch']
        self.model_input_width = command_dict['model_input_width']
        self.model_input_height = command_dict['model_input_height']
        # stream configuration
        self.channel_count = config_info_dict['channel_count']
        self.channel_id = config_info_dict['channel_id']
        self.rtsp = config_info_dict['rtsp']
        self.data_path = None
        self.image_width = config_info_dict['width']
        self.image_height = config_info_dict['height']
        self.encode_type = config_info_dict['encode_type']
        # inter-stage queues, filled per-channel / per-device in init_queue
        self.queue = {'puller': [], 'preprocess': [],
                      'infer': [], 'postprocess': []}
        self.q_batch = []
        # shared process-wide resources
        self.thread_pool = ascend_resource.thread_pool
        self.thread_info = ascend_resource.host_thread_info
        self.memory_pool = ascend_resource.memory_pool
        self.acl_resource = ascend_resource.resource_manager
        self.acl_resource_info = ascend_resource.resource_info
        self.thread_num = 0
        # per-channel stream puller objects
        self.puller = []
        # per-channel dvpp objects and per-device inference objects
        self.dvpp = []
        self.infer = []
        # per-device postprocess objects
        self.post = []
        # run/stop flags per stage: do_* keeps a worker loop running,
        # end_* is set by the loop on exit so shutdown can be observed
        self.do_preprocess = []
        self.do_inference = []
        self.do_batch_assmble = []
        self.do_postprocess = []
        self.do_data_collection = []
        self.end_data_collection = []
        self.end_preprocess = []
        self.end_inference = []
        self.end_batch_assmble = []
        self.end_postprocess = []
        # thread layout info
        self.device_num = len(self.device)
        self.thread_arrange = None
        # offset[i]: index of the first channel handled by device i
        self.offset = [0 for n in range(0, self.device_num)]
        Log.do_log(INFO, self.class_name, 'WorkFlow', 'success to new object')

    def init_resource(self):
        """Validate config, then set up ACL, queues and the thread pool.

        Exits the process on an invalid stream configuration or an
        impossible thread arrangement.
        """
        # check the stream configuration before touching any resource
        if self.check_stream_config():
            os._exit(0)
        # init acl
        self.acl_resource.init_resource_manager()
        # init run/stop flags
        self.init_ctrl()
        # spread channel threads evenly over the devices
        if self.init_thread_arrange():
            os._exit(0)
        # init queues
        self.init_queue()
        # init threads in thread pool
        self.init_thread()
        # init runtime resource (device + context per device)
        self.init_acl_resource()
        Log.do_log(INFO, self.class_name, 'WorkFlow', 'WorkFlow init success')

    def check_stream_config(self):
        """Sanity-check the stream configuration.

        Returns:
            0 when valid, otherwise a non-zero code identifying the last
            category of error found.
        """
        info = 0
        # channel ids must be non-negative and unique.
        # Bug fix: the previous check compared only neighbouring entries,
        # so non-adjacent duplicate ids slipped through validation.
        seen = set()
        for c_id in self.channel_id:
            if c_id < 0:
                Log.do_log(ERROR, self.class_name, 'check_config', 'channel_id < 0')
                info = 1
            if c_id in seen:
                Log.do_log(ERROR, self.class_name, 'check_config', 'same channel_id')
                info = 2
            seen.add(c_id)
        # check channel_count
        if self.channel_count <= 0:
            Log.do_log(ERROR, self.class_name, 'check_config',
                'channel_count <= 0')
            info = 3
        if len(self.channel_id) != self.channel_count:
            Log.do_log(ERROR, self.class_name, 'check_config',
                'numbers of channel_id != channel_count')
            info = 4
        # every channel needs its own rtsp address
        if len(self.rtsp) != self.channel_count:
            Log.do_log(ERROR, self.class_name, 'check_config',
                'numbers of rtsp != channel_count')
            info = 5
        # only H264 baseline streams are supported
        if self.encode_type != H264_BASELINE_LEVEL:
            Log.do_log(ERROR, self.class_name, 'check_config',
                'encode_type != H264_BASELINE_LEVEL')
            info = 6
        return info

    def init_thread_arrange(self):
        """Distribute channels over devices and size the per-module buffers.

        Returns:
            0 on success, 1 when a device would receive more channels than
            the model batch can hold.
        """
        # base share for every device, remainder handed out one by one.
        # Bug fix: the previous version wrapped the hand-out loop in
        # `while True:` whose `break` only exited the inner `for`, so it
        # never terminated and kept inflating thread_arrange forever.
        num = self.channel_count // self.device_num
        self.thread_arrange = [num for n in range(0, self.device_num)]
        rest = self.channel_count - num * self.device_num
        for i in range(0, rest):
            # rest < device_num, so a single pass is always enough
            self.thread_arrange[i] += 1
        # each device assembles one frame per channel into a batch,
        # so its channel count must not exceed the model batch size
        for i in range(0, self.device_num):
            if self.thread_arrange[i] > self.batch:
                Log.do_log(ERROR, self.class_name, 'init_thread_arrage',
                'thread_arrange[{}] > batch, please run model with batch = {} at device{}'\
                .format(i, self.thread_arrange[i], self.device[i]))
                return 1
        # offset[i] = first channel index owned by device i
        offset = 0
        for i in range(0, self.device_num):
            self.offset[i] = offset
            offset += self.thread_arrange[i]
        # placeholder slots for each module's objects
        self.puller = [None for i in range(0, self.channel_count)]
        self.dvpp = [None for i in range(0, self.channel_count)]
        self.infer = [None for i in range(0, self.device_num)]
        self.post = [None for i in range(0, self.device_num)]
        return 0

    def init_acl_resource(self):
        """Set every device and create one context per device."""
        for i in range(0, self.device_num):
            self.acl_resource.device_combination(self.device[i])
            self.acl_resource.create_context(self.device[i])

    def init_ctrl(self):
        """Create the per-channel and per-device run/stop flags."""
        # a single flag for the shared stream-puller thread
        self.end_data_collection.append(False)
        for i in range(0, self.channel_count):
            self.do_data_collection.append(True)
            self.do_preprocess.append(True)
            self.end_preprocess.append(False)
        for i in range(0, self.device_num):
            self.do_inference.append(True)
            self.do_batch_assmble.append(True)
            self.do_postprocess.append(True)
            self.end_inference.append(False)
            self.end_batch_assmble.append(False)
            self.end_postprocess.append(False)

    def init_queue(self):
        """Create the bounded queues that link the pipeline stages."""
        # main pipeline: puller/preprocess are per-channel,
        # infer/postprocess are per-device
        for key in self.queue:
            if key == 'infer' or key == 'postprocess':
                for i in range(0, len(self.device)):
                    self.queue[key].append(Queue(maxsize=50))
            else:
                for i in range(0, self.channel_count):
                    self.queue[key].append(Queue(maxsize=50))
        # one batch-assembly queue per device
        for i in range(0, len(self.device)):
            self.q_batch.append(Queue(maxsize=50))

    def init_thread(self):
        """Create the host thread pool sized for all pipeline workers."""
        # channel_count preprocess threads + 3 per device
        # (batch assembly, inference, postprocess) + 1 puller thread
        self.thread_num = self.channel_count + 3 * len(self.device) + 1
        ret = self.thread_pool.host_thread_pool(self.thread_num)
        check_ret(self.class_name, 'init_thread',
                'fail to create thread pool', ret)

    def data_collection(self, i, context):
        """Create and start the stream puller for channel *i*.

        Args:
            i: global channel index.
            context: ACL context of the device that owns the channel.
        """
        puller = AscendStreamPuller(self.channel_id[i], self.rtsp[i],
                                    None, self.queue['puller'][i],
                                    self.image_width, self.image_height,
                                    context)
        puller.start_stream()
        self.puller[i] = puller

    def pre_process(self, context, i):
        """Worker loop: decode + resize frames of channel *i*.

        Pulls raw frames from queue['puller'][i], runs VDEC + VPC and
        forwards [resized_yuv, reference_yuv] to queue['preprocess'][i].
        Frames are dropped (and their DVPP buffers released) when the
        output queue is full. Sets end_preprocess[i] on exit.
        """
        # each preprocess worker gets its own stream on the device context
        stream, ret = self.acl_resource.create_stream(context)
        # dvpp configuration: decoder settings + target model input size
        dvpp_conf_dict = {'vdec': [self.channel_id[i],\
                                    self.encode_type,\
                                    PIXEL_FORMAT_YUV_SEMIPLANAR_420],\
                        'dvpp': [self.model_input_height,\
                                self.model_input_width]}
        dvpp = PreProcess(context, stream, dvpp_conf_dict)
        self.dvpp[i] = dvpp

        while self.do_preprocess[i]:
            # receive input data
            if self.queue['puller'][i].empty():
                Log.do_log(WARNING, self.class_name, 'pre_process',
                        'queue[puller][{}] is empty'.format(i))
                time.sleep(0.01)
                continue
            frame_data = self.queue['puller'][i].get()
            # vdec + vpc; None means the decoder produced no frame yet
            vdec_yuv_ref, resize_yuv_device = dvpp.run_preprocess(frame_data)
            if vdec_yuv_ref is None:
                time.sleep(0.01)
                continue
            # forward resize result to the batch assembler,
            # releasing both DVPP buffers if the queue is full
            if self.queue['preprocess'][i].full():
                self.memory_pool.release_memory(resize_yuv_device.buffer_ptr,
                                            DVPP)
                self.memory_pool.release_memory(vdec_yuv_ref.buffer_ptr,
                                            DVPP)
                Log.do_log(WARNING, self.class_name, 'pre_process',
                        'queue[preprocess][{}] is full'.format(i))
            else:
                self.queue['preprocess'][i].put([resize_yuv_device,\
                                            vdec_yuv_ref])
                Log.do_log(INFO, self.class_name, 'pre_process',
                        'queue[preprocess][{}] is sending'.format(i))
        self.end_preprocess[i] = True

    def batch_assmble(self, context, i):
        """Worker loop: gather one frame per channel of device *i* into a batch.

        Collects [ptr, size, aligned_w, aligned_h] for both the reference
        and the resized buffer of every channel owned by the device, then
        pushes [frame_ids, channel_ids, refs, devices] to q_batch[i].
        Sets end_batch_assmble[i] on exit.
        """
        ret = acl.rt.set_context(context)
        # staging buffers, reused across batches
        frame_id = [0] * self.thread_arrange[i]
        channel_id = [0] * self.thread_arrange[i]
        yuv_batch_ref = [[0, 0, 0, 0] for j in range(0, self.thread_arrange[i])]
        yuv_batch_device = [[0, 0, 0, 0] for j in range(0, self.thread_arrange[i])]
        count = 0
        while self.do_batch_assmble[i]:
            # one pass over this device's channels
            for j in range(0, self.thread_arrange[i]):
                if self.queue['preprocess'][j + self.offset[i]].empty():
                    Log.do_log(WARNING, self.class_name,
                            'batch_assmble[{}]'.format(i),
                            'queue[preprocess][{}] is empty'.format(j))
                    time.sleep(0.01)
                    continue
                else:
                    # receive data (its memory must be freed downstream)
                    [yuv_device, yuv_ref] = self.queue['preprocess'][j + self.offset[i]].get()
                    yuv_batch_ref[count][0] = yuv_ref.buffer_ptr
                    yuv_batch_ref[count][1] = yuv_ref.buffer_size
                    yuv_batch_ref[count][2] = yuv_ref.aligned_width
                    yuv_batch_ref[count][3] = yuv_ref.aligned_height
                    yuv_batch_device[count][0] = yuv_device.buffer_ptr
                    yuv_batch_device[count][1] = yuv_device.buffer_size
                    yuv_batch_device[count][2] = yuv_device.aligned_width
                    yuv_batch_device[count][3] = yuv_device.aligned_height
                    # frame_id and channel_id travel with the batch
                    frame_id[count] = yuv_device.frame_id
                    channel_id[count] = yuv_device.channel_id
                    count += 1
                    if count >= self.thread_arrange[i]:
                        count = 0
                        # snapshot the completed batch before the staging
                        # buffers are reused (perf fix: the deepcopy used
                        # to run on every frame although only the copy at
                        # batch completion was ever consumed)
                        yuv_batch_ref_ = copy.deepcopy(yuv_batch_ref)
                        yuv_batch_device_ = copy.deepcopy(yuv_batch_device)
                        frame_id_ = copy.deepcopy(frame_id)
                        channel_id_ = copy.deepcopy(channel_id)
                        if self.q_batch[i].full():
                            Log.do_log(WARNING, self.class_name,
                                    'batch_assmble[{}]'.format(i),
                                    'self.q_batch[{}] is full'.format(i))
                            # drop the batch: release every DVPP buffer
                            for k in range(0, self.thread_arrange[i]):
                                self.memory_pool.release_memory(yuv_batch_ref_[k][0], DVPP)
                                self.memory_pool.release_memory(yuv_batch_device_[k][0], DVPP)
                        else:
                            self.q_batch[i].put([frame_id_, channel_id_, yuv_batch_ref_, yuv_batch_device_])
        self.end_batch_assmble[i] = True

    def inference(self, context, i):
        """Worker loop: run the model on assembled batches of device *i*.

        Consumes q_batch[i], forwards [frame_ids, channel_ids,
        infer_result, refs] to queue['infer'][i]; when that queue is full
        the batch is dropped and all its buffers are released.
        Sets end_inference[i] on exit.
        """
        # dedicated stream for model execution on this context
        stream, ret = self.acl_resource.create_stream(context)
        # init model
        infer = Inference(context, stream, self.model_path,
                self.model_input_height, self.model_input_width,
                self.image_height, self.image_width, self.thread_arrange[i])
        self.infer[i] = infer
        while self.do_inference[i]:
            if self.q_batch[i].empty():
                Log.do_log(WARNING, self.class_name, 'inference',
                        'q_batch[{}] is empty'.format(i))
                time.sleep(0.02)
                continue
            # receive data (its memory must be freed downstream)
            [frame_id, channel_id, yuv_ref, yuv_dev] = self.q_batch[i].get()
            # inference (consumes the resized device buffers)
            infer_result = infer.run_inference(yuv_dev)
            batch_infer_result = [frame_id, channel_id, infer_result, yuv_ref]
            # send result to host, releasing everything on overflow
            if self.queue['infer'][i].full():
                Log.do_log(WARNING, self.class_name, 'inference',
                        'queue[infer][{}] is full'.format(i))
                for j in range(0, self.thread_arrange[i]):
                    self.memory_pool.release_memory(yuv_ref[j][0], DVPP)
                for j in range(0, len(infer_result)):
                    self.memory_pool.release_memory(infer_result[j][0], HOST)
            else:
                self.queue['infer'][i].put(batch_infer_result)
        self.end_inference[i] = True

    def postprocess(self, context, i):
        """Worker loop: postprocess inference results of device *i*.

        Consumes queue['infer'][i] and hands each batch to the PostProcess
        object together with the model's output description.
        Sets end_postprocess[i] on exit.
        """
        ret = acl.rt.set_context(context)
        # init post process
        post = PostProcess(context,
                self.model_input_height, self.model_input_width,
                self.image_height, self.image_width, self.thread_arrange[i])
        self.post[i] = post
        while self.do_postprocess[i]:
            # get infer result
            if self.queue['infer'][i].empty():
                # bug fix: this log used to carry the wrong function
                # name 'inference'
                Log.do_log(WARNING, self.class_name, 'postprocess',
                        'self.queue[infer][{}] is empty'.format(i))
                time.sleep(0.01)
                continue
            [frame_id, channel_id, infer_result, yuv_batch_ref]\
                                    = self.queue['infer'][i].get()
            post.run_postprocess(frame_id, channel_id,
                            infer_result, yuv_batch_ref,
                            self.infer[i].model.output_info)
        self.end_postprocess[i] = True

    def run_data_collection(self):
        """Create all pullers, then start the shared stream thread."""
        count = 0
        for i in range(0, self.device_num):
            context = self.acl_resource_info.device_context_info[self.device[i]]
            for j in range(0, self.thread_arrange[i]):
                # every channel of device i shares the device's context
                self.data_collection(count, context[0])
                count += 1
        ret = self.thread_pool.combined_function_to_host_thread(run_stream, [])
        check_ret(self.class_name, 'run',
            'fail to combine data_collection[{}] to thread'.format(i), ret)

    def run_pre_process(self):
        """Bind one preprocess worker thread per channel."""
        count = 0
        for i in range(0, self.device_num):
            context = self.acl_resource_info.device_context_info[self.device[i]]
            for j in range(0, self.thread_arrange[i]):
                ret = self.thread_pool.combined_function_to_host_thread(self.pre_process, [*context, count])
                check_ret(self.class_name, 'run',
                    'fail to combine pre_process[{}] to thread'.format(i), ret)
                count += 1

    def run_inference(self):
        """Bind batch-assembly, inference and postprocess threads per device."""
        for i in range(0, self.device_num):
            context = self.acl_resource_info.device_context_info[self.device[i]]
            ret = self.thread_pool.combined_function_to_host_thread(self.batch_assmble, [*context, i])
            check_ret(self.class_name, 'run', 'fail to combine pre_process[{}] to thread'.format(i), ret)
            ret = self.thread_pool.combined_function_to_host_thread(self.inference, [*context, i])
            check_ret(self.class_name, 'run', 'fail to combine inference[{}] to thread'.format(i), ret)
            ret = self.thread_pool.combined_function_to_host_thread(self.postprocess, [*context, i])
            check_ret(self.class_name, 'run', 'fail to combine postprocess[{}] to thread'.format(i), ret)

    def run_work_flow(self):
        """Start the whole pipeline: pullers, preprocess, inference chain."""
        self.run_data_collection()
        self.run_pre_process()
        self.run_inference()

    def stop_thread(self):
        """Ask every worker loop to stop (loops observe their do_* flag)."""
        for i in range(0, self.channel_count):
            self.do_data_collection[i] = False
            self.do_preprocess[i] = False
        for i in range(0, self.device_num):
            self.do_inference[i] = False
            self.do_batch_assmble[i] = False
            self.do_postprocess[i] = False

    def release_puller(self):
        """Stop all pullers and drain/free their queued frames."""
        count = 0
        for j in range(0, self.device_num):
            context = self.acl_resource_info.device_context_info[self.device[j]]
            ret = acl.rt.set_context(context[0])
            for i in range(0, self.thread_arrange[j]):
                self.puller[count].stop_stream()
                print(self.queue['puller'][count], self.queue['puller'][count].qsize())
                # free every frame still waiting in the puller queue
                for _ in range(0, self.queue['puller'][count].qsize()):
                    data = self.queue['puller'][count].get()
                    self.memory_pool.release_memory(data.buffer_ptr, DVPP)
                count += 1
        del_stream()
        self.end_data_collection[0] = True

    def release_preprocess(self):
        """Release dvpp objects and drain/free the preprocess queues."""
        count = 0
        for j in range(0, self.device_num):
            context = self.acl_resource_info.device_context_info[self.device[j]]
            for i in range(0, self.thread_arrange[j]):
                if self.dvpp[count]:
                    self.dvpp[count].release_preprocess()
                    for _ in range(0, self.queue['preprocess'][count].qsize()):
                        [data_dev, data_ref] = self.queue['preprocess'][count].get()
                        self.memory_pool.release_memory(data_dev.buffer_ptr, DVPP)
                        self.memory_pool.release_memory(data_ref.buffer_ptr, DVPP)
                count += 1

    def release_infer(self):
        """Release model/postprocess objects and free buffered batches."""
        for i in range(0, self.device_num):
            if self.infer[i]:
                self.infer[i].release_inference()
                # batches waiting for inference: both DVPP buffer sets
                for j in range(0, self.q_batch[i].qsize()):
                    [_, _, ref, dev] = self.q_batch[i].get()
                    for k in range(0, self.thread_arrange[i]):
                        self.memory_pool.release_memory(ref[k][0], DVPP)
                        self.memory_pool.release_memory(dev[k][0], DVPP)
                # batches waiting for postprocess: host results + refs
                for j in range(0, self.queue['infer'][i].qsize()):
                    [_, _, res, ref] = self.queue['infer'][i].get()
                    for k in range(0, len(res)):
                        self.memory_pool.release_memory(res[k][0], HOST)
                    for k in range(0, self.thread_arrange[i]):
                        self.memory_pool.release_memory(ref[k][0], DVPP)
            if self.post[i]:
                self.post[i].release_postprocess()

    def check_end(self):
        """Wait up to 5 s for the worker loops to acknowledge shutdown.

        Returns:
            The number of workers whose end_* flag was observed set.
        """
        # bug fix: end_num was only bound inside the loop body, which
        # would raise NameError at `return` if the loop never ran
        end_num = 0
        endTime = time.time() + 5
        while time.time() < endTime:
            end_num = 0
            for i in range(0, self.channel_count):
                if self.end_preprocess[i]:
                    end_num += 1
            for i in range(0, self.device_num):
                if self.end_batch_assmble[i]:
                    end_num += 1
                if self.end_postprocess[i]:
                    end_num += 1
                if self.end_inference[i]:
                    end_num += 1
            # thread_num - 1: the puller thread is not counted here
            if end_num >= self.thread_num - 1:
                break
            time.sleep(1)
        return end_num

    def release_work_flow(self):
        """Shut down all workers and release every resource in order."""
        self.stop_thread()
        end_num = self.check_end()
        print('-----------------num: {} {} -------------'.format(self.thread_num, end_num))
        self.release_puller()
        if self.end_data_collection[0]:
            print('-----------------finish puller release-----------------')
            self.release_preprocess()
            print('-----------------finish dvpp release-----------------')
            self.release_infer()
            print('-----------------finish infer release-----------------')
            self.acl_resource.release_runtime_resource()
            print('-------------------acl runtime release ---------------')
            self.thread_pool.destroy_host_thread()
            print('-------------------threads release--------------------')





