#!/usr/bin/python3
# -*- coding: utf-8 -*-

# @file preprocess.py
# @brief
# @author QRS
# @version 1.0
# @date 2022-02-17 17:11

import cv2, os
import math
import numpy as np
from scipy.stats import mode

from frepai.utils.logger import frepai_get_logger
from frepai.utils.mptask import (
    ICallable, State, ShmNumpy,
    TaskPipelineV2
)
from frepai.core.message import MessageType as MT
from frepai.core.message import ServiceType as ST
from frepai.core.message import EventType as ET

from frepai.utils.misc import frep_data


logger = frepai_get_logger()


class VideoRemoveStillWorker(ICallable):
    """Read a video and keep only frames that differ enough from the
    previous frame (binary frame-diff based still-frame removal).

    Kept frames are written into a shared-memory buffer and a small
    record (frame index + progress) is yielded downstream.
    """

    def __init__(self, msgdata, video_path, focus_box=None, black_box=None, rmstill=None, debug=False):
        # msgdata: shared message dict; must contain 'workdir'.
        # focus_box / black_box: normalized [x1, y1, x2, y2] fractions of the
        #   frame — crop region to keep / region to blank out, respectively.
        # rmstill: dict with 'bin_thres', 'area_rate_thres', 'norm_brightness'.
        self.mdata = msgdata
        self.video_path = video_path
        self.focus_box = focus_box
        self.black_box = black_box
        self.rmstill = rmstill
        self.debug = debug
        self.workdir = msgdata['workdir']

    def init(self, H):
        """Open the capture and precompute crop boxes and thresholds."""
        cap = cv2.VideoCapture(self.video_path)
        if not cap.isOpened():
            # bugfix: a bare `raise` outside an except block produces
            # "RuntimeError: No active exception to re-raise" with no context;
            # raise an explicit, descriptive error instead.
            raise RuntimeError(f'cannot open video: {self.video_path}')
        w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        n = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        r = cap.get(cv2.CAP_PROP_FPS)
        if self.focus_box:
            self.fx1, self.fy1 = int(w * self.focus_box[0]), int(h * self.focus_box[1])
            self.fx2, self.fy2 = int(w * self.focus_box[2]), int(h * self.focus_box[3])
        else:
            # bugfix: without a focus box the fx/fy attributes were never set,
            # so the self.frw / self.frh computation below raised
            # AttributeError. Default to the full frame.
            self.fx1, self.fy1 = 0, 0
            self.fx2, self.fy2 = w, h
        if self.black_box:
            self.bx1, self.by1 = int(w * self.black_box[0]), int(h * self.black_box[1])
            # bugfix: the second assignment target was `self.by1` (typo),
            # which clobbered y1 and left self.by2 undefined for __call__.
            self.bx2, self.by2 = int(w * self.black_box[2]), int(h * self.black_box[3])
        self.cap = cap
        self.num = n
        self.fps = r
        self.frw = self.fx2 - self.fx1
        self.frh = self.fy2 - self.fy1
        if self.rmstill:
            # Minimum number of changed pixels for a frame to be kept.
            self.area_thres = math.ceil(self.rmstill['area_rate_thres'] * self.frw * self.frh)
            self.bin_thres = self.rmstill['bin_thres']
            self.norm_brightness = self.rmstill['norm_brightness']
        if self.debug:
            self.keepidxes = []   # indices of kept frames
            self.binpoints = []   # diff-area / threshold ratio per kept frame
            self.binframes = []   # downscaled binary diff images for inspection

    def __call__(self, H, x, shm_frame_out):
        """Iterate all frames; yield an info dict for each frame whose
        thresholded diff against the previous frame exceeds area_thres."""
        frame_pre = None
        frame_tmp = np.zeros((self.frh, self.frw), dtype=np.uint8)
        i = -1
        while True:
            success, frame_bgr = self.cap.read()
            if not success:
                break
            i += 1
            keep_flag = False
            if self.black_box:
                # Blank out the ignored region before cropping/diffing.
                frame_bgr[self.by1:self.by2, self.bx1:self.bx2, :] = 0
            if self.focus_box:
                frame_bgr = frame_bgr[self.fy1:self.fy2, self.fx1:self.fx2, :]
            if self.rmstill:
                if self.norm_brightness:
                    # Normalize the V channel to mean 127 / std 32.
                    h, s, v = cv2.split(cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2HSV))
                    # robustness: a flat frame has std == 0; avoid divide-by-zero.
                    std = np.std(v) or 1.0
                    v = np.array((v - np.mean(v)) / std * 32 + 127, dtype=np.uint8)
                    frame_bgr = cv2.cvtColor(cv2.merge([h, s, v]), cv2.COLOR_HSV2BGR)
                frame_gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
                if frame_pre is not None:
                    # Binary diff + open (erode/dilate) to drop speckle noise.
                    frame_tmp = cv2.absdiff(frame_gray, frame_pre)
                    frame_tmp = cv2.threshold(frame_tmp, self.bin_thres, 255, cv2.THRESH_BINARY)[1]
                    frame_tmp = cv2.erode(frame_tmp, None, iterations=1)
                    frame_tmp = cv2.dilate(frame_tmp, None, iterations=1)
                    val = np.sum(frame_tmp == 255)
                    if val > self.area_thres:
                        keep_flag = True
                        if self.debug:
                            self.keepidxes.append(i)
                            self.binpoints.append(np.round(val / self.area_thres, 2))
                            self.binframes.append(cv2.resize(frame_tmp, (120, 120)))
                frame_pre = frame_gray

            if i % 500 == 0:
                # Periodic progress report (percent) to the host.
                self.mdata['progress'] = round(i * 100 / self.num, 2)
                H.send_message(MT.SERVICE, ST.S_PRE, ET.E_PROGRESS, self.mdata)

            if keep_flag:
                shm_frame_out.lock()
                shm_frame_out.data = frame_bgr
                # NOTE(review): progress here is a 0..1 fraction while the
                # message above uses percent — presumably intentional; confirm.
                yield {'frame_idx': i, 'progress': round(i / self.num, 2)}

        logger.info('process frame progress: 100%')
        yield State.STOP

    def shutdown(self, H):
        """Release the capture and, in debug mode, dump debug arrays."""
        logger.info(f'{self.__class__.__name__} shutdown')
        if self.cap:
            self.cap.release()
        if self.debug:
            np.save(f'{self.workdir}/keepidxes.npy', self.keepidxes)
            np.save(f'{self.workdir}/binpoints.npy', self.binpoints)
            np.save(f'{self.workdir}/binframes.npy', np.asarray(self.binframes))


class VideoOpticalFlowWorker(ICallable):
    """Filter frames by dominant optical-flow direction.

    Computes dense Farneback flow against a reference frame and keeps a
    frame only when the most common flow angle (over pixels moving faster
    than mag_thres) falls inside one of the configured angle ranges.
    """

    def __init__(self, msgdata, angle_ranges=None, mag_thres=5, of_params=None, debug=False):
        # bugfix: the defaults were mutable literals ([] and {}), which are
        # shared across instances; use None sentinels instead (backward
        # compatible — passing [] / {} explicitly behaves the same).
        of_params = of_params if of_params is not None else {}
        self.mdata = msgdata
        self.params = {
            'pyr_scale': of_params.get('pyr_scale', 0.5),
            'levels': of_params.get('levels', 3),
            'winsize': of_params.get('winsize', 15),
            'iterations': of_params.get('iterations', 3),
            'poly_n': of_params.get('poly_n', 5),
            'poly_sigma': of_params.get('poly_sigma', 1.1),
            'flags': cv2.OPTFLOW_LK_GET_MIN_EIGENVALS
        }
        # (min_angle, max_angle] pairs in degrees; empty means keep nothing.
        self.angle_ranges = angle_ranges if angle_ranges is not None else []
        self.mag_thres = mag_thres
        self.workdir = msgdata['workdir']
        self.debug = debug

    def init(self, H):
        self.frame_pre = None
        self.stat_count = 0

    def __call__(self, H, x, shm_frame_in, shm_frame_out):
        self.stat_count += 1
        frame_bgr = shm_frame_in.data.copy().astype(np.uint8)
        shm_frame_in.unlock()
        frame_gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
        if self.frame_pre is None:
            # NOTE(review): frame_pre is never updated after the first frame,
            # so all flow is computed against that reference frame — confirm
            # this is intentional rather than a missing update.
            self.frame_pre = frame_gray
        else:
            flow = cv2.calcOpticalFlowFarneback(self.frame_pre, frame_gray, None, **self.params)
            mag, ang = cv2.cartToPolar(flow[:, :, 0], flow[:, :, 1], angleInDegrees=True)
            moving = ang[mag > self.mag_thres]
            # robustness: with no pixel above mag_thres, scipy's mode on an
            # empty array fails / yields NaN — just drop the frame.
            if moving.size == 0:
                return
            # np.ravel(...)[0] normalizes scipy's mode result to a scalar
            # (older scipy returns a length-1 array, newer returns a scalar).
            move_mode = float(np.ravel(mode(moving)[0])[0])
            keep_flag = False
            for r in self.angle_ranges:
                if r[0] < move_mode <= r[1]:
                    keep_flag = True
                    break
            if keep_flag:
                shm_frame_out.lock()
                shm_frame_out.data = frame_bgr
                yield x

    def shutdown(self, H):
        logger.info(f'optical flow call count: {self.stat_count}')


class VideoModelInputsWorker(ICallable):
    """Collect preprocessed frames into model-ready inputs.

    Each incoming frame is optionally tiled, resized to the model input
    size, optionally rotated, and accumulated in memory; on shutdown the
    collected frame indices and images are saved to the work directory
    and a result message is sent to the host.
    """

    def __init__(self, msgdata, input_width=112, input_height=112, rep_count=1, rot_angle=0, debug=False):
        self.mdata = msgdata
        self.input_width, self.input_height = input_width, input_height
        self.rep_count = rep_count
        self.rot_angle = rot_angle
        self.workdir = msgdata['workdir']
        self.debug = debug

    def init(self, H):
        # Precompute the rotation matrix once; None means "no rotation".
        self.rot_matrix = None
        if self.rot_angle > 0:
            center = (int(self.input_width / 2), int(self.input_height / 2))
            self.rot_matrix = cv2.getRotationMatrix2D(center=center, angle=self.rot_angle, scale=1.0)
        self.model_frames = []   # kept frame indices
        self.model_inputs = []   # preprocessed frames

    def __call__(self, H, x, shm_frame_in):
        frame = shm_frame_in.data.copy().astype(np.uint8)
        shm_frame_in.unlock()
        # Tile the frame into a rep_count x rep_count mosaic when requested.
        if self.rep_count > 1:
            row = np.hstack([frame] * self.rep_count)
            frame = np.vstack([row] * self.rep_count)
        frame = cv2.resize(frame, (self.input_width, self.input_height))
        if self.rot_matrix is not None:
            frame = cv2.warpAffine(frame, self.rot_matrix, (self.input_width, self.input_height))
        self.model_frames.append(x['frame_idx'])
        self.model_inputs.append(frame)

    def shutdown(self, H):
        """Persist the collected inputs and notify the host of completion."""
        logger.info(f'model input call count: {len(self.model_frames)}')
        np.save(f'{self.workdir}/model_frames.npy', self.model_frames)
        np.save(f'{self.workdir}/model_inputs.npy', np.asarray(self.model_inputs))
        self.mdata['progress'] = 100.0
        H.send_message(MT.SERVICE, ST.S_PRE, ET.E_RESULT, self.mdata)


from frepai.core.base import TaskBase
from frepai.constants import PREPROCESS_TOPIC


class PreProcessTask(TaskBase):
    """Preprocess-stage task: builds and runs the frame pipeline."""

    def __init__(self, logger, handler):
        super().__init__(logger, handler, ST.S_PRE, PREPROCESS_TOPIC)

    def step_run(self, cfg, H):
        """Run a hard-coded test pipeline: still-frame removal followed by
        model-input collection."""
        self.logger.info(f'pre: {cfg}')

        out_dir = '/frepai/tmp/pipetest'
        os.makedirs(out_dir, exist_ok=True)
        logger.info(out_dir)

        # Crop region as [x1, y1, x2, y2] fractions of the frame.
        focus_box = [0.318, 0.052, 0.986, 1]

        # Still-frame removal thresholds.
        rmstill = {
            'bin_thres': 20,
            'area_rate_thres': 0.03,
            'norm_brightness': False,
        }

        video_path = frep_data('file:///frepai/data/20211209080518.mp4')
        msgdata = {'workdir': out_dir, 'pigeon': {'msgkey': '11111'}}

        pipe = TaskPipelineV2(H)
        pipe.add(VideoRemoveStillWorker(msgdata, video_path, focus_box, rmstill=rmstill, debug=True), shms=[ShmNumpy('I', (640, 352, 3))])
        pipe.add(VideoModelInputsWorker(msgdata, 112, 112, debug=True))
        pipe.run()
