#!/usr/bin/env python
# -*- coding: utf-8 -*-

""""图像识别专用."""

import os
import sys
import types
import pathlib
from six import PY3
from copy import deepcopy

from cv import imgcv
from cv.imgcv import cv2
from cv.settings import Settings as ST  # noqa
from cv.error import InvalidMatchingMethodError
from cv.transform import TargetPos

from lib.logger import LoggerManager
from cv.imgcv.template_matching import TemplateMatching
from cv.imgcv.multiscale_template_matching import MultiScaleTemplateMatching,MultiScaleTemplateMatchingPre
from cv.imgcv.keypoint_matching import KAZEMatching, BRISKMatching, AKAZEMatching, ORBMatching
from cv.imgcv.keypoint_matching_contrib import SIFTMatching, SURFMatching, BRIEFMatching
lg = LoggerManager.get_logger(__name__,write_to_file=False)


MATCHING_METHODS = {
    "tpl": TemplateMatching,
    "mstpl": MultiScaleTemplateMatchingPre,
    "gmstpl": MultiScaleTemplateMatching,
    "kaze": KAZEMatching,
    "brisk": BRISKMatching,
    "akaze": AKAZEMatching,
    "orb": ORBMatching,
    "sift": SIFTMatching,
    "surf": SURFMatching,
    "brief": BRIEFMatching,
}
"""
模板识别：（不同分辨率机型匹配易失败）
    tpl：传统的模板匹配方法
    mstpl：基于模板匹配技术，通过预先定义的目标模板与待识别图像进行比对，从而确定目标元素的位置和大小。
           相比传统的基于特征点的算法，mstpl算法具有更高的鲁棒性和适应性，能够更好地处理界面元素的变形、遮挡等问题
    gmstpl：多尺度模板匹配的另一实现（对应 MultiScaleTemplateMatching 类，与 mstpl 的 MultiScaleTemplateMatchingPre 相对应），
            采用不同的多尺度搜索策略
特征点识别：
    SIFT：尺度不变特征转换(Scale-invariant feature transform或SIFT)，是目前应用最广泛的关键点检测和描述算法，对于多图，sift的识别效果最好，它占用的CPU也比较少，但占用内存较多；
    SURF：特征描述算子，speeded-Up Robust Features。surf的识别效果也很好，对于单张图来说，cpu,内存占用中等；
    BRISK: 全称为 Binary Robust Invariant Scalable Keypoints,也是SIFT算法的一种改进型，主要是针对于旋转不变性、鲁棒性、运算速度等方面做了优化
    KAZE: 是基于非线性插值的方法，这一点在图像处理方面来说确实比SURF和SIFT要好
    AKAZE: 速度更加快，比较新的算法，只有在opencv新版本中才有,是一种高效的特征检测和描述子提取算法，它是KAZE算法的加速版本
    ORB: 特征描述算法的运行时间远优于SIFT与SURF，可用于实时性特征检测,占用CPU和内存最少，但效果最差
    BRIEF: 是一个效率很高的提取特征描述子的方法，同时，当图像发生很大的平面内的旋转,它有着很好的识别率
"""

class Template(object):
    """Picture used as a touch/swipe/wait/exists target, plus extra info for CV matching.

    Attributes:
        filename: picture file name (resolved lazily via :attr:`filepath`).
        threshold: match-confidence threshold; falls back to ``ST.THRESHOLD``.
        target_pos: which position of the matched rectangle to return (``TargetPos``).
        record_pos: position in the screen when the template was recorded.
        resolution: screen resolution when the template was recorded.
        rgb: whether to validate the match result using all three RGB channels.
        scale_max: maximum range for multi-scale template matching.
        scale_step: search step for multi-scale template matching.
    """

    def __init__(self, filename, threshold=None, target_pos=TargetPos.MID, record_pos=None, resolution=(), rgb=False, scale_max=800, scale_step=0.005):
        self.filename = filename
        self._filepath = None  # cached resolved path, filled on first filepath access
        self.threshold = threshold or ST.THRESHOLD
        self.target_pos = target_pos
        self.record_pos = record_pos
        self.resolution = resolution
        self.rgb = rgb
        self.scale_max = scale_max
        self.scale_step = scale_step

    @property
    def filepath(self):
        """Resolve ``filename`` under <project>/excelCases, caching the hit.

        Falls back to the raw ``filename`` when no such file exists there.
        """
        if self._filepath:
            return self._filepath

        parts = pathlib.Path(self.filename).parts
        # strip a leading separator so the join below stays relative
        if parts and (parts[0] == '/' or parts[0] == '\\'):
            parts = parts[1:]
        filepath = os.path.join(pathlib.Path(__file__).parents[2], 'excelCases', *parts)
        if os.path.isfile(filepath):
            self._filepath = filepath
            return self._filepath
        return self.filename

    def __repr__(self):
        filepath = self.filepath if PY3 else self.filepath.encode(sys.getfilesystemencoding())
        return "Template(%s)" % filepath

    def match_in(self, screen):
        """Match this template in *screen*; return the focus point or None."""
        match_result = self._cv_match(screen)
        lg.debug("match result: %s", match_result)
        if not match_result:
            return None
        # map the matched rectangle to the configured target position
        focus_pos = TargetPos().getXY(match_result, self.target_pos)
        return focus_pos

    def _cv_match(self, screen):
        """Try every strategy in ST.CVSTRATEGY until one returns a result.

        Returns:
            The first successful match result, or None when all strategies fail.

        Raises:
            InvalidMatchingMethodError: when CVSTRATEGY names an unknown method.
        """
        # in case the image file does not exist in the current directory:
        ori_image = self._imread()
        # plain template matching needs the template resized to the current screen
        image = self._resize_image(ori_image, screen, ST.RESIZE_METHOD)

        ret = None
        for method in ST.CVSTRATEGY:
            lg.info(f"使用图像算法【{method}】进行匹配")
            func = MATCHING_METHODS.get(method, None)
            if func is None:
                raise InvalidMatchingMethodError("Undefined method in CVSTRATEGY: '%s', try 'kaze'/'brisk'/'akaze'/'orb'/'surf'/'sift'/'brief' instead." % method)
            if method in ("mstpl", "gmstpl"):
                # multi-scale matchers scale internally, so feed the original image
                ret = self._try_match(func, ori_image, screen, threshold=self.threshold, rgb=self.rgb, record_pos=self.record_pos,
                                      resolution=self.resolution, scale_max=self.scale_max, scale_step=self.scale_step)
            else:
                ret = self._try_match(func, image, screen, threshold=self.threshold, rgb=self.rgb)
            if ret:
                break
        return ret

    @staticmethod
    def _try_match(func, *args, **kwargs):
        """Instantiate matcher *func*, run find_best_result(), map CV errors to None."""
        lg.debug("try match with %s", func.__name__)
        try:
            ret = func(*args, **kwargs).find_best_result()
        except imgcv.NoModuleError:
            lg.warning("'surf'/'sift'/'brief' is in opencv-contrib module. You can use 'tpl'/'kaze'/'brisk'/'akaze'/'orb' in CVSTRATEGY, or reinstall opencv with the contrib module.")
            return None
        except imgcv.BaseError as err:
            lg.debug(repr(err))
            return None
        return ret

    def _imread(self):
        """Read the template image from disk."""
        return imgcv.imread(self.filepath)

    def _find_keypoint_result_in_predict_area(self, func, image, screen):
        """Keypoint-match *image* inside the predicted screen area only.

        Returns:
            The match result with coordinates translated back into full-screen
            space, or None when there is no record_pos, no crop area, or no match.
        """
        if not self.record_pos:
            return None
        # calc predict area in screen
        image_wh, screen_resolution = imgcv.get_resolution(image), imgcv.get_resolution(screen)
        xmin, ymin, xmax, ymax = Predictor.get_predict_area(self.record_pos, image_wh, self.resolution, screen_resolution)
        # crop predict image from screen
        predict_area = imgcv.crop_image(screen, (xmin, ymin, xmax, ymax))
        if not predict_area.any():
            return None
        # keypoint matching in the predicted area.
        # BUGFIX: find_best_result() must be called — previously the bare matcher
        # instance was used, which broke the dict-style access below
        # ("rectangle" in ret / ret_in_area["result"]).
        ret_in_area = func(image, predict_area, threshold=self.threshold, rgb=self.rgb).find_best_result()
        if not ret_in_area:
            return None
        # translate area-local coordinates back into full-screen coordinates
        ret = deepcopy(ret_in_area)
        if "rectangle" in ret:
            for idx, item in enumerate(ret["rectangle"]):
                ret["rectangle"][idx] = (item[0] + xmin, item[1] + ymin)
        ret["result"] = (ret_in_area["result"][0] + xmin, ret_in_area["result"][1] + ymin)
        return ret

    def _resize_image(self, image, screen, resize_method):
        """Resize the template image so template matching fits the current screen."""
        # no recorded resolution -> nothing to adapt
        if not self.resolution:
            return image
        screen_resolution = imgcv.get_resolution(screen)
        # identical resolutions (or no resize strategy): use the image as-is
        if tuple(self.resolution) == tuple(screen_resolution) or resize_method is None:
            return image
        if isinstance(resize_method, types.MethodType):
            resize_method = resize_method.__func__
        # resolutions differ: adapt (default strategy is cocos_min_strategy)
        h, w = image.shape[:2]
        w_re, h_re = resize_method(w, h, self.resolution, screen_resolution)
        # keep at least one pixel in each dimension
        w_re, h_re = max(1, w_re), max(1, h_re)
        lg.debug("resize: (%s, %s)->(%s, %s), resolution: %s=>%s" % (
                        w, h, w_re, h_re, self.resolution, screen_resolution))
        return cv2.resize(image, (w_re, h_re))


class Predictor(object):
    """
    this class predicts the press_point and the area to search im_search.
    """

    DEVIATION = 100

    @staticmethod
    def count_record_pos(pos, resolution):
        """计算坐标对应的中点偏移值相对于分辨率的百分比."""
        _w, _h = resolution
        # 都按宽度缩放，针对G18的实验结论
        delta_x = (pos[0] - _w * 0.5) / _w
        delta_y = (pos[1] - _h * 0.5) / _w
        delta_x = round(delta_x, 3)
        delta_y = round(delta_y, 3)
        return delta_x, delta_y

    @classmethod
    def get_predict_point(cls, record_pos, screen_resolution):
        """预测缩放后的点击位置点."""
        delta_x, delta_y = record_pos
        _w, _h = screen_resolution
        target_x = delta_x * _w + _w * 0.5
        target_y = delta_y * _w + _h * 0.5
        return target_x, target_y

    @classmethod
    def get_predict_area(cls, record_pos, image_wh, image_resolution=(), screen_resolution=()):
        """Get predicted area in screen."""
        x, y = cls.get_predict_point(record_pos, screen_resolution)
        # The prediction area should depend on the image size:
        if image_resolution:
            predict_x_radius = int(image_wh[0] * screen_resolution[0] / (2 * image_resolution[0])) + cls.DEVIATION
            predict_y_radius = int(image_wh[1] * screen_resolution[1] / (2 * image_resolution[1])) + cls.DEVIATION
        else:
            predict_x_radius, predict_y_radius = int(image_wh[0] / 2) + cls.DEVIATION, int(image_wh[1] / 2) + cls.DEVIATION
        area = (x - predict_x_radius, y - predict_y_radius, x + predict_x_radius, y + predict_y_radius)
        return area
