import collections
import copy
import math
import os
import time

import cv2
import numpy as np
from matplotlib import pyplot as plt

from support.logging.log import *
from support.const.state import *

class UIConfidence:
    """OpenCV-based image matching helpers (template matching + feature matching).

    All members are static; the class is used purely as a namespace.
    NOTE(review): the ``Global`` and ``Confidence`` symbols used below are
    expected to come from the ``support.*`` star imports at the top of the
    file — confirm they (and ``os``) are actually exported there.
    """

    @staticmethod
    def _save_result(path, filename, img, rectangle=None, circle=None):
        """Write an annotated copy of `img` to `path` as result_match-<name>.png.

        rectangle: ((x1, y1), (x2, y2)) drawn in red (BGR 0,0,255);
        circle: (x, y) center drawn in blue.  When `filename` is None a
        local-time timestamp is used as the name instead.
        """
        annotated = copy.deepcopy(img)
        if rectangle is not None:
            cv2.rectangle(annotated, rectangle[0], rectangle[1], (0, 0, 255), 2)
        if circle is not None:
            cv2.circle(annotated, circle, 5, (255, 0, 0), 1)
        if filename is None:
            filename = time.strftime('%Y%m%d!%H%M%S', time.localtime(time.time()))
        result_file = os.path.join(path, "result_match-{}.png".format(filename))
        cv2.imwrite(result_file, annotated)

    @staticmethod
    def _show(label, img, rectangle=None, circle=None, timeout=None):
        """Display `img` (optionally annotated) in a resizable window.

        Blocks for `timeout` ms (None -> 0 -> wait for a keypress), then
        destroys the window.  Returns the annotated copy.
        """
        annotated = copy.deepcopy(img)
        if rectangle is not None:
            cv2.rectangle(annotated, rectangle[0], rectangle[1], (0, 0, 255), 2)
        if circle is not None:
            cv2.circle(annotated, circle, 10, (0, 0, 255), 3)
        cv2.namedWindow(label, cv2.WINDOW_FREERATIO)
        cv2.imshow(label, annotated)
        cv2.waitKey(timeout if timeout is not None else 0)
        cv2.destroyWindow(label)
        return annotated

    @staticmethod
    def _calc_border_with(offset):
        """Split `offset` pixels into two border widths (first, second).

        Even offsets split evenly; odd offsets give the extra pixel to the
        second side: 4 -> (2, 2), 5 -> (2, 3), 1 -> (0, 1), 0 -> (0, 0).
        """
        half, remainder = divmod(int(offset), 2)
        return half, half + remainder

    @staticmethod
    def _calc_ex_border(img1, img2):
        """Return (top, bottom, left, right) padding widths that would grow
        the smaller of the two images to match the other's height/width."""
        h1, w1 = img1.shape[:2]
        h2, w2 = img2.shape[:2]
        top, bottom = UIConfidence._calc_border_with(abs(h1 - h2))
        left, right = UIConfidence._calc_border_with(abs(w1 - w2))
        return top, bottom, left, right

    @staticmethod
    def _sift_detect(*imgs):
        """Run SIFT on each image; return a list of (keypoints, descriptors)."""
        sift = cv2.SIFT_create()
        return [sift.detectAndCompute(img, None) for img in imgs]

    @staticmethod
    def _orb_detect(*imgs):
        """Run ORB on each image; return a list of (keypoints, descriptors)."""
        orb = cv2.ORB_create()
        return [orb.detectAndCompute(img, None) for img in imgs]

    @staticmethod
    def _surf_detect(*imgs):
        """Run SURF on each image; return a list of (keypoints, descriptors).

        FIX: SURF lives in the contrib module ``cv2.xfeatures2d``; the
        previous ``cv2.features2d.SURF_create`` attribute does not exist.
        Requires opencv-contrib-python built with nonfree algorithms.
        """
        surf = cv2.xfeatures2d.SURF_create()
        return [surf.detectAndCompute(img, None) for img in imgs]

    @staticmethod
    def _bf_knn_match(des1, des2, ratio=0.75):
        """Brute-force 2-NN match filtered by Lowe's ratio test.

        Returns "good" matches as [[DMatch], ...] (nested for
        cv2.drawMatchesKnn).  Pairs with fewer than two neighbours (possible
        when des2 holds < 2 descriptors) are skipped instead of raising.
        """
        matcher = cv2.BFMatcher()
        good = []
        for pair in matcher.knnMatch(des1, des2, k=2):
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
                good.append([pair[0]])
        return good

    @staticmethod
    def _flann_knn_match(des1, des2, ratio=0.4):
        """FLANN (kd-tree) 2-NN match filtered by Lowe's ratio test.

        Returns (good_matches, total_match_count).
        FIX: the kd-tree parameter key was misspelled "tress", so FLANN
        silently fell back to its default tree count; it is "trees".
        Short pairs are skipped like in _bf_knn_match.
        """
        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        flann = cv2.FlannBasedMatcher(index_params, search_params)
        matches = flann.knnMatch(des1, des2, k=2)
        good = []
        for pair in matches:
            if len(pair) == 2 and pair[0].distance < ratio * pair[1].distance:
                good.append([pair[0]])
        return good, len(matches)

    @staticmethod
    def _calc_point_from(good, kp):
        """Axis-aligned bounding box of the matched train-image keypoints.

        Returns ((min_x, min_y), (max_x, max_y)) as int tuples.  Raises
        ValueError on an empty `good` (callers check confidence first).
        """
        points = np.array([kp[m[0].trainIdx].pt for m in good])
        # points = UIConfidence.__remove_outlier(points)
        min_kp = points.min(axis=0)
        max_kp = points.max(axis=0)
        return tuple(map(int, min_kp)), tuple(map(int, max_kp))

    @staticmethod
    def __remove_outlier(points):
        """Drop points whose per-axis deviation from the median exceeds
        MAD * Global.MAD_THRESHOLD (currently unused; kept for experiments)."""
        median = np.median(points, axis=0)
        deviations = abs(points - median)
        mad = np.median(deviations, axis=0)
        remove_idx = np.where(deviations > mad * Global.MAD_THRESHOLD)
        return np.delete(points, remove_idx, axis=0)

    @staticmethod
    def _template_match_in(templ, img_src, confidence):
        """Search for `templ` inside `img_src` with several template-match
        methods, keeping the best colour-verified confidence.

        Returns {"confidence": float, "rectangle": [top_left, bottom_right]}
        when the best confidence exceeds `confidence`, else None.
        """
        h, w = templ.shape[:2]
        best = {"confidence": 0, "rectangle": [None, None]}
        for match_method in [cv2.TM_CCOEFF, cv2.TM_CCOEFF_NORMED,  # cv2.TM_CCORR,
                             cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED]:
            result = cv2.matchTemplate(img_src, templ, match_method)
            cv2.normalize(result, result, 0, 1, cv2.NORM_MINMAX, -1)
            _min_val, _max_val, min_loc, max_loc = cv2.minMaxLoc(result, None)
            # For SQDIFF variants smaller is better, so take the minimum.
            if match_method in (cv2.TM_SQDIFF, cv2.TM_SQDIFF_NORMED):
                match_loc = min_loc
            else:
                match_loc = max_loc
            # Re-score the candidate region channel-by-channel instead of
            # trusting the normalized matrix value.
            _confidence = UIConfidence._get_confidence_from_matrix(
                img_src, templ, match_loc, _max_val, w, h)
            if _confidence > best['confidence']:
                best['confidence'] = _confidence
                best['rectangle'][0] = match_loc
                best['rectangle'][1] = (match_loc[0] + w, match_loc[1] + h)
        if Global.DEBUG_SHOW or Global.DEBUG_SHOW_MATCH_DETAIL:
            annotated = copy.deepcopy(img_src)
            rectangle = best['rectangle']
            # Guard: rectangle stays [None, None] when nothing scored > 0;
            # the old code passed None corners straight to cv2.rectangle.
            if rectangle[0] is not None:
                cv2.rectangle(annotated, rectangle[0], rectangle[1], (0, 0, 255), 2)
            top, bottom, left, right = UIConfidence._calc_ex_border(templ, img_src)
            padded_templ = cv2.copyMakeBorder(templ, top, bottom, left, right,
                                              cv2.BORDER_CONSTANT, value=(255, 255, 255))
            preview = np.hstack((padded_templ, annotated))
            UIConfidence._show("confidence ratio:{}".format(best['confidence']),
                               preview, timeout=Global.BLOB_SHOW_TIMEOUT)
        if best['confidence'] > confidence:
            return best
        return None

    @staticmethod
    # Ported from Airtest template.py
    def _get_confidence_from_matrix(im_source, im_search, max_loc, max_val, w, h):
        """Confidence of a candidate: crop the w x h region at `max_loc` from
        `im_source` and compare it channel-wise against `im_search`.

        `max_val` is unused but kept for signature compatibility with callers.
        """
        img_crop = im_source[max_loc[1]:max_loc[1] + h, max_loc[0]:max_loc[0] + w]
        return UIConfidence.cal_rgb_confidence(img_crop, im_search)

    @staticmethod
    # Ported from Airtest cal_confidence.py
    def cal_rgb_confidence(img_src_rgb, img_sch_rgb):
        """Similarity of two same-size colour images: per-channel
        TM_CCOEFF_NORMED in HSV space, worst channel wins.

        The search image is padded by 10px (replicated border) so the crop can
        slide slightly, tolerating small localisation offsets.
        """
        # Widen the comparison area around the search image.
        img_sch_rgb = cv2.copyMakeBorder(img_sch_rgb, 10, 10, 10, 10, cv2.BORDER_REPLICATE)
        # HSV emphasises colour differences more than raw BGR.
        img_src_hsv = cv2.cvtColor(img_src_rgb, cv2.COLOR_BGR2HSV)
        img_sch_hsv = cv2.cvtColor(img_sch_rgb, cv2.COLOR_BGR2HSV)
        src_channels = cv2.split(img_src_hsv)
        sch_channels = cv2.split(img_sch_hsv)
        channel_confidence = [0, 0, 0]
        for i in range(3):
            # FIX: the padded (larger) search image must be the haystack
            # (first argument) and the crop the template, matching upstream
            # Airtest; the previous order made the template larger than the
            # image, which cv2.matchTemplate rejects.
            res_temp = cv2.matchTemplate(sch_channels[i], src_channels[i],
                                         cv2.TM_CCOEFF_NORMED)
            _min_val, max_val, _min_loc, _max_loc = cv2.minMaxLoc(res_temp)
            channel_confidence[i] = max_val
        return min(channel_confidence)

    @staticmethod
    def _feature_match_in(img_src, img_tag, confidence):
        """SIFT + FLANN feature matching of `img_src` against `img_tag`.

        Confidence is the ratio of good (Lowe-filtered) matches to total
        2-NN matches.  Returns {"confidence": ..., "rectangle": [min_pt,
        max_pt]} when the ratio reaches `confidence`, else None.
        """
        ret = None
        if Global.MATCH_THROUGH_GRAY:
            img_src = cv2.cvtColor(img_src, cv2.COLOR_BGR2GRAY)
            img_tag = cv2.cvtColor(img_tag, cv2.COLOR_BGR2GRAY)
        detect_infos = UIConfidence._sift_detect(img_src, img_tag)
        good, count = UIConfidence._flann_knn_match(detect_infos[0][1], detect_infos[1][1])
        real_confidence = len(good) / count if count else 0
        # Guard on non-empty `good` so _calc_point_from cannot blow up on a
        # zero-size array when a caller passes confidence == 0.
        if real_confidence >= confidence and good:
            min_pt, max_pt = UIConfidence._calc_point_from(good, detect_infos[1][0])
            ret = {"confidence": real_confidence, "rectangle": [min_pt, max_pt]}
        if Global.DEBUG_SHOW or Global.DEBUG_SHOW_MATCH_DETAIL:
            img_ret = cv2.drawMatchesKnn(img_src, detect_infos[0][0], img_tag,
                                         detect_infos[1][0], good, None, flags=2)
            UIConfidence._show("good ratio:{}".format(real_confidence), img_ret,
                               timeout=Global.BLOB_SHOW_TIMEOUT)
        return ret

    @staticmethod
    def air_match_in(img_src, img_tag, confidence):
        """Template-match first; fall back to feature matching on failure.

        NOTE(review): _template_match_in's first parameter is the template, so
        `img_src` is searched for inside `img_tag` -- confirm this ordering is
        what the call sites intend.
        """
        ret = UIConfidence._template_match_in(img_src, img_tag, confidence)
        if ret is None:
            # Template matching found nothing good enough; try feature matching.
            print("基于模板未匹配上合适图形，尝试基于特征匹配")
            ret = UIConfidence._feature_match_in(img_src, img_tag, confidence)
        return ret

    @staticmethod
    def screen_match(last, current, confidence=Confidence.SCREEN_SIMILARITY):
        """True when `last` can be matched inside `current` at the given
        similarity threshold (i.e. the two screens look the same)."""
        return UIConfidence.air_match_in(last, current, confidence) is not None

    @staticmethod
    # input fmt: [(left, top), (right, bottom)]
    # return codes per the original note: left 1, top 2, right 3, bottom 4
    def getDirection(src, dst):
        """Relative position code of `dst` with respect to `src`.

        1: dst is entirely right of src; 2: entirely below; 3: entirely left;
        4: entirely above; 0: the boxes overlap.  When several conditions hold
        the later check wins (original precedence preserved).
        NOTE(review): the legacy labels ("left 1, top 2, ...") read like swipe
        directions rather than positions -- confirm with callers.
        """
        ret = 0
        if dst[0][0] >= src[1][0]:
            ret = 1
        if dst[0][1] >= src[1][1]:
            ret = 2
        if dst[1][0] <= src[0][0]:
            ret = 3
        if dst[1][1] <= src[0][1]:
            ret = 4
        return ret

    @staticmethod
    def detectColor(img, color):
        """Fraction of pixels in `img` lying within the inclusive range
        [color[0], color[1]] (interpreted in the image's own colour space).

        Vectorized: the original per-pixel Python double loop over the mask
        produced the same value at O(H*W) interpreter cost.  Returns 0.0 for
        an empty image instead of raising ZeroDivisionError.
        """
        lower_range = np.array(color[0])
        upper_range = np.array(color[1])
        mask = cv2.inRange(img, lower_range, upper_range)
        if mask.size == 0:
            return 0.0
        return np.count_nonzero(mask) / mask.size