import cv2

from AGUtils import *
import numpy as np
import logging
import time

logger = logging.getLogger(__name__)

class AGArea:
    """A named screen region with a template image, detected in a frame by one
    of three strategies (selected by ``isSearch``):

        0 - absCompare : mean absolute diff against a fixed rect
        1 - searching  : template matching inside a search rect
        2 - siftMathch : SIFT feature matching inside a search rect

    All detectors return a score where LOWER means a better match.
    NOTE: the misspelled attribute/method names (``serachRect``,
    ``siftMathch``) are kept — they are part of the public interface.
    """

    def __init__(self, dailyName, action):
        """Build an area from an ``action`` tuple:
        (name, searchRect, clickRect, templateRect, (isRGB, isSearch, dx, dy)).

        Rects are (x1, y1, x2, y2). A rect whose first element is -1 means
        "not supplied" and falls back to the template rect. On any failure
        ``self.invalid`` stays True and the object must not be used.
        """
        # When True, each detect() call dumps an annotated debug image
        # (requires the caller to pass an `enum` with debugDirDetect/frames).
        self.save_detect_img = True
        self.invalid = True
        self.top_left = ()

        if len(action) != 5:
            logger.info("invalid action!!")
            return

        self.dailyName = dailyName
        self.name = action[0]
        self.serachRect = action[1]

        clickRect = action[2]
        if clickRect[0] == -1:
            # No explicit click rect: click the template rect instead.
            clickRect = action[3]

        # Center of the click rect. Use integer division: pixel coordinates
        # must be ints (Py2->Py3 port bug: `/` yielded floats here, while
        # searching() already int()-coerces its click).
        self.click = ((clickRect[0] + clickRect[2]) // 2,
                      (clickRect[1] + clickRect[3]) // 2,
                      self.name)

        self.templateRect = action[3]
        self.isRGB = action[4][0]
        self.isSearch = action[4][1]
        self.click_offset = action[4][2:4]

        if self.serachRect[0] == -1:
            self.serachRect = self.templateRect

        path = "asserts/{}/{}.png".format(self.dailyName, self.name)
        logger.info(path)
        self.image = cv2.imread(path)

        if self.image is None:
            logger.error("cannot load template {}".format(path))
        else:
            logger.info("{} {} search {} click {}".format(
                self.name, self.image.shape, self.isSearch, self.click))
            self.invalid = False

    def detect(self, image, enum=None):
        """Run the matcher selected by ``isSearch`` on ``image``.

        Returns the matcher's score (lower = better), or None for an
        unknown ``isSearch`` value.
        """
        if self.isSearch == 0:
            return self.absCompare(image, enum)
        elif self.isSearch == 1:
            return self.searching(image, enum)
        elif self.isSearch == 2:
            return self.siftMathch(image, enum)

    def absCompare(self, image, enum=None):
        """Mean absolute pixel difference between the template and the
        fixed ``serachRect`` crop of ``image``. Lower = better match."""
        img0 = image[self.serachRect[1]:self.serachRect[3],
                     self.serachRect[0]:self.serachRect[2]]

        if self.save_detect_img and enum is not None:
            path = "{}/{}_{}.png".format(enum.debugDirDetect, enum.frames, self.name)
            logger.info("detect deg " + path)
            cv2.imwrite(path, drawTempAndImg(self.image, img0))

        res = cv2.absdiff(img0, self.image)
        score = np.mean(res)
        logger.info("{} score {}".format(self.name, score))
        return score

    def searching(self, image, enum=None):
        """Template-match inside ``serachRect``.

        Side effects: updates ``self.top_left`` (full-image coords of the
        best match) and ``self.click`` (match center + click_offset).
        Returns the diff score from search() (lower = better).
        """
        # numpy shape is (rows, cols, channels) == (height, width, depth).
        h, w, _ = self.image.shape

        img0 = image[self.serachRect[1]:self.serachRect[3],
                     self.serachRect[0]:self.serachRect[2]]

        top_left, score = self.search(img0, self.image)

        # Translate from search-rect-local to full-image coordinates.
        self.top_left = (top_left[0] + self.serachRect[0],
                         top_left[1] + self.serachRect[1])
        logger.info("top_left {}".format(self.top_left))

        self.click = (int(self.top_left[0] + w / 2 + self.click_offset[0]),
                      int(self.top_left[1] + h / 2 + self.click_offset[1]),
                      self.name)
        logger.info(self.click)

        # Only do the debug drawing when it will actually be written out.
        if self.save_detect_img and enum is not None:
            img00 = img0.copy()
            bottom_right = (top_left[0] + w, top_left[1] + h)
            cv2.rectangle(img00, top_left, bottom_right, 255, 2)

            path = "{}/{}_{}.png".format(enum.debugDirDetect, enum.frames, self.name)
            logger.info("detect deg " + path)
            cv2.imwrite(path, drawTempAndImg(self.image, img00))

        return score

    def siftMathch(self, image, enum=None):
        """SIFT + FLANN feature matching inside ``serachRect``.

        Applies Lowe's ratio test (0.7) and returns 1 when at least 10 good
        matches are found, otherwise 100 (so lower = better, like the other
        detectors).
        """
        img1 = self.image
        img2 = image[self.serachRect[1]:self.serachRect[3],
                     self.serachRect[0]:self.serachRect[2]]

        sift = cv2.xfeatures2d.SIFT_create()
        t1 = time.time()
        kp1, des1 = sift.detectAndCompute(img1, None)
        kp2, des2 = sift.detectAndCompute(img2, None)

        # Bail out before building the matcher if either image yielded
        # no descriptors (e.g. a flat/featureless crop).
        if des1 is None:
            logger.info("des1 is None {}".format(self))
            return 100
        if des2 is None:
            logger.info("des2 is None {}".format(self))
            return 100

        FLANN_INDEX_KDTREE = 0
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=10)
        search_params = dict(checks=100)
        flann = cv2.FlannBasedMatcher(index_params, search_params)

        matches = flann.knnMatch(des1, des2, k=2)
        matchesMask = [[0, 0] for _ in matches]

        # Lowe's ratio test: keep m only when clearly better than the
        # second-best candidate n.
        matches_num = 0
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                matchesMask[i] = [1, 0]
                matches_num += 1
        logger.info("%s took time %f, matches_num %d"
                    % (self.name, time.time() - t1, matches_num))

        # BUGFIX: the old code called cv2.imshow()/cv2.waitKey() here with no
        # timeout, blocking the whole pipeline until a keypress. Write the
        # debug visualization to disk instead, like the other detectors.
        if self.save_detect_img and enum is not None:
            draw_params = dict(matchColor=(0, 255, 0),
                               singlePointColor=(255, 0, 0),
                               matchesMask=matchesMask,
                               flags=0)
            img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None,
                                      **draw_params)
            path = "{}/{}_{}.png".format(enum.debugDirDetect, enum.frames, self.name)
            logger.info("detect deg " + path)
            cv2.imwrite(path, img3)

        if matches_num < 10:
            return 100
        return 1

    def search(self, img0, template, enum=None):
        """Locate ``template`` in ``img0`` with cv2.matchTemplate
        (TM_SQDIFF_NORMED).

        Returns (top_left, score): top_left in img0 coordinates, and score
        the mean absolute diff between the template and the matched patch
        (lower = better).
        """
        # numpy shape is (rows, cols, channels); the old code unpacked this
        # as `w, h, d`, silently swapping the names.
        h, w, _ = template.shape

        # For TM_SQDIFF / TM_SQDIFF_NORMED the best match is the MINIMUM.
        # (No need to eval() a method-name string to get the constant.)
        res = cv2.matchTemplate(img0, template, cv2.TM_SQDIFF_NORMED)
        min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
        top_left = min_loc

        matched = img0[top_left[1]:top_left[1] + h,
                       top_left[0]:top_left[0] + w]
        score = np.mean(cv2.absdiff(matched, template))

        logger.info("{} score2 {}".format(self.name, score))
        return top_left, score


def loadActions(DailyName, areaDict, actions):
    """Build an AGArea from each action tuple and register the valid ones
    in ``areaDict`` keyed by area name.

    Invalid actions (bad tuple length or missing template image) are logged
    by AGArea itself and skipped here.
    """
    logger.info("len of {}  actions {}".format(DailyName, len(actions)))

    # Iterate the actions directly; no need for range(len(...)) indexing.
    for action in actions:
        area = AGArea(DailyName, action)
        if not area.invalid:
            areaDict[area.name] = area

    logger.info("{} areaList size {}".format(DailyName, len(areaDict)))