
import mouser_opt
import getntptime
import img_ocr

#pyinstaller -F main.py   # package as a single-file executable for release

#if(getntptime.checktimeout()):
#    exit()


#mouser_opt.click_opt()

# 识别文字，并指定语言
#string = img_ocr.getStrByImg('ocr-test.png')
#print(string)

#print("app exit") 


import cv2
import os
import time

#获取移动端图片
#def screencap():
#    cmd = "adb root"
#    cmd1 = "adb shell /system/bin/screencap -p /sdcard/da.png"
#    cmd2 = "adb pull /sdcard/da.png "
#    os.system(cmd)
#    time.sleep(1)
#    os.system(cmd1)
#    time.sleep(2)
#    os.system(cmd2)


def _tran_canny(image):
    """消除噪声"""
    return image
    #image = cv2.GaussianBlur(image, (3, 3), 0)
    #return cv2.Canny(image, 50, 150)


def detect_displacement(img_slider_path, image_background_path):
    """Locate the slider image inside the background via template matching.

    Loads both images in grayscale, rescales the background to a fixed
    2048-px width (preserving aspect ratio), runs normalized cross-correlation
    template matching and returns the top-left corner of the best match.

    NOTE: opens two blocking debug windows (the resized background, then the
    background with the match rectangle drawn) and waits for a key press each
    time — intended for interactive debugging, not headless use.

    :param img_slider_path: path of the small template image (the slider)
    :param image_background_path: path of the large background image
    :return: ``(x, y)`` top-left corner of the best match, in coordinates of
        the resized (2048-px-wide) background
    """
    # Flag 0 = load as grayscale.
    image = cv2.imread(img_slider_path, 0)
    template = cv2.imread(image_background_path, 0)
    print(template.shape)

    # Normalize the background to a 2048-px width so match coordinates are
    # comparable across differently sized screenshots.
    template = cv2.resize(template, (2048, int(template.shape[0] * 2048 / template.shape[1])))
    print(template.shape)
    cv2.imshow("template", template)
    cv2.waitKey(0)

    # Best match by normalized correlation coefficient.
    res = cv2.matchTemplate(_tran_canny(image), _tran_canny(template), cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)

    # For TM_CCOEFF_NORMED the maximum response is the best match.
    x, y = max_loc
    print(max_loc)

    # Draw a fixed-size 30x50 rectangle at the match for visual inspection.
    template = cv2.rectangle(template, (x, y), (x + 30, y + 50), (0, 0, 255), 2)
    cv2.imshow("template", template)
    cv2.waitKey(0)
    return max_loc

import cv2
from matplotlib import pyplot as plt
def BFMatching():
    """Match ORB features between a button template and a desktop screenshot.

    Uses brute-force Hamming matching with cross-check, sorts matches by
    descriptor distance, draws the 40 best correspondences and shows the
    result in a blocking window.
    """
    # Load both images as grayscale.
    template = cv2.imread(r'./dist/result_btn.png', 0)
    target = cv2.imread(r'./dist/desk3.png', 0)
    orb = cv2.ORB_create()  # ORB feature detector
    kp1, des1 = orb.detectAndCompute(template, None)  # template keypoints/descriptors
    kp2, des2 = orb.detectAndCompute(target, None)    # target keypoints/descriptors
    # Hamming distance is the correct metric for ORB's binary descriptors;
    # crossCheck keeps only mutually-best matches.
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
    matches = bf.match(des1, des2)
    matches = sorted(matches, key=lambda m: m.distance)  # closest matches first
    print(matches)
    # Draw the 40 best correspondences side by side.
    result = cv2.drawMatches(template, kp1, target, kp2, matches[:40], None, flags=2)
    cv2.imshow("h", result)
    cv2.waitKey()

import numpy as np
import cv2
from matplotlib import pyplot as plt
def BFMatching2():
    """Find a template inside a target image with SIFT + FLANN matching.

    Detects SIFT keypoints in both images, matches descriptors with FLANN
    (k=2), filters matches with Lowe's ratio test, and — when enough good
    matches survive — estimates a homography and outlines the template's
    projected quadrilateral on the target. Displays the match visualization
    with matplotlib (blocking).
    """
    MIN_MATCH_COUNT = 10  # minimum good matches required for a homography

    template = cv2.imread(r'./dist/desk.png', 0)      # queryImage
    target = cv2.imread(r'./dist/result_btn.png', 0)  # trainImage

    # SIFT keypoints and descriptors for both images.
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(template, None)
    kp2, des2 = sift.detectAndCompute(target, None)

    # FLANN (kd-tree) matcher, two nearest neighbours per descriptor.
    FLANN_INDEX_KDTREE = 0
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Lowe's ratio test: keep a match only when its best neighbour is clearly
    # closer than the second-best (ratio < 0.7). Guard against entries with
    # fewer than two neighbours, which knnMatch can legitimately return —
    # the original `for m, n in matches` unpacking crashed on those.
    good = []
    for pair in matches:
        if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
            good.append(pair[0])

    if len(good) > MIN_MATCH_COUNT:
        # Matched keypoint coordinates, shaped (-1, 1, 2) for findHomography.
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # Robustly estimate the perspective transform; mask flags the inliers.
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w = template.shape
        # Project the template's four corners onto the target and outline
        # the resulting quadrilateral.
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        cv2.polylines(target, [np.int32(dst)], True, (255, 0, 0), 2, cv2.LINE_AA)
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=None,
                       matchesMask=matchesMask,
                       flags=2)
    result = cv2.drawMatches(template, kp1, target, kp2, good, None, **draw_params)
    plt.imshow(result)
    plt.show()


import aircv as ac 
def aircv_find():
    """Locate a small button image inside a larger screenshot with aircv.

    Runs template matching via ``ac.find_all_template`` with a 0.9 confidence
    threshold and prints the center coordinates of the first hit, or a
    not-found message when nothing matches.
    """
    imgsrc = ac.imread(r'./dist/desk3.png')       # the big image to search in
    imgobj = ac.imread(r'./dist/result_btn.png')  # the small image to find
    # 0.9 = minimum confidence; only matches scoring above it are returned.
    match_result = ac.find_all_template(imgsrc, imgobj, 0.9)
    print('match_result:%s' % match_result)  # list of match dicts
    # Example element:
    # {'result': (115.5, 914.0),                                     # center point
    #  'rectangle': ((42, 843), (42, 985), (189, 843), (189, 985)),  # four corners
    #  'confidence': 1.0}                                            # 100% match
    if match_result:  # truthiness also stays safe if the library returns None
        print(len(match_result))
        x1, y1 = match_result[0]['result']
        print(x1)  # e.g. 115.5 — x of the match center
        print(y1)  # e.g. 914.0 — y of the match center
    else:
        print('识别不到要点击的目标')


if __name__ == '__main__':
    # Entry point: run the SIFT + FLANN matching demo. The alternative
    # matchers below can be tried by uncommenting the corresponding call.
    #screencap()
    #detect_displacement(r'./dist/result_btn.png', r'./dist/desk3.png')
    BFMatching2()
    #aircv_find()






