#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import pprint

import cv2
import numpy as np
from matplotlib import pyplot as plt

# Pretty-printer kept around for interactive debugging of match structures.
pp = pprint.PrettyPrinter(indent=4)

# NOTE(review): the original also loaded the full screenshot
# ('images/00000_1514972261.png' via an absolute path) here, but that value
# was immediately shadowed by the crop loaded below, so the dead read was
# removed.  cv2.imread returns None (no exception) when a path is missing.
player = cv2.imread('/home/liusen/IdeaWorkspace/wechat-jump/temp_player.jpg')

template = cv2.imread('template.png')
# img = cv2.imread('images/00000_1514972261.png')
img = cv2.imread('images/00000_1514972261_crop.png')


# cv2.imwrite('images/00000_1514972261_crop.png', img[150:300, 600:800])
# cv2.imwrite('images/00000_1514972261_crop.png', img[500:900, 100:330])


def getFilteredImg(img):
    """Keep only pixels whose HSV value falls in a fixed blue-ish band.

    Pixels outside the band become white ([255, 255, 255]); pixels inside
    keep their original BGR value.

    Args:
        img: BGR image (numpy uint8 array of shape (h, w, 3)).

    Returns:
        A new BGR image of the same shape; the input is not modified.
    """
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Inclusive HSV bounds (H, S, V) of the colour band to keep.
    FILTER_MIN = np.array([100, 50, 20], np.uint8)
    FILTER_MAX = np.array([150, 120, 150], np.uint8)
    mask = cv2.inRange(hsv, FILTER_MIN, FILTER_MAX)
    # bitwise_and of the image with itself under the mask zeroes every pixel
    # outside the band — identical result to the original bitwise_or, but the
    # conventional masking idiom.  The original's img.copy() was redundant:
    # neither cvtColor nor the bitwise op mutates its input, and the bitwise
    # call allocates a fresh output array.
    out = cv2.bitwise_and(img, img, mask=mask)
    # Turn the zeroed (masked-out) pixels white.
    out[np.where((out == [0, 0, 0]).all(axis=2))] = [255, 255, 255]
    return out


# Apply the colour-band filter to both the player template and the scene so
# feature detection below runs on the isolated colour regions only.
player = getFilteredImg(player)
img = getFilteredImg(img)

# surf = cv2.xfeatures2d.SURF_create()
# (kps, descs) = surf.detectAndCompute(player, None)
# print("# kps: {}, descriptors: {}".format(len(kps), descs.shape))
# img2 = cv2.drawKeypoints(player, kps, None, (255, 0, 0), 4)
# plt.imshow(img2), plt.show()

# sift = cv2.xfeatures2d.SIFT_create()
# (kps, descs) = sift.detectAndCompute(player, None)
# print("# kps: {}, descriptors: {}".format(len(kps), descs.shape))
# img2 = cv2.drawKeypoints(player, kps, None, (255, 0, 0), 4)
# plt.imshow(img2), plt.show()

# orb = cv2.ORB_create()
# kp = orb.detect(player, None)
# kp, des = orb.compute(player, kp)
# img2 = cv2.drawKeypoints(player, kp, None, color=(0, 255, 0), flags=0)
# plt.imshow(img2), plt.show()

# orb = cv2.ORB_create()
# kp1, des1 = orb.detectAndCompute(player, None)
# print("# kp1: {}, des1: {}".format(len(kp1), des1.shape))
# kp2, des2 = orb.detectAndCompute(img, None)
# print("# kp2: {}, des2: {}".format(len(kp2), des2.shape))
# bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# matches = bf.match(des1, des2)
# matches = sorted(matches, key=lambda x: x.distance)
# print("# matches.len: {}, matches: {}".format(len(matches), matches))
# img3 = drawMatches(player, kp1, img, kp2, matches)
# plt.imshow(img3), plt.show()


# gray = cv2.cvtColor(player, cv2.COLOR_BGR2GRAY)
# gray = np.float32(gray)
# # The input image must be float32; the last parameter should be between 0.04 and 0.05
# dst = cv2.cornerHarris(gray, 2, 3, 0.05)
# # result is dilated for marking the corners, not important
# dst = cv2.dilate(dst, None)
# print("# dst: {}".format(dst))
# # Threshold for an optimal value, it may vary depending on the image.
# player[dst > 0.2 * dst.max()] = [0, 0, 255]
# plt.imshow(gray), plt.show()
# sys.exit(0)

# player_edges = cv2.Canny(player, 100, 200)
# img_edges = cv2.Canny(img, 100, 200)
# plt.subplot(121), plt.imshow(player_edges)
# plt.subplot(122), plt.imshow(img_edges)
# plt.show()
# sys.exit(0)

# --- SURF feature matching: locate the player template inside the scene ---
# NOTE(review): SURF lives in opencv-contrib's xfeatures2d module and is only
# available in builds with the non-free algorithms enabled.
surf = cv2.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(player, None)
print("# kp1: {}, des1: {}".format(len(kp1), des1.shape))
kp2, des2 = surf.detectAndCompute(img, None)
print("# kp2: {}, des2: {}".format(len(kp2), des2.shape))

# Approximate nearest-neighbour matching of SURF descriptors via FLANN
# (kd-tree index, 2 nearest neighbours per query descriptor).
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# bf = cv2.BFMatcher()
# matches = bf.knnMatch(des1, des2, k=2)

# Lowe's ratio test: keep a match only when it is clearly better than the
# second-best candidate.  Guard against knnMatch returning fewer than two
# neighbours for a descriptor (can happen with tiny descriptor sets, and
# would crash the original `for m, n in matches` unpacking).
good = []
for pair in matches:
    if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
        good.append([pair[0]])
print("# good.len: {}".format(len(good)))

# Minimum number of ratio-test survivors required to attempt a homography.
MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m[0].queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m[0].trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # Robustly estimate the homography mapping template -> scene coordinates.
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if M is None:
        # findHomography can fail even with enough matches (degenerate point
        # geometry); the original crashed inside perspectiveTransform here.
        print("Homography estimation failed")
        matchesMask = None
    else:
        matchesMask = mask.ravel().tolist()
        # Height and width of the template image.
        h, w, *_ = player.shape
        # Project the template's four corners into the scene and outline the
        # detected region.
        pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
        print("# pts: {}".format(pts))
        dst = cv2.perspectiveTransform(pts, M)
        print("# dst: {}".format(dst))
        cv2.polylines(img, [np.int32(dst)], True, 255, 10, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None

# draw_params = dict(matchColor=(0, 255, 0),  # draw matches in green color
#                    singlePointColor=None,
#                    matchesMask=matchesMask,  # draw only inliers
#                    flags=2)
# img3 = cv2.drawMatches(player, kp1, img, kp2, good, None, **draw_params)
# plt.imshow(img3, 'gray'), plt.show()

img3 = cv2.drawMatchesKnn(player, kp1, img, kp2, good, None, flags=2)
plt.imshow(img3), plt.show()