import cv2 as cv
import numpy as np
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import *
import win32gui
import sys, os

import argparse

# CLI setup.
# NOTE(review): parse_args() is never called anywhere in this file, so the
# required -w/--windows option is currently declared but unused — confirm
# whether argument parsing was meant to be wired into __main__.
parser = argparse.ArgumentParser()
parser.add_argument('-w', '--windows', required=True, help='window name to monitor')

# Morphology kernels (uint8, all-ones).
ERODE_KERNEL = np.ones((5, 5), "uint8")  # used to shrink the filled rectangle mask in detect_messagebox
OPEN_KERNEL = np.ones((3, 3), "uint8")   # used for dilate/erode cleanup in detect_and_click

LINE_KERNEL = np.ones((2, 10), "uint8")  # NOTE(review): defined but not referenced in this file

def resize(img, height = -1, width = -1):
    """Scale *img* to a target height OR width, preserving aspect ratio.

    ``height`` takes precedence when both are positive; if neither is
    positive the original image object is returned unchanged.
    """
    src_h, src_w = img.shape[:2]
    if height > 0:
        target_h = int(height)
        target_w = int(target_h / src_h * src_w)
    elif width > 0:
        target_w = int(width)
        target_h = int(target_w / src_w * src_h)
    else:
        return img
    return cv.resize(img, (target_w, target_h))

def showImage(img, winname='Default', height = -1, width = -1):
    """Display *img* in a blocking window and close it on any keypress.

    The image is optionally rescaled via resize() before display.
    """
    cv.namedWindow(winname, cv.WINDOW_AUTOSIZE)
    scaled = resize(img, height, width)
    cv.imshow(winname, scaled)
    cv.waitKey(0)
    cv.destroyWindow(winname)

def do_sobel(img):
    """Return the average of the absolute x- and y-Sobel gradients of *img*."""
    grad_x = cv.convertScaleAbs(cv.Sobel(img, -1, 1, 0))
    grad_y = cv.convertScaleAbs(cv.Sobel(img, -1, 0, 1))
    return cv.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)

def reserve_rectangle_contour(contours, epsilon):
    """Keep only contours whose polygonal approximation (tolerance *epsilon*,
    closed) has exactly four vertices, i.e. roughly rectangular shapes."""
    return [
        contour for contour in contours
        if len(cv.approxPolyDP(contour, epsilon, True)) == 4
    ]
    
def detect_messagebox(img, debug=False):
    """Locate a rectangular message box in a BGR image.

    Returns the axis-aligned bounding box (x1, y1, x2, y2) of the most
    plausible rectangle, or None when nothing suitable is found.  A
    candidate is rejected unless its area is 5%-15% of the whole frame.
    When *debug* is True, intermediate results are shown in blocking
    windows (and the final rectangle is drawn onto *img* in place).
    """
    shape = img.shape
    # Find rectangle candidates on the Canny edge map.
    edge = cv.Canny(img, 100, 200)
    contours, _ = cv.findContours(edge, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return None
    if debug:
        show_image = img.copy()
        cv.drawContours(show_image, contours, -1, (0, 255, 0), thickness=3)
        showImage(show_image, width=1200)
    # approxPolyDP tolerance scales with the image size.
    epsilon_gap = min(shape[0], shape[1]) / 6
    box_contours = reserve_rectangle_contour(contours, epsilon_gap)
    if len(box_contours) == 0:
        return None

    # Rasterize the candidates into a binary mask, then erode slightly so
    # touching rectangles separate into distinct blobs.
    zero_mask = np.zeros_like(img)
    cv.fillPoly(zero_mask, box_contours, (255, 255, 255))
    zero_mask = cv.cvtColor(zero_mask, cv.COLOR_BGR2GRAY)
    _, zero_mask = cv.threshold(zero_mask, 10, 255, cv.THRESH_BINARY)
    zero_mask = cv.erode(zero_mask, kernel=ERODE_KERNEL)
    if debug:
        showImage(zero_mask, width=1200)
    contours, _ = cv.findContours(zero_mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        # BUGFIX: erosion can wipe the mask out entirely; np.argmax below
        # would raise on an empty list.
        return None
    if len(contours) != 1:
        print("[WARNING]potential number of contours isn't one, which may lead some problem")
        # Keep the blob with the largest area.
        areas = [cv.contourArea(contour) for contour in contours]
        points = contours[np.argmax(areas)]
    else:
        points = contours[0]
    # Axis-aligned bounding box of the surviving contour.
    pts = np.array(points)
    xs = pts[..., 0]
    ys = pts[..., 1]
    x1, x2 = xs.min(), xs.max()
    y1, y2 = ys.min(), ys.max()
    if debug:
        cv.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
        showImage(img, width=1200)
    # A real message box occupies a bounded fraction of the frame.
    ratio = (y2 - y1) * (x2 - x1) / (shape[1] * shape[0])
    if ratio > 0.15 or ratio < 0.05:
        return None
    return x1, y1, x2, y2

def get_window_pos(name):
    """Find the top-level window titled *name*.

    Returns ((left, top, right, bottom), hwnd) on success, None when no
    window with that exact title exists.
    """
    hwnd = win32gui.FindWindow(0, name)
    if hwnd == 0:
        return None
    return win32gui.GetWindowRect(hwnd), hwnd

def convertQImageToMat(incomingImage):
    """Copy a QImage into an (H, W, 4) uint8 numpy array.

    Format code 4 is QImage.Format_RGB32 (4 bytes per pixel) — the channel
    order in memory is presumably BGRA on little-endian hosts; confirm
    against the Qt docs before relying on channel semantics.
    """
    converted = incomingImage.convertToFormat(4)

    w = converted.width()
    h = converted.height()

    buf = converted.bits()
    buf.setsize(converted.byteCount())
    # np.array() copies the underlying buffer, so the result outlives the QImage.
    return np.array(buf).reshape(h, w, 4)

def get_image(hwnd):
    """Grab a screenshot of window *hwnd* and return it as an (H, W, 4) array."""
    # BUGFIX: QApplication is a process-wide singleton; constructing a second
    # one raises RuntimeError, so reuse an existing instance when present.
    app = QApplication.instance()
    if app is None:
        app = QApplication(sys.argv)
    screen = QApplication.primaryScreen()
    img = screen.grabWindow(hwnd).toImage()
    return convertQImageToMat(img)

def detect_and_click():
    """Demo: find the message box in a test image, locate the button inside
    it, and visualize the computed click point.

    Returns 0 when no message box / button is found; otherwise only shows
    the annotated image (no value is returned).
    """
    img = cv.imread("image/test1.jpg")

    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edge = cv.Canny(gray, 100, 200)
    showImage(edge, width=1200)

    result = detect_messagebox(img)
    if result is None:
        print("no operation")
        return 0

    # Crop the message box and look for the button rectangle inside it.
    x1, y1, x2, y2 = result
    message_box = img[y1: y2 + 1, x1 : x2 + 1]
    gray_box = cv.cvtColor(message_box, cv.COLOR_BGR2GRAY)
    grad_box = do_sobel(gray_box)
    _, binary = cv.threshold(grad_box, 10, 255, cv.THRESH_BINARY)
    # Close small gaps, despeckle, then restore stroke width.
    binary = cv.dilate(binary, kernel=OPEN_KERNEL)
    binary = cv.medianBlur(binary, ksize=3)
    binary = cv.erode(binary, kernel=OPEN_KERNEL)
    contours, _ = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    epsilon_gap = (y2 - y1) / 100
    contours = reserve_rectangle_contour(contours, epsilon_gap)
    if len(contours) == 0:
        # BUGFIX: .min()/.max() below would fail on an empty contour list.
        print("no operation")
        return 0
    # BUGFIX: contours generally have different point counts, so
    # np.array(contours) would be ragged; stack every point into a single
    # (N, 1, 2) array instead.
    pts = np.vstack(contours)
    xs = pts[..., 0]
    ys = pts[..., 1]
    b_x1, b_x2 = xs.min(), xs.max()
    b_y1, b_y2 = ys.min(), ys.max()
    print(b_x2 - b_x1, b_y2 - b_y1)

    cv.rectangle(img, (x1, y1), (x2, y2), (198, 198, 0), 3)
    cv.rectangle(img, (x1 + b_x1, y1 + b_y1), (x1 + b_x2, y1 + b_y2), (0, 255, 0), 3)

    # Midpoint of the button rectangle in full-image coordinates.
    click_pts = (
        ((x1 << 1) + b_x1 + b_x2) >> 1,
        ((y1 << 1) + b_y1 + b_y2) >> 1
    )

    cv.circle(img, click_pts, 10, (0, 198, 198), thickness=3)
    showImage(img, width=1200)


def u_process_list():
    """Locate the "next item" click target inside a scrolling list panel.

    Operates on a hard-coded screenshot ("image/test2.jpg").  Returns the
    (x, y) click point in full-image coordinates, or [] when the list
    rectangle, the ring marker, or a separator line cannot be found.
    Shows intermediate results in blocking windows.
    """
    img = cv.imread("image/test2.jpg")

    h, w = img.shape[0], img.shape[1]
    # The list panel lives in the right third of the screen, minus a 2%
    # margin at the top and bottom.
    roi_x1 = int(w / 3 * 2)
    roi_x2 = int(w)
    roi_y1 = int(h / 50 * 1)
    roi_y2 = int(h / 50 * 49)

    roi_img = img[roi_y1 : roi_y2 + 1, roi_x1 : roi_x2 + 1]
    # showImage(roi_img, height=1000)
    gray = cv.cvtColor(roi_img, cv.COLOR_BGR2GRAY)

    edge = cv.Canny(gray, 10, 200)
    # Force the rightmost column on so rectangles touching the image
    # border still close into contours.
    edge[..., -1] = 255
    # showImage(edge, height=1000)    

    contours, _ = cv.findContours(edge, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    epsilon_gap = h / 20
    contours = reserve_rectangle_contour(contours, epsilon_gap)
    roi_contour = None
    for contour in contours:
        area = cv.contourArea(contour)
        ratio = area / (h * w)
        # The list rectangle covers roughly 11%-18% of the full frame.
        if ratio > 0.11 and ratio < 0.18:
            roi_contour = contour
    if roi_contour is None:
        return []

    pts = np.array(roi_contour)
    xs = pts[..., 0]
    ys = pts[..., 1]
    list_x1, list_x2 = xs.min(), xs.max()
    list_y1, list_y2 = ys.min(), ys.max()

    # BUGFIX: compute the absolute x2/y2 from the ORIGINAL roi origin
    # before overwriting roi_x1/roi_y1 — the old code shifted roi_x1 first
    # and therefore added list_x1/list_y1 twice into roi_x2/roi_y2.
    roi_x2 = roi_x1 + list_x2
    roi_y2 = roi_y1 + list_y2
    roi_x1 = roi_x1 + list_x1
    roi_y1 = roi_y1 + list_y1
    roi_img = img[roi_y1 : roi_y2 + 1, roi_x1 : roi_x2 + 1]

    gray = cv.cvtColor(roi_img, cv.COLOR_BGR2GRAY)
    gray = cv.GaussianBlur(gray, ksize=(3, 3), sigmaX=2)
    edge = cv.Canny(gray, 10, 20)
    # showImage(edge)

    copy = roi_img.copy()

    # Detect the current-item ring marker with a Hough circle transform.
    min_r = int(w / 200)
    max_r = int(w / 120)
    print(min_r, max_r)
    rings = cv.HoughCircles(edge, cv.HOUGH_GRADIENT, 1, 5, param1=120, param2=10, minRadius=min_r, maxRadius=max_r)
    if rings is None:
        # BUGFIX: HoughCircles returns None (not an empty array) on no hit.
        return []
    if len(rings[0]) == 1:
        ring = rings[0][0]
    else:
        # Several candidates: pick the one whose center pixel is closest to
        # BGR(18, 4, 236) — presumably the marker color; TODO confirm.
        distances = []
        for ring in rings[0]:
            x = int(ring[0])
            y = int(ring[1])
            r = int(ring[2])
            dis = np.linalg.norm(np.array([18, 4, 236]) - np.array(roi_img[y, x]))
            distances.append(dis)

        index = np.argmin(distances)
        ring = rings[0][index]

    ring = [int(v) for v in ring]
    cv.circle(roi_img, (ring[0], ring[1]), ring[2], (0, 255, 0), 2)
    showImage(roi_img)

    # Detect near-horizontal separator lines between list rows.
    lines = cv.HoughLines(edge, 1, np.pi / 8, 200, min_theta=np.pi / 4, max_theta=np.pi / 4 * 3)
    if lines is None:
        # BUGFIX: HoughLines also returns None when nothing is found.
        return []
    line_xxy = []
    for line in lines:
        rho, theta = line[0]
        a = np.cos(theta)
        b = np.sin(theta)
        x0 = a * rho
        y0 = b * rho
        # Two far-apart points on the line; y is its mean height.
        x1 = int(x0 - 1000 * b)
        y1 = int(y0 + 1000 * a)
        x2 = int(x0 + 1000 * b)
        y2 = int(y0 - 1000 * a)
        y = int((y1 + y2) / 2)
        line_xxy.append([x1, x2, y])

    # Merge lines at nearly the same height; drop implausibly large jumps.
    line_xxy.sort(key=lambda l : l[-1])
    new_xxy = []
    min_h_epsilon = h / 120
    max_h_epislon = h / 5
    for xxy in line_xxy:
        x1, x2, y = xxy
        if len(new_xxy) == 0:
            new_xxy.append([x1, x2, y])
        else:
            gap = y - new_xxy[-1][-1]
            if gap < min_h_epsilon:
                # Close to the previous line: average the two together.
                new_xxy[-1][0] = int((new_xxy[-1][0] + x1) / 2)
                new_xxy[-1][1] = int((new_xxy[-1][1] + x2) / 2)
                new_xxy[-1][2] = int((new_xxy[-1][2] +  y) / 2)
            elif gap > max_h_epislon:
                pass
            else:
                new_xxy.append([x1, x2, y])

    # The first separator below the ring marks the bottom of the current item.
    for xxy in new_xxy:
        x1, x2, y = xxy
        if y > ring[1]:
            cv.line(copy, (x1, y), (x2, y), (0, 255, 0), 3)
            break
    else:
        print("no next item")
        return []

    # Reflect the ring's height across the separator to land on the next item.
    tx = int(roi_img.shape[1] / 2)
    ty = int(2 * y - ring[1])

    cv.circle(roi_img, (tx, ty), 5, (255, 0, 0), 3)
    showImage(roi_img, height=1000)

    x = roi_x1 + tx
    y = roi_y1 + ty
    cv.circle(img, (x, y), 5, (255, 0, 0), 3)
    showImage(img, width=1200)

    return x, y

def dense_detect(img):
    """Visualize the edge-contour density of *img*: print the contour count
    and draw every contour.

    NOTE: draws onto *img* in place.
    """
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edge = cv.Canny(gray, 100, 200)
    contours, _ = cv.findContours(edge, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    # BUGFIX: removed a stray "= " line that made the whole module
    # unparsable (SyntaxError); also dropped the unused h/w/flag_number locals.
    print(len(contours))
    cv.drawContours(img, contours, -1, (0, 255, 0), 3)
    showImage(edge)
    showImage(img)


def compare():
    """Compare the contour density of the message boxes found in two test images."""
    img1 = cv.imread("image/test11.jpg")
    # NOTE(review): this load is shadowed by the test1.jpg read below and is
    # never used — confirm which image was actually meant to be compared.
    img2 = cv.imread("image/test2.jpg")

    result = detect_messagebox(img1)
    if result is None:
        # BUGFIX: unpacking None would raise TypeError.
        print("no message box found in image 1")
        return
    x1, y1, x2, y2 = result
    roi_img1 = img1[y1 : y2 + 1, x1 : x2 + 1]

    img2 = cv.imread("image/test1.jpg")
    result = detect_messagebox(img2)
    if result is None:
        print("no message box found in image 2")
        return
    x1, y1, x2, y2 = result
    roi_img2 = img2[y1 : y2 + 1, x1 : x2 + 1]

    showImage(roi_img1)
    showImage(roi_img2)

    dense_detect(roi_img1)
    dense_detect(roi_img2)



if __name__ == "__main__":
    # Script entry point; the other demos are kept for manual experimentation.
    # detect_and_click()
    compare()
    # u_process_list()
