from operator import le, truediv
from typing import no_type_check_decorator
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *

import cv2 as cv
import numpy as np
import win32gui
import win32com.client
import win32gui
import win32api
import win32con
import win32ui
import os, sys, time

import _thread

# Default display width (pixels) for debug windows shown via showImage().
WIDTH = 1500

def u_debug(text):
    """Print *text* prefixed with a green ANSI-colored ``[DEBUG]`` tag."""
    print(f"\033[32m[DEBUG]\033[0m{text}")

# system api
def query_handle_info() -> dict:
    """Enumerate all visible, enabled top-level windows.

    :return: mapping of window handle (int) -> window title (str)
    """
    titles = {}

    def _collect(hwnd, _extra):
        # Keep only real, enabled, visible windows.
        if (win32gui.IsWindow(hwnd)
                and win32gui.IsWindowEnabled(hwnd)
                and win32gui.IsWindowVisible(hwnd)):
            titles[hwnd] = win32gui.GetWindowText(hwnd)

    win32gui.EnumWindows(_collect, 0)
    return titles

def set_foreground(hwnd):
    """
        Bring the given window to the foreground.
    :param hwnd: window handle, an integer
    """
    if hwnd == win32gui.GetForegroundWindow():
        return
    # Sending an Alt key ('%') first works around Windows' restriction on
    # background processes stealing focus via SetForegroundWindow.
    win32com.client.Dispatch("WScript.Shell").SendKeys('%')
    win32gui.SetForegroundWindow(hwnd)

# take the shot
def take_shot(hwnd : int) -> np.ndarray:
    """Capture the on-screen rectangle of window *hwnd* as a BGR image.

    The capture is blitted from the desktop (virtual screen) into a GDI
    bitmap, saved to a temporary BMP file, and read back with OpenCV.

    :param hwnd: handle of the window whose rectangle is captured
    :return: the screenshot as an OpenCV/numpy BGR array
    """
    bmpFileName = 'temp.bmp'

    r = win32gui.GetWindowRect(hwnd)
    hwin = win32gui.GetDesktopWindow()
    left = win32api.GetSystemMetrics(win32con.SM_XVIRTUALSCREEN)
    top = win32api.GetSystemMetrics(win32con.SM_YVIRTUALSCREEN)
    hwindc = win32gui.GetWindowDC(hwin)
    srcdc = win32ui.CreateDCFromHandle(hwindc)
    memdc = srcdc.CreateCompatibleDC()
    bmp = win32ui.CreateBitmap()
    try:
        bmp.CreateCompatibleBitmap(srcdc, r[2] - r[0], r[3] - r[1])
        memdc.SelectObject(bmp)
        # Copy the window's rectangle out of the virtual screen; the negative
        # destination offset maps window coordinates to bitmap (0, 0).
        memdc.BitBlt((-r[0], top - r[1]), (r[2], r[3] - top), srcdc, (left, top), win32con.SRCCOPY)
        bmp.SaveBitmapFile(memdc, bmpFileName)
    finally:
        # Release all GDI objects. The original code leaked the two DCs and
        # the bitmap on every call; since this runs once per second in the
        # work loop, that steadily exhausts the process' GDI handle quota.
        win32gui.DeleteObject(bmp.GetHandle())
        memdc.DeleteDC()
        srcdc.DeleteDC()
        win32gui.ReleaseDC(hwin, hwindc)

    img = cv.imread(bmpFileName)
    os.remove(bmpFileName)
    return img

def click_here(pts):
    """Move the cursor to *pts* (x, y) and left-click, holding the button 1 s."""
    win32api.SetCursorPos(pts)
    # Press, wait one second, release — same sequence as a slow human click.
    for event, pause in ((win32con.MOUSEEVENTF_LEFTDOWN, 1),
                         (win32con.MOUSEEVENTF_LEFTUP, 0)):
        win32api.mouse_event(event, 0, 0)
        if pause:
            time.sleep(pause)

# cv
# Morphology structuring elements: a 5x5 all-ones kernel for the heavy
# erode/dilate passes and a 3x3 one for light open/close-style cleanups.
ERODE_KERNEL = np.ones((5, 5), "uint8")
OPEN_KERNEL = np.ones((3, 3), "uint8")

def resize(img, height = -1, width = -1):
    """Scale *img*, preserving its aspect ratio.

    If *height* is positive it determines the output height (width derived);
    otherwise, if *width* is positive it determines the output width. When
    neither is positive the image is returned unchanged.
    """
    src_h, src_w = img.shape[0], img.shape[1]
    if height > 0:
        target_h = int(height)
        target_w = int(target_h / src_h * src_w)
    elif width > 0:
        target_w = int(width)
        target_h = int(target_w / src_w * src_h)
    else:
        return img
    return cv.resize(img, (target_w, target_h))

def showImage(img, winname='Default', height = -1, width = -1):
    """Display *img* (optionally resized) and block until any key is pressed.

    :param img: image to display
    :param winname: window title, also used as the handle to destroy the window
    :param height: if positive, display height (aspect ratio preserved)
    :param width: if positive (and height is not), display width
    """
    cv.namedWindow(winname, cv.WINDOW_AUTOSIZE)
    cv.imshow(winname, resize(img, height, width))
    cv.waitKey(0)
    cv.destroyWindow(winname)

def do_sobel(img):
    """Return an approximate Sobel gradient magnitude of *img*.

    Horizontal and vertical derivatives are computed separately, converted
    to 8-bit absolute values, then blended 50/50.
    """
    grad_x = cv.convertScaleAbs(cv.Sobel(img, -1, 1, 0))
    grad_y = cv.convertScaleAbs(cv.Sobel(img, -1, 0, 1))
    return cv.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)

def reserve_rectangle_contour(contours, epsilon):
    """Keep only contours whose polygonal approximation has exactly 4 vertices.

    :param contours: iterable of OpenCV contours
    :param epsilon: approximation tolerance forwarded to cv.approxPolyDP
    :return: list of contours that look like quadrilaterals
    """
    return [c for c in contours
            if len(cv.approxPolyDP(c, epsilon, True)) == 4]
    
def detect_messagebox(img, debug=False):
    """Locate a message-box dialog inside screenshot *img*.

    Pipeline: blur -> binary threshold (drop near-black background) ->
    heavy morphological opening to solidify the bright region -> Canny ->
    quadrilateral contour filter -> fill the candidates and re-extract the
    largest blob -> sanity-check that its area is 5%-30% of the frame.

    :param img: BGR screenshot (numpy array)
    :param debug: when True, show intermediate images and print diagnostics
    :return: (x1, y1, x2, y2) bounding box in *img* coordinates, or None
    """
    shape = img.shape
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    # find message box
    gray = cv.GaussianBlur(gray, ksize=(5, 5), sigmaX=1)
    if debug:
        showImage(gray, width=WIDTH)
    # Keep pixels brighter than 30; the area around the dialog is assumed
    # to be nearly black (e.g. a dark video frame) -- TODO confirm.
    _, threshold = cv.threshold(gray, 30, 255, cv.THRESH_BINARY)
    # 12 erode + 12 dilate iterations: remove small bright specks while
    # keeping the large dialog blob intact.
    threshold = cv.erode(threshold, ERODE_KERNEL, iterations=12)
    threshold = cv.dilate(threshold, ERODE_KERNEL, iterations=12)
    # Turn the 0/255 mask into 0/1 so multiply() masks the gray image.
    threshold = np.array(threshold / 255).astype("uint8")
    gray = cv.multiply(gray, threshold)
    if debug:
        showImage(threshold)
        showImage(gray)
    
    gray = cv.medianBlur(gray, ksize=3)
    edge = cv.Canny(gray, 80, 200)

    if debug:
        showImage(edge, width=WIDTH)
    contours, _ = cv.findContours(edge, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return None
    if debug:
        show_image = img.copy()
        cv.drawContours(show_image, contours, -1, (0, 255, 0), thickness=3)
        showImage(show_image, width=WIDTH)
    # approxPolyDP tolerance scales with the frame size.
    epsilon_gap = min(shape[0], shape[1]) / 6
    box_contours = reserve_rectangle_contour(contours, epsilon_gap)
    if debug:
        u_debug("box contours number : {}".format(len(box_contours)))
        show_image = img.copy()
        cv.drawContours(show_image, box_contours, -1, (0, 255, 0), 3)
        showImage(show_image, width=WIDTH)

    if len(box_contours) == 0:
        return None

    zero_mask = np.zeros_like(img)
    # get mask: fill all quadrilateral candidates, then shrink slightly so
    # touching boxes separate before re-extracting contours.
    cv.fillPoly(zero_mask, box_contours, (255, 255, 255))
    zero_mask = cv.cvtColor(zero_mask, cv.COLOR_BGR2GRAY)
    _, zero_mask = cv.threshold(zero_mask, 10, 255, cv.THRESH_BINARY)
    zero_mask = cv.erode(zero_mask, kernel=ERODE_KERNEL)
    if debug:
        showImage(zero_mask, width=WIDTH)    
    contours, _ = cv.findContours(zero_mask, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if debug:   
        u_debug("contours num : {}".format(len(contours)))

    if len(contours) == 0:
        return None
    if len(contours) != 1:
        print("[WARNING]potential number of contours isn't one, which may lead some problem")
        # calculate area to decide which one to reserve (largest wins)
        areas = []
        for contour in contours:
            areas.append(cv.contourArea(contour))
        index = np.argmax(areas)
        points = contours[index]
    else:
        points = contours[0]
    # Axis-aligned bounding box of the winning contour.
    pts = np.array(points)
    xs = pts[..., 0]
    ys = pts[..., 1]
    x1, x2 = xs.min(), xs.max()
    y1, y2 = ys.min(), ys.max()
    if debug:
        cv.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 3)
        showImage(img, width=WIDTH)
    # I think the area must be within a certain range:
    # a real dialog covers roughly 5%-30% of the frame.
    ratio = (y2 - y1) * (x2 - x1) / (shape[1] * shape[0])
    if debug:
        u_debug("filter ratio {}".format(ratio))
    if ratio > 0.30 or ratio < 0.05:
        return None
    return x1, y1, x2, y2

def destroy_detect_messagebox(img, debug=False, rx1=200, ry1=200, rx2=200, ry2=200):
    """Run detect_messagebox on the ROI (rx1, ry1)-(rx2, ry2) of *img*.

    The detected box is translated back into full-image coordinates.

    :return: (x1, y1, x2, y2) in *img* coordinates, or None when not found
    """
    box = detect_messagebox(img[ry1 : ry2 + 1, rx1 : rx2 + 1], debug=debug)
    if box is None:
        return None
    x1, y1, x2, y2 = box
    return x1 + rx1, y1 + ry1, x2 + rx1, y2 + ry1

def get_click_points(img, debug=False, use_destroy=False):
    """Find the confirm-button click point of a message box in *img*.

    :param img: full BGR screenshot
    :param debug: show intermediate images / print diagnostics
    :param use_destroy: when True, restrict detection to the central region
        (from 4/20 to 16/20 of each dimension) of the screenshot
    :return: [] when nothing is found, otherwise
        [(click_x, click_y), contour_num] where contour_num is the number of
        Canny contours inside the box (a rough content-complexity metric)
    """
    if not use_destroy:
        result = detect_messagebox(img, debug=debug)
    else:
        rx1 = int(img.shape[1] / 20 * 4)
        ry1 = int(img.shape[0] / 20 * 4)
        rx2 = int(img.shape[1] / 20 * 16)
        ry2 = int(img.shape[0] / 20 * 16)
        result = destroy_detect_messagebox(img, debug, rx1, ry1, rx2, ry2)
    if result is None:
        print("no operation")
        return []
    
    # find the position of the button inside the detected box
    x1, y1, x2, y2 = result
    message_box = img[y1: y2 + 1, x1 : x2 + 1]
    if debug:
        showImage(message_box, width=1200)
    gray_box = cv.cvtColor(message_box, cv.COLOR_BGR2GRAY)
    grad_box = do_sobel(gray_box)
    _, binary = cv.threshold(grad_box, 10, 255, cv.THRESH_BINARY)
    # dilate -> median -> erode: close small gaps in the button outline
    # while suppressing speckle noise.
    binary = cv.dilate(binary, kernel=OPEN_KERNEL)
    binary = cv.medianBlur(binary, ksize=3)
    binary = cv.erode(binary, kernel=OPEN_KERNEL)
    contours, _ = cv.findContours(binary, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return []
    epsilon_gap = (y2 - y1) / 100
    contours = reserve_rectangle_contour(contours, epsilon_gap)

    if debug:
        # NOTE(review): contours are drawn here but this copy is never
        # displayed — confirm whether a showImage call was intended.
        show_image = message_box.copy()
        cv.drawContours(show_image, contours, -1, (0, 255, 0), 3)

    if len(contours) == 1:
        points = contours[0]
    else:
        # Pick the first contour whose area is 2.5%-4% of the box — the
        # expected relative size of the confirm button.
        for contour in contours:
            area = cv.contourArea(contour)
            ratio = area / (message_box.shape[0] * message_box.shape[1])
            if ratio > 0.025 and ratio < 0.04:
                points = contour
                break
        else:
            print("no button detected")
            return []

    # Axis-aligned bounds of the button contour, relative to the box.
    pts = np.array(points)
    xs = pts[..., 0]
    ys = pts[..., 1]
    b_x1, b_x2 = xs.min(), xs.max()
    b_y1, b_y2 = ys.min(), ys.max()
    if debug:
        # print(b_x2 - b_x1, b_y2 - b_y1)
        cv.rectangle(img, (x1, y1), (x2, y2), (198, 198, 0), 3)
        cv.rectangle(img, (x1 + b_x1, y1 + b_y1), (x1 + b_x2, y1 + b_y2), (0, 255, 0), 3)

    # Button centre in full-image coordinates: x1 + (b_x1 + b_x2) / 2 and
    # y1 + (b_y1 + b_y2) / 2, written with bit shifts.
    message_click_pts = (
        ((x1 << 1) + b_x1 + b_x2) >> 1,
        ((y1 << 1) + b_y1 + b_y2) >> 1
    )

    # calculate contour number of message (complexity of the box content)
    gray_box = cv.GaussianBlur(gray_box, ksize=(3, 3), sigmaX=1)
    edge = cv.Canny(gray_box, 100, 200)
    contours, _ = cv.findContours(edge, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    contour_num = len(contours)

    if debug:
        cv.circle(img, message_click_pts, 10, (0, 198, 198), thickness=3)
        showImage(img, width=WIDTH)

    return [message_click_pts, contour_num]


class dummy_ui(QWidget):
    """Small PyQt window driving the message-box auto-click loop.

    The combo box lists top-level windows ("<handle> <title>"); the buttons
    run a one-shot detection ("test"), start/stop the background work loop,
    or refresh the window list.
    """

    def __init__(self):
        super().__init__()
        self.resize(300, 100)
        self.setWindowTitle("AutoClick v-1.0")
        self.font = QFont()
        self.font.setFamily("Microsoft YaHei")

        hbox1 = QHBoxLayout()
        hbox2 = QHBoxLayout()
        vbox = QVBoxLayout()
        self.label = QLabel("NodeHandle")
        self.btn1 = QPushButton("test")
        self.btn2 = QPushButton("stop")
        self.btn3 = QPushButton("refresh")
        self.btn4 = QPushButton("start")

        self.debug = False
        self.use_destroy = True   # restrict detection to the screen centre
        self.stop = True          # work_loop runs while this is False

        # Last clicked message-box point; reused when the screen looks frozen.
        self.message_box_click_pts = None

        self.checkBox = QCheckBox("Debug")
        self.checkBox.stateChanged.connect(self.update_debug_info)

        self.btn1.clicked.connect(self.detect_and_click)
        self.btn2.clicked.connect(self.stop_detect)
        self.btn3.clicked.connect(self.refresh)
        self.btn4.clicked.connect(self.launch_start_process)

        self.cb = QComboBox()
        self.refresh()

        hbox1.addWidget(self.label)
        hbox1.addWidget(self.cb)

        hbox2.addWidget(self.checkBox)
        # NOTE(review): addWidget's stretch parameter is documented as int;
        # 0.1 relies on implicit conversion — confirm it behaves as intended.
        hbox2.addWidget(self.btn1, stretch=0.1)
        hbox2.addWidget(self.btn4, stretch=0.1)
        hbox2.addWidget(self.btn2, stretch=0.1)
        hbox2.addWidget(self.btn3, stretch=0.1)

        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)

        self.setLayout(vbox)
        self.btn1.setFont(self.font)
        self.btn2.setFont(self.font)
        self.btn3.setFont(self.font)
        self.btn4.setFont(self.font)

        self.checkBox.setFont(self.font)
        self.label.setFont(self.font)
        self.cb.setFont(self.font)

    def refresh(self):
        """Repopulate the combo box with '<handle> <title>' entries."""
        self.cb.clear()
        handle_info = query_handle_info()
        info_str = []
        for node in handle_info:
            node_name = handle_info[node]
            if len(node_name) == 0:
                node_name = "Unknown NodeHandle"
            elif len(node_name) > 30:
                # Truncate long titles to keep the combo box readable.
                node_name = node_name[:27] + "..."
            info_str.append("{} {}".format(node, node_name))

        self.cb.addItems(info_str)

    def update_debug_info(self):
        """Sync self.debug with the Debug checkbox."""
        self.debug = self.checkBox.isChecked()

    def stop_detect(self):
        """Ask the work loop to exit and re-enable the start button."""
        self.stop = True
        self.btn4.setEnabled(True)

    def detect_and_click(self):
        """Detect a message box on the current screen and click its button.

        :return: the box's contour count (rough content-complexity metric),
            or None when no click point was found
        """
        img = self.get_current_screen()
        click_pts = get_click_points(img, debug=self.debug, use_destroy=self.use_destroy)
        if len(click_pts) == 0:
            return None

        message_pts = click_pts[0]
        contour_num = click_pts[1]
        u_debug("message box contour number:{}".format(contour_num))

        if self.debug:
            cv.circle(img, message_pts, 10, (198, 198, 0), 5)
            showImage(img, 'debug', width=WIDTH)
            cv.waitKey(10)
        # Remember the point so the frozen-screen fallback can reuse it.
        self.message_box_click_pts = message_pts
        click_here(message_pts)
        return contour_num

    def detect_list(self, img):
        """Find a click point for the next entry in the playlist panel.

        Looks for the rectangular playlist area in the right third of the
        screen, locates the (presumably red) "currently playing" ring with a
        Hough circle search, finds the row separator below it with Hough
        lines, and mirrors the ring across that separator to land on the
        next row.

        :param img: full BGR screenshot
        :return: (x, y) click point in full-image coordinates, or [] when
            the panel / ring / separator could not be found
        """
        h, w = img.shape[0], img.shape[1]
        # Search only the right third, with a small vertical margin.
        roi_x1 = int(w / 3 * 2)
        roi_x2 = int(w)
        roi_y1 = int(h / 50 * 1)
        roi_y2 = int(h / 50 * 49)

        roi_img = img[roi_y1 : roi_y2 + 1, roi_x1 : roi_x2 + 1]
        gray = cv.cvtColor(roi_img, cv.COLOR_BGR2GRAY)

        edge = cv.Canny(gray, 10, 200)
        # Force the last column to be an edge so a panel touching the image
        # border still closes into a contour.
        edge[..., -1] = 255

        contours, _ = cv.findContours(edge, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
        epsilon_gap = h / 20
        contours = reserve_rectangle_contour(contours, epsilon_gap)
        roi_contour = None
        for contour in contours:
            area = cv.contourArea(contour)
            # Ratio relative to the full frame: the playlist panel is
            # expected to cover roughly 11%-18% of it.
            ratio = area / (h * w)
            if ratio > 0.11 and ratio < 0.18:
                roi_contour = contour
        if roi_contour is None:
            return []

        pts = np.array(roi_contour)
        xs = pts[..., 0]
        ys = pts[..., 1]
        list_x1, list_x2 = xs.min(), xs.max()
        list_y1, list_y2 = ys.min(), ys.max()

        # Translate the panel box into full-image coordinates.
        # Fixed: the far edges were previously computed AFTER roi_x1/roi_y1
        # had been updated, double-adding list_x1/list_y1.
        roi_x2 = roi_x1 + list_x2
        roi_x1 = roi_x1 + list_x1
        roi_y2 = roi_y1 + list_y2
        roi_y1 = roi_y1 + list_y1
        roi_img = img[roi_y1 : roi_y2 + 1, roi_x1 : roi_x2 + 1]

        gray = cv.cvtColor(roi_img, cv.COLOR_BGR2GRAY)
        gray = cv.GaussianBlur(gray, ksize=(3, 3), sigmaX=2)
        edge = cv.Canny(gray, 10, 20)

        copy = roi_img.copy()

        # detect ring use hough
        min_r = int(w / 200)
        max_r = int(w / 120)
        if self.debug:
            u_debug("HoughCircle : min_r={}, max_r={}".format(min_r, max_r))
        rings = cv.HoughCircles(edge, cv.HOUGH_GRADIENT, 1, 5, param1=120, param2=10, minRadius=min_r, maxRadius=max_r)
        if rings is None:
            # Fixed: HoughCircles returns None (not an empty array) when no
            # circle is found; indexing rings[0] crashed here.
            return []
        if len(rings[0]) == 1:
            ring = rings[0][0]
        else:
            # Several candidates: keep the one whose centre pixel is closest
            # to the marker colour (BGR ~ (18, 4, 236), i.e. red).
            distances = []
            for ring in rings[0]:
                x = int(ring[0])
                y = int(ring[1])
                dis = np.linalg.norm(np.array([18, 4, 236]) - np.array(roi_img[y, x]))
                distances.append(dis)

            index = np.argmin(distances)
            ring = rings[0][index]

        ring = [int(v) for v in ring]
        if self.debug:
            cv.circle(roi_img, (ring[0], ring[1]), ring[2], (0, 255, 0), 2)
            showImage(roi_img)

        # detect line use hough
        lines = cv.HoughLines(edge, 1, np.pi / 8, 200, min_theta=np.pi / 4, max_theta=np.pi / 4 * 3)
        if lines is None:
            # Fixed: HoughLines also returns None when nothing is found.
            print("no next item")
            return []
        line_xxy = []
        for line in lines:
            rho, theta = line[0]
            # Convert (rho, theta) into two far-apart endpoints; y is the
            # mid-height of the (near-horizontal) line.
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * rho
            y0 = b * rho
            x1 = int(x0 - 1000 * b)
            y1 = int(y0 + 1000 * a)
            x2 = int(x0 + 1000 * b)
            y2 = int(y0 - 1000 * a)
            y = int((y1 + y2) / 2)
            line_xxy.append([x1, x2, y])

        line_xxy.sort(key=lambda l : l[-1])
        # Merge lines closer than h/120 vertically; drop gaps wider than h/5.
        new_xxy = []
        min_h_epsilon = h / 120
        max_h_epislon = h / 5
        for xxy in line_xxy:
            x1, x2, y = xxy
            if len(new_xxy) == 0:
                new_xxy.append([x1, x2, y])
            else:
                gap = y - new_xxy[-1][-1]
                if gap < min_h_epsilon:
                    # Close to the previous separator: average them together.
                    new_xxy[-1][0] = int((new_xxy[-1][0] + x1) / 2)
                    new_xxy[-1][1] = int((new_xxy[-1][1] + x2) / 2)
                    new_xxy[-1][2] = int((new_xxy[-1][2] +  y) / 2)
                elif gap > max_h_epislon:
                    pass
                else:
                    new_xxy.append([x1, x2, y])

        # The first separator below the ring bounds the current row.
        for xxy in new_xxy:
            x1, x2, y = xxy
            if y > ring[1]:
                if self.debug:
                    cv.line(copy, (x1, y), (x2, y), (0, 255, 0), 3)
                break
        else:
            print("no next item")
            return []

        # Mirror the ring's centre across the separator to land on the row
        # below (the next episode).
        tx = int(roi_img.shape[1] / 2)
        ty = int(2 * y - ring[1])
        if self.debug:
            cv.circle(roi_img, (tx, ty), 5, (255, 0, 0), 3)
            showImage(roi_img, height=1000)

        x = roi_x1 + tx
        y = roi_y1 + ty
        if self.debug:
            cv.circle(img, (x, y), 5, (255, 0, 0), 3)
            showImage(img, width=1200)
        return x, y

    def try_next_video(self):
        """One-shot: detect the playlist and click the next entry if found."""
        img = self.get_current_screen()
        result = self.detect_list(img)
        if len(result) == 0:
            u_debug("there is no need to do next video")
        else:
            click_here(result)

    def launch_start_process(self):
        """Validate the selected handle, then spawn the work-loop thread."""
        h, w = self.get_w_h()
        # A tiny capture means the chosen handle is not a usable window.
        if h * w < 50:
            QMessageBox.critical(self, "invalid", "Please choose a valid NodeHandle!", QMessageBox.Yes)
            return

        self.btn4.setEnabled(False)
        self.stop = False
        _thread.start_new_thread(self.work_loop, (h, w))

    def get_current_screen(self):
        """Capture and return a screenshot of the currently selected window."""
        # Combo entries look like "<handle> <title>"; the handle comes first.
        node = int(self.cb.currentText().split()[0])
        img = take_shot(node)
        return img

    def get_w_h(self):
        """Return the screenshot's (height, width).

        NOTE: despite the name, the order is (h, w) — callers rely on this.
        """
        img = self.get_current_screen()
        return img.shape[0], img.shape[1]

    def judge_jump(self):
        # Placeholder — not implemented yet.
        ...

    def work_loop(self, h, w):
        """Background polling loop (runs in a _thread).

        Every second: grab the screen, try to click a detected message box,
        and — when the dialog's contour count suggests the episode ended —
        jump to the next playlist entry. Every 5th step the last three
        frames are compared; if all are identical the screen is considered
        frozen and the last known click point (or the centre) is clicked.
        """
        time_step = 0
        u_debug("begin thread 'work_loop', screen size:({}, {})".format(h, w))
        roi_x1 = int(w / 20 * 0)
        roi_y1 = int(h / 20 * 0)
        roi_x2 = int(w / 20 * 19)
        # Fixed: the bottom edge was derived from the width instead of the
        # height, giving a wrong crop on non-square screens.
        roi_y2 = int(h / 20 * 19)

        if self.debug:
            img = self.get_current_screen()
            img = img[roi_y1 : roi_y2 + 1, roi_x1 : roi_x2 + 1]
            showImage(img)

        image_buff = [None, None, None]    # reserve three pictures for same checking
        while not self.stop:
            time_step += 1
            img = self.get_current_screen()
            img = img[roi_y1 : roi_y2 + 1, roi_x1 : roi_x2 + 1]
            image_buff[time_step % 3] = img

            if time_step > 4 and time_step % 5 == 0:
                unchange = (image_buff[0] == image_buff[1]).all() and (image_buff[0] == image_buff[2]).all() and (image_buff[1] == image_buff[2]).all()
                u_debug("whether screen is unchanged:[{}]".format(unchange))
                if unchange:
                    if self.message_box_click_pts:
                        click_here(self.message_box_click_pts)
                    else:
                        click_pts = (
                            int((roi_x1 + roi_x2) / 2),
                            int((roi_y1 + roi_y2) / 2)
                        )
                        click_here(click_pts)

            contour_num = self.detect_and_click()
            # A contour count below 300 means a sparse dialog, assumed to be
            # the end-of-video prompt -> switch to the next episode.
            if contour_num is not None and contour_num < 300:
                time.sleep(1)
                img = self.get_current_screen()
                result = self.detect_list(img)
                if len(result) > 0:
                    x, y = result
                    click_here((x, y))

            time.sleep(1)

        u_debug("exit thread 'work_loop'")


if __name__ == "__main__":
    # Start the Qt event loop with the auto-click window.
    app = QApplication(sys.argv)
    window = dummy_ui()
    window.show()
    sys.exit(app.exec_())