'''
Iris does the following:
detect a rectangle from a photo, and save its four corner points;
then detect a small black ball inside it, and locate its position;
send the position using pyserial.
'''
import sys
import threading
import queue
import pickle  # used for mat.pkl persistence below; not guaranteed by the star imports
# from PyQt5.uic.properties import QtWidgets

import Iris_window
import sys
from PyQt5 import QtWidgets, QtGui, QtCore
from PyQt5.QtWidgets import *

import numpy as np
import cv2
import serial
import time

from assitant import *
from calibrat import *

# 2021/1/3
indebug = True
using_uart = True

save_mat = not True
read_mat = not False

# save_mat =  True
# read_mat =  False
# 2021/1/2
width = 400
height = 400
margin = 20
k_size = 20
k_xy = 0.5
d_xy = np.array([0.,0.])
max_weight = 400000
min_weight = 1000

# global line_strength,line_max_gap,line_max_gap
line_strength = 200
line_max_gap = 30
line_s = 200.0
element = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))

camera_width = 640
camera_height = 480
board_points = np.zeros([4, 2])  # [[xy],[xy],[xy],[xy]],top-left, top-right, bottom-right, bottom-right
transform_matrix = None

camera = None
uart = None
while True:
    try:
        camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW)
        break
    except:
        time.sleep(1000)
        if indebug:
            print('camera init failed')


# 判断是否为边缘
box_margin = 10
def judge(line, w1h0):
    if line[0] > line[2]:
        xmin, xmax = line[2], line[0]
    else:
        xmin, xmax = line[0], line[2]
    if line[1] > line[3]:
        ymin, ymax = line[3], line[1]
    else:
        ymin, ymax = line[1], line[3]
    if ymin < box_margin or ymax > camera_height - box_margin:
        return True
    if xmin < box_margin or xmax > camera_width - box_margin:
        return True
    return False
    # if w1h0:
    #     if ymin < box_margin or ymax > camera_height - box_margin:
    #         return True
    #     else:
    #         return False
    # else:
    #     if xmin < box_margin or xmax > camera_width - box_margin:
    #         return True
    #     else:
    #         return False


def init_para(img):
    global line_strength, line_max_gap
    c = 0
    max_try_times = 30
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 50, 150, apertureSize=3)
    line_set = None

    while max_try_times > 0:
        max_try_times -= 1
        # assert edges.any() > 0
        line_set = cv2.HoughLinesP(edges, 5, np.pi / 150, int(line_strength), maxLineGap=int(line_max_gap),
                                   minLineLength=min_length_of_line)
        assert len(line_set) > 0

        # 函数将通过步长为1的半径和步长为π/180的角来搜索所有可能的直线
        line_set = np.array(line_set).reshape(-1, 4)
        line_set = merge_similar_ls(line_set)
        if indebug:
            img2 = img.copy()
            for line in line_set:
                cv2.line(img2, tuple(line[0:2]), tuple(line[2:4]), (255, 255, 0), 2)
            cv2.imshow("image_lines", img2)
            cv2.waitKey(1)

        # 横竖分类
        line_w = np.zeros((10, 4))
        line_h = np.zeros((10, 4))
        nx, ny = 0, 0
        # 分类
        for line in line_set:
            if abs(line[0] - line[2]) > abs(line[1] - line[3]):
                if judge(line, True):
                    x = 1
                    continue
                if nx > 9:
                    break
                line_w[nx] = line
                nx = nx + 1
            else:
                if judge(line, False):
                    x = 1
                    continue
                if ny > 9: break
                line_h[ny] = line
                ny = ny + 1

        # data1.3修改前
        # n = len(line_set)

        if nx > 4 or ny > 4:
            line_strength *= 1.1
            line_strength += np.random.random() * 5
            c = 0
        elif nx < 2 or ny < 2:
            line_strength *= 0.95
            # line_strength += np.random.random() * 5
            c = 0
            continue
        else:
            c += 1
            line_strength += np.random.random() * 2
            if c >= 3: break
    # data1.3
    lines = np.zeros((4, 4))
    xmin, ymin, xmax, ymax = 5000, 5000, 0, 0
    for line in line_w:
        # 横线
        yba = line[1] + line[3]
        if yba == 0: continue
        if yba > ymax:
            ymax = yba
            lines[1] = line
        if yba < ymin:
            ymin = yba
            lines[3] = line
    for line in line_h:
        xba = line[0] + line[2]
        if xba == 0: continue
        if xba > xmax:
            xmax = xba
            lines[2] = line
        if xba < xmin:
            xmin = xba
            lines[0] = line
    flag = True
    for line in lines:
        if line.all() == 0:
            flag = False
    if ymax - ymin < 100 or xmax - xmin < 100:
        flag = False
    return flag, lines


def search_ball(gray, last_pos=None, r_size = 100):
    warped = cv2.warpPerspective(gray, transform_matrix, (width, height))
    _, binary = cv2.threshold(warped, 130, 255, cv2.THRESH_BINARY)
    if last_pos is not None:
        x, y = last_pos
        w_h_f_t = x - r_size, x + r_size, y - r_size, y + r_size
        # sure width = height
        for i in range(4):  # saturation
            if w_h_f_t[i] < margin:
                w_h_f_t[i] = margin
            elif w_h_f_t[i] > width - margin:
                w_h_f_t[i] = width - margin
    else:
        w_h_f_t = [margin, width - margin, margin, height - margin]
    binary = binary[w_h_f_t[0]:w_h_f_t[1], w_h_f_t[2]:w_h_f_t[3]]
    # binary = binary[margin:width - margin, margin:height - margin]
    closing = cv2.morphologyEx(binary, cv2.MORPH_OPEN, element)
    # if closing[0][0]>200 and closing[int(len(closing)*0.8)][0]>200:
    #closing = cv2.bitwise_not(closing)
    if indebug:
        cv2.imshow("clo", closing)
    M = cv2.moments(closing)
    weight = M['m00']
    if weight > max_weight or weight < min_weight:
        return None, None
    x = M["m10"] / weight + w_h_f_t[0]
    y = M["m01"] / weight + w_h_f_t[2]

    if indebug:
        cv2.circle(warped, tuple(np.int0([x, y])), 8, [255, 255, 255])
        cv2.imshow("search board", warped)
    return x - width / 2, y - height / 2


def init_uart():
    ready = False
    path = "com"
    #path = '/dev/ttyUSB'
    usb_id = '6'
    while not ready:
        try:
            uart = serial.Serial(path + usb_id, 115200)
            ready = True

            task = threading.Thread(target=input_task, name='task1')
            task.start()

            # task2 = threading.Thread(target=listen_task, name='task2')
            # task2.start()
        except:
            if usb_id == '0':
                usb_id = '1'
            else:
                usb_id = '0'
            time.sleep(1000)
            if indebug:
                print('connect failed')
    return uart


def get_mat():
    global transform_matrix
    img = camera.read()
    ok = False
    lines = None
    while not ok:
        print("move the camera")
        _, img = camera.read()
        img = cal_photo(img)
        # using this img to find line strength
        ok, lines = init_para(img)

    point = lines_cross_points(lines)
    point = order_points(point)
    dst_rect = np.array([
        [0, 0],
        [width - 1, 0],
        [width - 1, height - 1],
        [0, height - 1]],
        dtype="float32")
    transform_matrix = cv2.getPerspectiveTransform(point, dst_rect)
    if save_mat:
        fn = 'mat.pkl'
        with open(fn, 'wb') as f:  # open file with write-mode
            pickle.dump(transform_matrix, f)  # serialize and save object


def main():
    global transform_matrix, uart
    camera.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
    camera.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
    if using_uart:
        uart = init_uart()

    if not read_mat:
        get_mat()
    else:
        with open('mat.pkl', 'rb') as f:
            transform_matrix = pickle.load(f)  # read file and build object

    # point = np.array([[139, 93], [405, 80], [460, 347], [138, 376]], dtype="float32")
    # dst_rect = np.array([
    #     [0, 0],
    #     [width - 1, 0],
    #     [width - 1, height - 1],
    #     [0, height - 1]],
    #     dtype="float32")
    # transform_matrix = cv2.getPerspectiveTransform(point, dst_rect)
    V2 = np.zeros(2)  # raw xy
    last_xy = np.zeros(2)  # after average fillite

    M_order = 2
    qv2 = queue.Queue(M_order)
    for i in range(M_order):
        qv2.put(V2)

    x0, y0, vx0, vy0, dt0 = 0, 0, 0, 0, 0
    lastt = time.time()
    using_local_search = False
    r_size = k_size
    while True:
        _, img = camera.read()
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        if indebug:
            cv2.imshow("full scale gray", img)
        if using_local_search:
            raw_pos = [last_xy[1] + width // 2, height // 2 - last_xy[0]]
            y1, x1 = search_ball(img, raw_pos, r_size)
        else:
            y1, x1 = search_ball(img)

        if x1 is None:  # found no ball
            using_local_search = False
            cv2.waitKey(1)
            continue
        y1 = -y1
        V2[[0, 1]] = [x1, y1]  # raw data, xy

        V2 = V2* k_xy + d_xy
        # xy = last_xy + (V2 - qv2.get()) / M_order  # M order fillet
        # xy /= 2
        # qv2.put(V2)
        xy = (V2 + last_xy)/2
        x1, y1 = xy
        x0, y0 = last_xy
        last_xy[[0, 1]] = xy

        t = time.time()
        dt = t - lastt
        # if dt<0.15:
        #     continue
        lastt = t
        # if indebug:
        #     print('fps:%f' % (1 / dt))
        if dt>1:
            msg = 'cal %d,%d\n' % (x1,y1)
        else:
            dx , dy=x1-x0, y1-y0
            vx1, vy1 = (dx / dt, dy / dt)
            vx = (vx1 - vx0) * dt / (dt0 + dt) + vx1
            vy = (vy1 - vy0) * dt / (dt0 + dt) + vy1
            x, y = [x1 + vx1 * dt, y1 + vy1 * dt]

            vx0, vy0, dt0 = vx, vy, dt
            v = np.sqrt(vx0**2 + vy0**2)
            r_size = k_size + v/2
            if r_size>width/2:
                using_local_search = False
            msg = 'cal %d,%d,%d,%d\n' % (x, y, vx, vy)
        if using_uart:
            uart.write(msg.encode('ascii'))
            using_local_search = False
        if indebug:
            print(msg)
            cv2.waitKey(1)


def input_task():
    app = QtWidgets.QApplication(sys.argv)
    my_pyqt_form = Iris_window.Iris_Window(uart)
    my_pyqt_form.show()
    sys.exit(app.exec_())

    # while True:
    #     cmd = input(">")
    #     uart.write(cmd.encode('ascii'))


def listen_task():
    size = uart.isWaiting()
    if size > 0:
        msg = uart.read(size)
        print(msg)
    time.sleep(0.01)


# 按间距中的绿色按钮以运行脚本。
if __name__ == '__main__':
    main()
    # com?
    # se = serial.Serial('COM3', 9600, timeout=2)

    # img3 = img2.copy()
    # for p in point:
    #     cv2.circle(img3, tuple(p), 10, [0, 0, 0])
    # cv2.imshow("img2", img3)

    # cv2.circle(warped, tuple([x, y]), 10, [255, 255, 0])
    # cv2.imshow("ddd", warped)
    # cv2.waitKey()

    # if se.isOpen():
    #     print('串口已打开')
    #     data_inst = x*1000000000+y*1000000+vx*1000+vy
    #     data = '%d' % data_inst  # 发送的数据
    #     serial.write(data.encode())  # 串口写数据
    # print(x, y, vx, vy, dt)
    # cv2.destroyWindow("img2")
