# This is a sample Python script.
import random
import time
from datetime import datetime
from functools import reduce

# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
import cv2 as cv
import virtualvideo
import websocket
import json
from PIL import Image, ImageFont, ImageDraw
import numpy as np
from queue import Queue
from copy import deepcopy

import threading as thread

# Bounded queue buffering decoded maze frames between the websocket receiver
# thread (producer, MazeClient) and the video generator (consumer, MyVideoSource).
q = Queue(512)
# Palette for the indexed-color ("P" mode) maze image: the cell value in the
# maze array selects the (R, G, B) entry by index.
colors = [
    (0xf5, 0xde, 0xb3),  # untraversed path
    (0x8a, 0x2b, 0xe2),  # wall
    (0xd3, 0xdb, 0x64),  # current position
    (0x12, 0x8b, 0x37),  # visited
    (0x32, 0x12, 0x8b),  # visited more than once
    (0xff, 0x00, 0x00),
    (0xff, 0x00, 0x00),
    (0xff, 0x00, 0x00),
    (0xff, 0x00, 0x00),
    (0xc4, 0xc4, 0xc4),  # fog (unexplored area)
    (0xff, 0x00, 0x00),
    (0x00, 0xff, 0xff),
    (0xff, 0xff, 0x00),
]


# Construct 3D rotation matrix when rotations around x,y,z axes are specified
def construct_RotationMatrixHomogenous(rotation_angles):
    """Build a 4x4 homogeneous rotation matrix from a Rodrigues vector.

    rotation_angles: list of three components (radians) interpreted by
    cv.Rodrigues as an axis-angle rotation vector.
    Returns a 4x4 np.ndarray whose upper-left 3x3 block is the rotation.
    """
    assert isinstance(rotation_angles, list) and len(rotation_angles) == 3
    RH = np.eye(4, 4)
    # cv.Rodrigues does not reliably write into a non-contiguous slice such as
    # RH[0:3, 0:3] passed as the output argument (the binding may allocate a
    # fresh array instead, leaving RH as the identity). Use the returned
    # rotation matrix and assign it explicitly.
    rot, _ = cv.Rodrigues(np.array(rotation_angles, dtype=np.float64))
    RH[0:3, 0:3] = rot
    return RH


# https://en.wikipedia.org/wiki/Rotation_matrix
def getRotationMatrixManual(rotation_angles):
    """Return the 4x4 homogeneous rotation Rx(phi) @ Ry(gamma) @ Rz(theta).

    rotation_angles: [phi, gamma, theta] in degrees — rotations about the
    x, y and z axes respectively (z is the in-image-plane rotation).
    """
    phi, gamma, theta = (np.deg2rad(angle) for angle in rotation_angles)

    # Rotation about the x axis.
    Rx = np.eye(4, 4)
    Rx[1, 1], Rx[1, 2] = np.cos(phi), -np.sin(phi)
    Rx[2, 1], Rx[2, 2] = np.sin(phi), np.cos(phi)

    # Rotation about the y axis.
    Ry = np.eye(4, 4)
    Ry[0, 0], Ry[0, 2] = np.cos(gamma), np.sin(gamma)
    Ry[2, 0], Ry[2, 2] = -np.sin(gamma), np.cos(gamma)

    # Rotation about the z axis (in-image-plane).
    Rz = np.eye(4, 4)
    Rz[0, 0], Rz[0, 1] = np.cos(theta), -np.sin(theta)
    Rz[1, 0], Rz[1, 1] = np.sin(theta), np.cos(theta)

    return Rx @ Ry @ Rz


def getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sidelength):
    """Convert the first four input/output corner points into float32 pixel
    coordinates suitable for cv.getPerspectiveTransform.

    ptsIn / ptsOut: arrays of shape (1, 4, C) with x, y in the first two
    channels. Returns (pin, pout), each a float32 array of shape (4, 2).
    """
    in2d = ptsIn[0, :]
    out2d = ptsOut[0, :]
    in_xy = [[p[0], p[1]] for p in in2d[:4]]
    out_xy = [[p[0], p[1]] for p in out2d[:4]]

    # Shift input corners from centre-origin to top-left-origin pixel coords.
    pin = (np.array(in_xy) + [W / 2., H / 2.]).astype(np.float32)
    # Map normalized [-1, 1] output corners onto a sidelength x sidelength square.
    pout = ((np.array(out_xy) + [1., 1.]) * (0.5 * sidelength)).astype(np.float32)

    return pin, pout


def warpImage(src, theta, phi, gamma, scale, fovy, corners=None):
    """Apply a 3D perspective warp to the 3-channel image `src`.

    theta/phi/gamma are rotation angles in degrees, scale a zoom factor and
    fovy the vertical field of view in degrees. `corners` is accepted for
    interface compatibility but unused. Returns the warped square image.
    """
    height, width, _ = src.shape
    # Estimate the homography and the side length of the square output.
    M, side = warpMatrix(width, height, theta, phi, gamma, scale, fovy)
    side = int(side)
    print('Output image dimension = {}'.format(side))
    # Warp into a side x side canvas, padding the borders with black.
    return cv.warpPerspective(src, M, (side, side), borderMode=cv.BORDER_CONSTANT, borderValue=[0, 0, 0, 0])


def warpMatrix(W, H, theta, phi, gamma, scale, fV):
    """Compute the 3x3 perspective-warp homography for a W x H image.

    theta, phi, gamma: rotation angles (degrees) around the z, x and y axes.
    scale: zoom factor applied to the output side length.
    fV: vertical field of view in degrees.

    Returns (M33, sideLength): the homography for cv.warpPerspective and the
    side length (float) of the square output image.
    """
    fVhalf = np.deg2rad(fV / 2.)
    d = np.sqrt(W * W + H * H)  # image diagonal
    sideLength = scale * d / np.cos(fVhalf)
    h = d / (2.0 * np.sin(fVhalf))  # camera distance from the image centre
    n = h - (d / 2.0)  # near clipping plane
    f = h + (d / 2.0)  # far clipping plane

    # Translation along Z-axis by -h (move the camera back from the plane)
    T = np.eye(4, 4)
    T[2, 3] = -h

    # Rotation matrices around x,y,z
    R = getRotationMatrixManual([phi, gamma, theta])

    # Projection Matrix (pinhole camera, symmetric frustum)
    P = np.eye(4, 4)
    P[0, 0] = 1.0 / np.tan(fVhalf)
    P[1, 1] = P[0, 0]
    P[2, 2] = -(f + n) / (f - n)
    P[2, 3] = -(2.0 * f * n) / (f - n)
    P[3, 2] = -1.0

    # Full transform: project after translating and rotating.
    F = reduce(lambda x, y: np.matmul(x, y), [P, T, R])

    # shape should be 1,4,3 for ptsIn and ptsOut since perspectiveTransform() expects data in this way.
    # In C++, this can be achieved by Mat ptsIn(1,4,CV_64FC3);
    ptsIn = np.array([[
        [-W / 2., H / 2., 0.], [W / 2., H / 2., 0.], [W / 2., -H / 2., 0.], [-W / 2., -H / 2., 0.]
    ]])
    ptsOut = cv.perspectiveTransform(ptsIn, F)

    ptsInPt2f, ptsOutPt2f = getPoints_for_PerspectiveTranformEstimation(ptsIn, ptsOut, W, H, sideLength)

    # check float32 otherwise OpenCV throws an error
    assert ptsInPt2f.dtype == np.float32
    assert ptsOutPt2f.dtype == np.float32
    M33 = cv.getPerspectiveTransform(ptsInPt2f, ptsOutPt2f)

    return M33, sideLength


class MazeClient:
    """Websocket client for the maze live feed.

    Subscribes to the server, requests frames one at a time, and pushes each
    decoded frame (an indexed-color PIL image plus metadata) into the
    module-level queue `q` for the video source to consume.
    """

    def __init__(self):
        print(__name__)

        # Timestamps used to measure the rate of incoming messages.
        self.start_time = time.time()
        self.end_time = time.time()

    def run(self):
        """Open the websocket connection and block, dispatching events."""
        websocket.enableTrace(False)

        ws = websocket.WebSocketApp("ws://192.168.0.3:9200/maze-endpoint/python",
                                    on_open=self.on_open,
                                    on_message=self.on_message,
                                    on_error=self.on_error,
                                    on_close=self.on_close)

        ws.run_forever(ping_timeout=5)

    def on_message(self, ws, message):
        """Handle a message pushed by the server when its data updates."""
        msg = json.loads(message)

        self.end_time = time.time()
        time_diff = self.end_time - self.start_time
        # Guard against a zero interval (limited clock resolution on fast
        # consecutive messages) to avoid ZeroDivisionError.
        mps = round(1 / time_diff, 2) if time_diff > 0 else 0.0

        def run(*args):
            if msg.get("action") == "subscribe-accept-handler":
                request_next_frame = {
                    "action": "request-next-frame-handler",
                    "recordId": msg.get("recordId")
                }

                ws.send(json.dumps(request_next_frame))  # payload must be a str

            if msg.get("action") == "response-next-frame-handler":
                maze = np.array(msg.get("maze"), dtype='int8')
                mazeSolver = msg.get("mazeSolver")
                mazeGenerator = msg.get("mazeGenerator")
                solveTime = msg.get("solveTime")
                step = msg.get("step")
                mpsStr = args[0]

                im = Image.fromarray(maze)
                im = im.convert('P')  # 'P' = palette (indexed color) image
                palette = np.array(colors).reshape(-1).tolist()
                im.putpalette(palette)
                receivedAt = datetime.now().strftime("%X")

                # Buffer the decoded frame for the video generator.
                q.put({
                    "im": im,
                    "mazeGenerator": mazeGenerator,
                    "mazeSolver": mazeSolver,
                    "solveTime": solveTime,
                    "receivedAt": receivedAt,
                    "step": str(step),
                    "mps": str(mpsStr)
                })

                request_next_frame = {
                    "action": "request-next-frame-handler",
                    "recordId": msg.get("recordId")
                }

                ws.send(json.dumps(request_next_frame))  # payload must be a str

            self.start_time = time.time()
            time.sleep(0.001)

        # Process off the websocket thread so the socket keeps reading.
        thread.Thread(target=run, args=(mps,)).start()

    def on_error(self, ws, error):
        """Called by the websocket client when an error occurs."""
        print(error)

    def on_close(self, ws, close_status_code=None, close_msg=None):
        """Called when the websocket connection is closed.

        websocket-client >= 1.0 invokes on_close(ws, close_status_code,
        close_msg); the defaulted parameters keep this compatible with older
        versions that pass only `ws`.
        """
        print("### closed ###")

    def on_open(self, ws):
        """Called once connected; sends the initial subscription request."""

        def run(*args):
            subscribe = {
                "action": "challenge-live-request-handler",
                "recordId": 123
            }

            ws.send(json.dumps(subscribe))  # payload must be a str

            time.sleep(1)

        thread.Thread(target=run, args=()).start()


class MyVideoSource(virtualvideo.VideoSource):
    """Virtual-camera video source.

    Consumes decoded maze frames from the module-level queue `q`, composites
    them with a text panel onto a 1312x738 canvas, and yields the canvas as
    frames for the virtual video device.
    """

    def __init__(self):
        # Placeholder 41x41 maze frame (solid fill) shown before the first
        # frame arrives from the websocket client.
        self.img = np.full((41, 41, 3), (0xe2, 0x2b, 0x8a), dtype=np.uint8)
        # White 1312x738 output canvas, and the white background of the text column.
        self.background = np.full((738, 1312, 3), 255, dtype=np.uint8)
        self.text_bg = np.full((738, (1312 - 738), 3), 255, dtype=np.uint8)
        self.font = ImageFont.truetype("wqy/wqy-zenhei.ttc", 24)
        # Upscale factor: the 41x41 maze becomes 738x738 (41 * 18).
        self._scale = 18
        size = self.img.shape
        # opencv's shape is y,x,channels
        self._size = (1312, 738)
        self.initTime = time.time()
        # Last successfully rendered frame/text, reused while the queue is empty.
        self.last_frame = self.img
        self.last_text = None
        self.frame_count = 0
        # Snapshot of the fully generated maze (shown while it is being solved).
        self.origin_maze = None
        self.generating = False
        # Horizontal animation offset for the warped maze thumbnail (wraps at 50).
        self.delta = 0

    def img_size(self):
        # Output frame size as (width, height).
        return self._size

    def fps(self):
        # Nominal frame rate reported to the virtual device.
        return 60

    def tips(self, img, position, line1, line2=None, line3=None, line4=None):
        """Draw up to four lines of text on `img` (BGR) and return the result.

        `line1` is drawn at `position`; the optional lines go at fixed rows
        below it. Rendering goes through PIL so the TTF font can be used.
        """
        imgPIL = Image.fromarray(cv.cvtColor(img, cv.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(imgPIL)
        draw.text(position, line1, font=self.font, fill=(160, 255, 100))
        if line2 is not None:
            draw.text((41, 41 * 2), line2, font=self.font, fill=(128, 0, 128))
        if line3 is not None:
            draw.text((41, 41 * 3), line3, font=self.font, fill=(207, 207, 196))
        if line4 is not None:
            draw.text((41, 41 * 4), line4, font=self.font, fill=(207, 207, 196))
        del draw
        return cv.cvtColor(np.asarray(imgPIL), cv.COLOR_RGB2BGR)

    def generator(self):
        """Yield composited video frames forever, consuming maze data from `q`."""
        # Timestamp used to compute the effective frame rate between reads.
        start_time = time.time()
        while True:
            for i in range(25):
                frame = self.img if self.last_frame is None else self.last_frame
                text = np.copy(self.text_bg)
                # NOTE(review): q.not_empty is a threading.Condition object and is
                # therefore always truthy — the intended check was probably
                # `not q.empty()`. As written this branch always runs and relies
                # on the q.get timeout below when the queue is empty.
                if q.not_empty:
                    try:
                        data = q.get(timeout=1)  # seconds to block waiting; None would block forever

                        continueTime = time.time() - self.initTime

                        # Effective frames-per-second since the previous frame.
                        end_time = time.time()
                        time_diff = end_time - start_time
                        real_fps = 1 / time_diff

                        im = data["im"]

                        resized_img = im.convert("RGB")
                        frame = cv.cvtColor(np.asarray(resized_img), cv.COLOR_RGB2BGR)
                        # Cache the frame so it can be re-shown when the queue is empty.
                        self.last_frame = deepcopy(frame)

                        content = data["mazeSolver"]
                        mazeGenerator = data["mazeGenerator"]
                        solveTime = None
                        if data["solveTime"] is not None:
                            self.generating = False
                            solveTime = "Solved at " + datetime.fromtimestamp(data["solveTime"] / 1000).strftime("%X")
                        else:
                            self.generating = True
                            self.origin_maze = deepcopy(frame)
                        now = "Now " + datetime.now().strftime("%X")
                        receivedAt = "Received at " + data["receivedAt"]
                        step = data["step"] + " steps"
                        mps = data["mps"] + " mps"

                        self.frame_count += 1
                        print(str(self.frame_count))

                        line1 = mazeGenerator
                        if content is not None:
                            line1 = mazeGenerator + " vs " + content

                        line3 = receivedAt
                        if solveTime is not None:
                            line3 = solveTime + " - " + receivedAt

                        text = self.tips(
                            text,
                            (41, 41),
                            line1,
                            step + ", " + str(round(real_fps, 2)) + " fps, " + mps,
                            line3,
                            now + ", +" + str(int(continueTime)) + "s"
                        )
                        self.last_text = deepcopy(text)
                    except Exception as e:
                        # Queue empty (q.get timed out) or rendering failed:
                        # fall back to the last rendered text panel.
                        print(e)
                        text = np.copy(self.text_bg) if self.last_text is None else self.last_text
                        pass

                try:
                    # Scale the maze up to the 738x738 pane of the 16:9 canvas.
                    frame = cv.resize(frame, (41 * self._scale, 41 * self._scale), interpolation=cv.INTER_NEAREST)
                    self.background[0:41 * self._scale, 0:41 * self._scale, :] = frame
                    self.background[0:41 * self._scale, 41 * self._scale:1312, :] = text

                    # Advance the thumbnail animation offset (wraps after 50 px).
                    self.delta = self.delta + 1 if self.delta < 50 else 0
                    if self.origin_maze is not None:
                        resized_origin_maze = cv.resize(self.origin_maze, (41 * 3, 41 * 3),
                                                        interpolation=cv.INTER_NEAREST)
                        # Perspective-warped thumbnail of the full maze.
                        test = warpImage(resized_origin_maze, 0, 0, 45, 0.5, 53)

                        y1 = 41 * self._scale - test.shape[0] - 41
                        y2 = 41 * self._scale - 41
                        x1 = 41 * self._scale + int((1312 - 41 * (self._scale + 3)) / 2) + self.delta
                        x2 = x1 + test.shape[0]
                        self.background[y1:y2, x1:x2, :] = test
                        self.background[y1:y2, x1+40:x2+40, :] = deepcopy(test)
                        del test

                    # While solving, also show the complete generated maze.
                    if not self.generating and self.origin_maze is not None:
                        resized_origin_maze = cv.resize(self.origin_maze, (41 * 3, 41 * 3),
                                                        interpolation=cv.INTER_NEAREST)
                        y1 = 41 * self._scale - 41 * 4
                        y2 = 41 * self._scale - 41
                        x1 = 41 * self._scale + int((1312 - 41 * (self._scale + 3)) / 2)
                        x2 = x1 + 41 * 3
                        self.background[y1:y2, x1:x2, :] = resized_origin_maze
                        del resized_origin_maze

                    if self.background is None:
                        print("background is None")

                    yield self.background
                except Exception as e:
                    # Compositing failed (e.g. shape mismatch): skip this frame.
                    print(e)
                    pass

                start_time = time.time()

                # Seconds to sleep between frames; e.g. a value of 1 would
                # throttle the stream to one frame per second.
                time.sleep(0.0016)
            # for i in range(1, 500, 1):
            # processes the image a little bit
            # x = abs(50-i)
            # yield cv.blur(self.img,(x,x))
            # fn = "%06d" % i
            # yield cv.imread("/home/xlj/1710302140578349058/multi_ze_" + fn + ".png")


def print_hi(name):
    """Greet `name`, wire the maze client to the virtual video device, and
    start streaming (blocks in the device's run loop)."""
    print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.

    mazeClient = MazeClient()
    videoSource = MyVideoSource()

    fakeDevice = virtualvideo.FakeVideoDevice()
    fakeDevice.init_input(videoSource)
    # Output device index 2 at 1280x720, 60 fps.
    fakeDevice.init_output(2, 1280, 720, fps=60)

    # Run the websocket client in the background; stream on this thread.
    thread.Thread(target=mazeClient.run).start()
    fakeDevice.run(quiet=True)


# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Report whether OpenCV was built with CUDA support (0 = CPU-only build).
    print(cv.cuda.getCudaEnabledDeviceCount())
    print_hi('PyCharm')

# See PyCharm help at https://www.jetbrains.com/help/pycharm/
