import asyncio

from channels.consumer import AsyncConsumer
from channels.exceptions import StopConsumer
from channels.generic.websocket import WebsocketConsumer


# Subclass WebsocketConsumer and implement at least the three
# websocket_* handler methods below.
# Each handler receives `message`, a plain dict, accessed like
#   message["your_key"]
# e.g. {'type': 'websocket.receive', 'your_key': 'text_message'}
class ChatConsumer(WebsocketConsumer):
    """Minimal websocket consumer: accepts every connection, logs each
    incoming event, and replies with a fixed text message."""

    def websocket_connect(self, message):
        """Triggered when a client requests a websocket handshake.

        Accept unconditionally so the connection is established.
        """
        self.accept()

    def websocket_receive(self, message):
        """Triggered for every frame the client sends.

        `message` is the raw channels event dict; log it and answer
        with a fixed reply.
        """
        print(message)
        self.send("不要回复不要回复不要回复")

    def websocket_disconnect(self, message):
        """Triggered when the client closes the connection.

        Raising StopConsumer tells channels to tear this consumer down.
        """
        raise StopConsumer()


class PingConsumer(WebsocketConsumer):
    """Receives a base64-encoded source image over the websocket, saves it,
    then animates it with a pre-recorded driver video through the
    first-order-motion model, streaming a per-frame progress counter back
    to the client."""

    def websocket_connect(self, message):
        # Accept every incoming websocket handshake.
        self.accept()

    def websocket_receive(self, message):
        """Handle one client message: persist the uploaded image, then run
        the animation pipeline for that user.

        `message` is the raw channels event dict; `message['text']` is a
        JSON string expected to contain at least `user` and `imgStr` keys.
        """
        import json

        self.up_image(message)
        self.send("上传成功")
        data = json.loads(message['text'])
        user = data['user']
        # NOTE(review): this runs the full (slow) inference loop inside the
        # consumer, blocking further messages until it finishes — consider
        # offloading to a worker. Confirm intended.
        self.run(output_path=r"AI\output\{0}.mp4".format(user),
                 source_path=r"AI\Inputs\{0}.jpg".format(user))

    def websocket_disconnect(self, message):
        # Tell channels to tear this consumer down cleanly.
        raise StopConsumer()

    def up_image(self, message):
        """Decode the base64 image in `message['text']` and write it to
        AI\\Inputs\\<user>.jpg.

        Transport may have turned '+' into spaces inside the base64 payload,
        so they are restored before decoding.
        """
        import base64
        import json

        print(type(message['text']))
        data = json.loads(message['text'])
        img_data = data['imgStr']
        with open(r"AI\Inputs\{0}.jpg".format(data['user']), "wb") as f:
            f.write(base64.b64decode(img_data.replace(" ", "+")))

    def run(self, source_path="AI/Inputs/feynman.jpeg", checkpoint_path="AI/extract/vox-cpk.pth.tar",
            video_path="AI/driver_video/myself.mp4", output_path="AI/output/my_resault.mp4"):
        """Animate `source_path` with the motion of `video_path` and write
        the result to `output_path` (256x256, 25 fps, H.264).

        Sends the current frame counter over the websocket after every
        written frame so the client can display progress. When `video_path`
        is falsy, frames are captured from the default camera and a fixed
        face region is cropped.
        """
        import imageio
        import torch
        from AI.animate import normalize_kp
        from AI.demo import load_checkpoints
        import numpy as np
        from skimage import img_as_ubyte
        from skimage.transform import resize
        import cv2
        import os

        source_image = imageio.imread(source_path)
        source_image = resize(source_image, (256, 256))[..., :3]

        # Load the pretrained generator / keypoint-detector pair.
        generator, kp_detector = load_checkpoints(config_path='AI/config/vox-256.yaml',
                                                  checkpoint_path=checkpoint_path)

        # Fix: ensure the directory the writer targets actually exists.
        # The old code only checked a literal 'output' directory, which is
        # not where output_path (e.g. AI\output\...) points.
        out_dir = os.path.dirname(output_path)
        if out_dir and not os.path.exists(out_dir):
            os.makedirs(out_dir)

        relative = True
        adapt_movement_scale = True
        # NOTE(review): inference is forced onto the CPU; confirm no GPU use
        # is expected here.
        cpu = True

        if video_path:
            cap = cv2.VideoCapture(video_path)
            print("[INFO] Loading video from the given path")
        else:
            cap = cv2.VideoCapture(0)
            print("[INFO] Initializing front camera...")

        fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
        # 25 fps, fixed 256x256 color output.
        out1 = cv2.VideoWriter(output_path, fourcc, 25, (256, 256), True)

        # torch.no_grad(): inference only, do not track gradients.
        with torch.no_grad():
            source = torch.tensor(source_image[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
            if not cpu:
                source = source.cuda()
            kp_source = kp_detector(source)
            count = 0
            kp_driving_initial = None
            while True:
                ret, frame = cap.read()
                # Fix: check `ret` BEFORE touching the frame — the original
                # called cv2.flip(frame, 1) first, which raises when
                # cap.read() returns (False, None) at end of stream instead
                # of letting the loop exit cleanly.
                if not ret:
                    break
                frame = cv2.flip(frame, 1)

                if not video_path:
                    # Camera input: crop a fixed face region.
                    x, y, w, h = 143, 87, 322, 322
                    frame = frame[y:y + h, x:x + w]
                frame1 = resize(frame, (256, 256))[..., :3]

                if count == 0:
                    # Keypoints of the first driving frame anchor the
                    # relative-motion transfer.
                    first = torch.tensor(frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
                    kp_driving_initial = kp_detector(first)

                driving_frame = torch.tensor(frame1[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2)
                if not cpu:
                    driving_frame = driving_frame.cuda()
                kp_driving = kp_detector(driving_frame)
                kp_norm = normalize_kp(kp_source=kp_source,
                                       kp_driving=kp_driving,
                                       kp_driving_initial=kp_driving_initial,
                                       use_relative_movement=relative,
                                       use_relative_jacobian=relative,
                                       adapt_movement_scale=adapt_movement_scale)
                out = generator(source, kp_source=kp_source, kp_driving=kp_norm)
                # Single transpose per frame (the original computed it twice
                # and also accumulated every frame in an unused `predictions`
                # list, growing memory for the whole video).
                im = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
                im = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
                print(im.shape, end='\r')
                out1.write(img_as_ubyte(im))
                self.send(f"{count}")  # progress counter to the client
                count += 1

            cap.release()
            out1.release()
            cv2.destroyAllWindows()
