#!/bin/python3

from .base import SensorBase
import carla
import numpy as np
from queue import Queue
import numpy as np

class Camera(SensorBase):
    """Single CARLA camera sensor that publishes captured frames to a queue.

    Wraps one ``sensor.camera.*`` blueprint: configures resolution and field
    of view from ``config``, spawns the sensor attached to ``actor_attached``
    at ``transform_attached``, and forwards every frame to the queue installed
    via :meth:`set_queue`.
    """

    def __init__(self, name: str,
                 camera_type: str,
                 record: bool,
                 record_root: str,
                 actor_attached: carla.Actor,
                 transform_attached: carla.Transform,
                 config: dict
        ):
        """Configure the blueprint and spawn the sensor actor.

        Args:
            name: logical sensor name, used to tag frames put on the queue.
            camera_type: CARLA blueprint id, e.g. "sensor.camera.rgb".
            record: whether captured data should be recorded.
            record_root: root directory for recordings.
            actor_attached: actor (e.g. ego vehicle) the sensor attaches to.
            transform_attached: sensor pose relative to the attached actor.
            config: must provide "image_size_x", "image_size_y" and "fov".
        """
        super().__init__(name, camera_type, record, record_root, actor_attached,
                         transform_attached)
        # carla's ActorBlueprint.set_attribute accepts only string values;
        # cast so numeric config entries (e.g. parsed from JSON) also work.
        self.bp.set_attribute("image_size_x", str(config["image_size_x"]))
        self.bp.set_attribute("image_size_y", str(config["image_size_y"]))
        self.bp.set_attribute("fov", str(config["fov"]))
        super().generate()
        # Output queue; must be installed via set_queue() before frames start
        # arriving, otherwise _callback() fails on None.
        self.queue = None

    def destroy(self):
        """Tear down the underlying sensor actor (delegates to the base class)."""
        return super().destroy()

    def set_queue(self, queue):
        """Install the queue that _callback() publishes frames to."""
        self.queue = queue

    def _callback(self, data):
        """Sensor-data callback: publish a (frame_id, sensor_name, data) tuple."""
        self.queue.put((data.frame, self.name, data))

class CameraWithAnnotation():
    """Bundle of three co-located cameras sharing one pose: RGB, semantic
    segmentation, and depth.

    The three sensors are spawned with the same name/transform/config and can
    be wired to a single output queue; their streams are presumably told apart
    downstream by the sensor-type suffix in each frame's tag.
    """

    def __init__(self, name: str,
                 record: bool,
                 record_root: str,
                 actor_attached: carla.Actor,
                 transform_attached: carla.Transform,
                 config: dict
        ):
        """Spawn the RGB, segmentation, and depth sensors at one pose."""
        self.name = name
        self.record = record
        self.record_root = record_root
        self.actor_attached = actor_attached
        self.transform_attached = transform_attached
        self.config = config

        def build(sensor_type):
            # All three sensors share every argument except the blueprint id.
            return Camera(name, sensor_type, record, record_root,
                          actor_attached, transform_attached, config)

        self.camera_rgb = build("sensor.camera.rgb")
        self.camera_semseg = build("sensor.camera.semantic_segmentation")
        self.camera_depth = build("sensor.camera.depth")

    def _cameras(self):
        """Return the three sub-cameras in creation order."""
        return (self.camera_rgb, self.camera_semseg, self.camera_depth)

    def destroy(self):
        """Destroy all three sub-cameras."""
        for camera in self._cameras():
            camera.destroy()

    def set_queue(self, queue):
        """Point all three sub-cameras at the same output queue."""
        for camera in self._cameras():
            camera.set_queue(queue)

class AroundviewFisheyeCamera4():
    """Four-direction around-view rig: front/back/left/right camera bundles.

    Each direction is a CameraWithAnnotation (RGB + semantic segmentation +
    depth), i.e. 12 image streams in total. All streams share one queue;
    vis_frame() drains one complete frame (12 images) and blits a 3x4
    composite onto a pygame display.
    """

    # Pixel size of one tile in the composite drawn by vis_frame().
    # NOTE(review): assumes every camera is configured to 480x360 output —
    # confirm against the "image_size_x"/"image_size_y" entries in config.
    _TILE_H = 360
    _TILE_W = 480

    # (row, col) tile of each sensor stream in the 3x4 composite grid:
    # rows are rgb / segmentation / depth; columns are left/front/right/back.
    _GRID_POSITIONS = {
        "aroundview_left_sensor.camera.rgb": (0, 0),
        "aroundview_left_sensor.camera.semantic_segmentation": (1, 0),
        "aroundview_left_sensor.camera.depth": (2, 0),

        "aroundview_front_sensor.camera.rgb": (0, 1),
        "aroundview_front_sensor.camera.semantic_segmentation": (1, 1),
        "aroundview_front_sensor.camera.depth": (2, 1),

        "aroundview_right_sensor.camera.rgb": (0, 2),
        "aroundview_right_sensor.camera.semantic_segmentation": (1, 2),
        "aroundview_right_sensor.camera.depth": (2, 2),

        "aroundview_back_sensor.camera.rgb": (0, 3),
        "aroundview_back_sensor.camera.semantic_segmentation": (1, 3),
        "aroundview_back_sensor.camera.depth": (2, 3),
    }

    def __init__(self, record: bool,
                 record_root: str,
                 actor_attached: carla.Actor,
                 config: dict
        ):
        """Store rig configuration; call generate() to actually spawn sensors.

        Args:
            record: whether captured data is recorded.
            record_root: root directory for recordings.
            actor_attached: actor (ego vehicle) the cameras attach to.
            config: per-camera config dict keyed by camera name
                ("aroundview_front", "aroundview_back", ...).
        """
        self.record = record
        self.record_root = record_root
        self.actor_attached = actor_attached
        self.config = config
        # Shared queue for all 12 streams; 72 = 12 streams * 6 buffered frames.
        self.queue = Queue(maxsize=72)

    def _make_camera(self, name: str, transform: carla.Transform):
        """Spawn one annotated camera bundle and hook it to the shared queue."""
        camera = CameraWithAnnotation(name, self.record, self.record_root,
            self.actor_attached, transform, self.config[name])
        camera.set_queue(self.queue)
        return camera

    def generate(self):
        """Spawn the four camera bundles around the attached actor."""
        extent = self.actor_attached.bounding_box.extent

        # Front camera: at the front bumper, looking forward.
        front = carla.Transform()
        front.location.x = extent.x
        front.location.y = 0.0
        front.location.z = 0.36  # meters
        # BUG FIX: the original passed the literal "front" as record_root
        # here, unlike the other three cameras which used self.record_root.
        self.camera_front = self._make_camera("aroundview_front", front)

        # Rear camera: at the rear bumper, looking backwards.
        back = carla.Transform()
        back.location.x = -extent.x
        back.location.y = 0.0
        back.location.z = 0.35  # meters
        back.rotation = carla.Rotation(yaw=-180)
        self.camera_back = self._make_camera("aroundview_back", back)

        # Left camera: on the left side, looking left.
        left = carla.Transform()
        left.location.x = extent.x * 3 / 5
        left.location.y = -extent.y
        left.location.z = 0.8  # meters
        left.rotation = carla.Rotation(yaw=-90)
        self.camera_left = self._make_camera("aroundview_left", left)

        # Right camera: on the right side, looking right.
        right = carla.Transform()
        right.location.x = extent.x * 3 / 5
        right.location.y = extent.y
        right.location.z = 0.8  # meters
        right.rotation = carla.Rotation(yaw=90)
        self.camera_right = self._make_camera("aroundview_right", right)

    def destroy(self):
        """Destroy all four camera bundles."""
        self.camera_front.destroy()
        self.camera_back.destroy()
        self.camera_left.destroy()
        self.camera_right.destroy()

    def parse_camera_data(self, name, data):
        """Convert a raw carla.Image into an (H, W, 3) RGB uint8 array.

        Segmentation images are recolored with the CityScapes palette and
        depth images with a logarithmic grayscale before decoding, so the
        result is human-viewable.
        """
        if "segmentation" in name:
            data.convert(carla.ColorConverter.CityScapesPalette)

        if "depth" in name:
            data.convert(carla.ColorConverter.LogarithmicDepth)

        # Raw data is BGRA; drop the alpha channel and flip to RGB order.
        img = np.frombuffer(data.raw_data, dtype=np.dtype("uint8"))
        img = img.reshape((data.height, data.width, 4))
        img = img[:, :, :3]
        img = img[:, :, ::-1]
        return img

    def vis_frame(self, display):
        """Blit a 3x4 composite of all streams if a full frame is queued.

        Returns early without touching the display when fewer than 12 images
        are available.
        """
        import pygame

        if self.queue.qsize() < len(self._GRID_POSITIONS):
            return

        tile_h, tile_w = self._TILE_H, self._TILE_W
        # BUG FIX: the original allocated a (3*480, 4*640) canvas but filled
        # 360x480 tiles, leaving dead black margins; size canvas to the tiles.
        canvas = np.zeros((3 * tile_h, 4 * tile_w, 3), dtype=np.uint8)
        for _ in range(len(self._GRID_POSITIONS)):
            _frame, name, data = self.queue.get()
            img = self.parse_camera_data(name, data)
            row, col = self._GRID_POSITIONS[name]
            canvas[row * tile_h:(row + 1) * tile_h,
                   col * tile_w:(col + 1) * tile_w, :] = img

        # pygame surfaces are (width, height); numpy arrays are (rows, cols).
        surface = pygame.surfarray.make_surface(canvas.swapaxes(0, 1))
        display.blit(surface, (0, 0))
        pygame.display.flip()





