from typing import List
import time
import pathlib
import threading
import requests
import numpy as np
import rclpy
from rclpy.executors import ExternalShutdownException
from rclpy.executors import MultiThreadedExecutor
from cv_bridge import CvBridge
from langchain_core.prompts import PromptTemplate
from semantic_nav.navigator import Navigator
from semantic_nav.ros_utils import ColorDepthSyncer
from semantic_nav.slam_utils import depth_to_point_cloud
from semantic_nav.text_process import extract_list
from semantic_nav.log_utils import get_logger
logger = get_logger()


class MultimodalSemanticNav(ColorDepthSyncer):
    """ROS 2 node that buffers synchronized RGB-D frames, dumps them to .npy
    files, and runs a background loop that queries a multimodal LLM to decide
    whether a semantic target has been reached and to localize it in 3D.
    """

    def __init__(self, dest_path, len_to_infer=8, node_name='multimoda_semantic_nav', namespace='', voxel_size=0.1):
        """Create the node and start the background inference loop.

        Args:
            dest_path: Directory where buffered RGB/point-cloud batches are saved.
            len_to_infer: Number of frames to accumulate before writing a batch.
            node_name: ROS node name.
            namespace: ROS namespace.
            voxel_size: Accepted for interface compatibility; not used here.
        """
        super().__init__(node_name=node_name, namespace=namespace)
        self.buf_cnt = 0
        self.color_file = None
        self.depth_file = None
        self.color_buf = []
        self.depth_buf = []
        self.dest_path = str(dest_path)
        self.len_to_infer = len_to_infer
        self.navigator = Navigator()
        # Guards color_file/depth_file, which are handed from the ROS callback
        # thread to the inference loop thread.
        self.rgb_lock = threading.Lock()
        # Lets destroy_node() ask the loop thread to exit; without it the
        # `while True` loop never terminates and join() blocks forever.
        self._stop_event = threading.Event()
        self.cv_bridge = CvBridge()
        self.arrived_template = PromptTemplate.from_template("Have I arrived {target}?")
        self.perception_template = PromptTemplate.from_template("Output the 3D bounding box of {object}.")
        # Start the worker LAST so it can never observe a half-initialized
        # node (the original started it before cv_bridge/templates existed,
        # which could race into an AttributeError).
        self.loop_thread = threading.Thread(target=self.loop_execute, daemon=True)
        self.loop_thread.start()

    def init(self, queue_size=10, slop=None):
        """Initialize the color/depth synchronizer; returns self for chaining."""
        super().init(queue_size=queue_size, slop=slop)
        return self

    def destroy_node(self):
        """Stop the inference loop, wait for it, then tear down the node."""
        self._stop_event.set()
        # Bounded join: the loop sleeps up to ~1s between stop-flag checks.
        self.loop_thread.join(timeout=5.0)
        super().destroy_node()

    def process_observation(self, color_msgs, depth_msgs):
        """Synchronized RGB-D callback: accumulate frames, flush batches to disk.

        Buffers `len_to_infer` frames; on the next call, saves the RGB stack
        and the derived point clouds as timestamped .npy files and publishes
        the file names to the inference loop under `rgb_lock`.
        """
        if self.buf_cnt < self.len_to_infer:
            cv_rgb_image = self.cv_bridge.imgmsg_to_cv2(color_msgs[0], desired_encoding="bgr8")
            cv_depth_image = self.cv_bridge.imgmsg_to_cv2(depth_msgs[0], desired_encoding="32FC1")
            self.color_buf.append(np.array(cv_rgb_image))
            self.depth_buf.append(depth_to_point_cloud(cv_depth_image, self.camera_intrinsics[0]))
            self.buf_cnt += 1
        else:
            time_str = time.strftime("%Y%m%d-%H-%M-%S")
            # pathlib join instead of string '+' concatenation.
            color_file = str(pathlib.Path(self.dest_path) / f"rgb_{time_str}.npy")
            depth_file = str(pathlib.Path(self.dest_path) / f"depth_{time_str}.npy")
            np.save(color_file, self.color_buf)
            np.save(depth_file, self.depth_buf)
            # Save BEFORE publishing the names so the consumer never sees a
            # file name whose contents are not on disk yet.
            with self.rgb_lock:
                self.color_file = color_file
                self.depth_file = depth_file

            self.color_buf.clear()
            self.depth_buf.clear()
            self.buf_cnt = 0

    def loop_execute(self):
        """Background loop: for each target, poll the LLM until arrival.

        Runs on `loop_thread`; exits promptly once `_stop_event` is set.
        """
        instance_list = ['corridor', 'fridge', 'counter']
        for instance in instance_list:
            while not self._stop_event.is_set():
                # Snapshot the latest batch file names under the lock, then
                # RELEASE it before the (potentially slow) LLM calls so the
                # ROS callback is never blocked while we wait for the model.
                with self.rgb_lock:
                    color_file = self.color_file
                    depth_file = self.depth_file
                if not (color_file and depth_file):
                    # No batch written yet; back off and re-check.
                    time.sleep(1.0)
                    continue
                arrived = self.callLLM(color_file, depth_file, self.arrived_template.format(target=instance))
                if 'yes' in arrived.lower():
                    break
                logger.info("Not yet arrived")
                bbox3d_str = self.callLLM(color_file, depth_file, self.perception_template.format(object=instance))
                bbox3d = np.array(extract_list(bbox3d_str))
                logger.info(bbox3d)
                time.sleep(0.1)

    def callLLM(self, color_file, depth_file, prompt):
        """Query the multimodal LLM with the saved RGB/point-cloud batch.

        Placeholder stub: currently returns an empty string; the caller
        treats any response without 'yes' as "not arrived".
        """
        # Call the language model to generate the response
        response = str()
        return response


def main(args=None):
    """Entry point: configure topics, run the semantic navigator until shutdown.

    Args:
        args: Command-line arguments forwarded to ``rclpy.init``.
    """
    rclpy.init(args=args)
    # Pre-bind so the `finally` block cannot hit UnboundLocalError when the
    # constructor (or mkdir) raises before the name is assigned.
    semantic_navigator = None
    try:
        dest_path = pathlib.Path('/tmp').joinpath('LLM_input')
        dest_path.mkdir(parents=True, exist_ok=True)
        semantic_navigator = MultimodalSemanticNav(dest_path)

        semantic_navigator.set_parameters([rclpy.parameter.Parameter(name="color_topics", value=["/zed/zed_node/rgb/image_rect_color"]),
                                        rclpy.parameter.Parameter(name="depth_topics", value=["/zed/zed_node/depth/depth_registered"]),
                                        rclpy.parameter.Parameter(name="color_info_topics", value=["/zed/zed_node/rgb/camera_info"]),
                                        rclpy.parameter.Parameter(name="depth_info_topics", value=["/zed/zed_node/depth/camera_info"])])

        semantic_navigator.init(queue_size=10, slop=0.1)
        executor = MultiThreadedExecutor()
        rclpy.spin(semantic_navigator, executor=executor)
    except (KeyboardInterrupt, ExternalShutdownException):
        # Both are the normal ways a spinning node is asked to stop.
        pass
    finally:
        if semantic_navigator is not None:
            semantic_navigator.destroy_node()
        # try_shutdown() is safe even when the context was already shut down
        # (e.g. after ExternalShutdownException), where shutdown() would raise.
        rclpy.try_shutdown()


# Run the node directly (e.g. `python3 file.py` or a `ros2 run` entry point).
if __name__ == "__main__":
    main()
