"""
At the command line, only need to run once to install the package via pip:

$ pip install --upgrade google-generativeai
"""

import sys
import pathlib
import threading
import copy
import socket
import cv2
import rclpy
from rclpy.executors import ExternalShutdownException
from rclpy.executors import MultiThreadedExecutor 
from cv_bridge import CvBridge
import PIL.Image
from sensor_msgs.msg import Image
from semantic_nav_interfaces.action import NavStr
from semantic_nav.semantic_nav_server import SemanticNavServer
from semantic_nav.llm_navigator import LlmNavigatorInMap
from semantic_nav.llm_utils import GeminiMultiModel
from semantic_nav.log_utils import get_logger
logger = get_logger()
# Gemini / network calls can hang indefinitely; bound every socket operation at 3 minutes.
socket.setdefaulttimeout(180)
# NOTE(review): name is misspelled ("IMGAE" -> "IMAGE"); kept as-is since other modules may reference it.
IMGAE_TOPIC = "/camera_image_color"
FRAME_HZ = 4  # presumably the camera frame rate in Hz — unused in this file; TODO confirm
SAMPLE_RATE = 0.6  # rate (Hz) at which execute_func samples a frame and queries the LLM
GEMINI_KEY_NUM = 5  # presumably the number of Gemini API keys rotated by GeminiMultiModel — TODO confirm


class LlmObjectSearch(SemanticNavServer):
    """Action server that navigates toward a user-described object.

    A navigator drives the robot through the map while the latest camera
    frame is periodically shown to a Gemini vision model, which answers
    yes/no on whether the robot has reached a position where the
    user-specified task can be completed.
    """

    def __init__(self, map_path):
        """Set up the Gemini model, the map navigator, and the camera subscription.

        :param map_path: path to the JSON map consumed by LlmNavigatorInMap.
        """
        super().__init__(namespace='search_object')
        instruction = """Suppose you are a robot equiped with a camera performing user-specified tasks in the scene.
You can see the image captured by the camera and determine whether you have reached a position where you can complete the user-specified task according to your observation. 
You need to try to get as close as possible to the target objects the user wants you to search. 
Remember Only output yes or no."""
        self.gemini_model = GeminiMultiModel(instruction=instruction)
        self.navigator_in_map = LlmNavigatorInMap(map_path)
        self.cv_bridge = CvBridge()
        # Latest camera frame as a PIL image; None until the first frame arrives.
        # Guarded by rgb_lock because the subscription callback and
        # execute_func run on different executor threads.
        self.img_pil = None
        self.rgb_lock = threading.Lock()
        self.image_subscriber = self.create_subscription(msg_type=Image, topic=IMGAE_TOPIC, callback=self.rgb_callback, qos_profile=1)

    def rgb_callback(self, msg):
        """Cache the newest camera frame as a PIL image (RGB order)."""
        cv_image = self.cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        rgb_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB) # OpenCV uses BGR (Blue-Green-Red) order by default, while Pillow uses RGB (Red-Green-Blue) order by default
        with self.rgb_lock:
            self.img_pil = PIL.Image.fromarray(rgb_image)

    # NOTE: the original decorated this with @staticmethod while keeping a
    # `self` parameter; called on the instance, the goal handle would have
    # bound to `self`. It is a regular method.
    def execute_func(self, goal_handle):
        """Execute the action: navigate and poll the LLM until arrival.

        :param goal_handle: ROS 2 action goal handle carrying the user request.
        :returns: NavStr.Result describing the outcome.
        """
        user_input = goal_handle.request.input
        logger.info("User: " + user_input)
        logger.info("Agent:")
        if not self.navigator_in_map.invoke(user_input):
            # ROS 2 actions abort via the goal handle (set_aborted was ROS 1
            # actionlib API), and the execute callback must return a Result.
            goal_handle.abort()
            return NavStr.Result()
        logger.info("-----------------------------")
        rate = self.create_rate(SAMPLE_RATE)
        while rclpy.ok():
            if goal_handle.is_cancel_requested:
                # if so, we cancel the goal handle
                goal_handle.canceled()
                self.get_logger().info('Goal canceled')
                return NavStr.Result()
            rate.sleep()
            # Snapshot the latest frame under the lock; deepcopy so the
            # callback thread can keep replacing self.img_pil.
            with self.rgb_lock:
                img_pil = copy.deepcopy(self.img_pil)
            if img_pil is None:
                # No camera frame received yet — wait for the next cycle.
                continue
            prompt = "The user-sepcified instruction is: " + user_input
            response = self.gemini_model.invoke([prompt, img_pil])
            logger.info("User: " + prompt + " Determine whether you have reached a position where you can complete the user-specified task from the image observation.")
            if len(response) > 0:
                if "yes" in response.lower():
                    result = NavStr.Result()
                    result.result = "Robot has arrived the target object"
                    goal_handle.succeed()
                    logger.info("Agent:" + result.result)
                    # stop the navigator
                    self.navigator_in_map.cancel()
                    img_pil.show(title="image observation for success")
                    return result
                else:
                    feedback = NavStr.Feedback()
                    feedback.feedback = "Robot has not arrived yet"
                    goal_handle.publish_feedback(feedback)
                    logger.info("Agent:" + feedback.feedback)
            else:
                logger.info("Agent: The invoking frequency is too high to processed")
            logger.info("-----------------------------")
            # NOTE: the original returned here, so the polling loop ran at
            # most one iteration; keep looping until success or shutdown.
        # rclpy shut down while the goal was still active.
        goal_handle.abort()
        return NavStr.Result()


def main(args=None):
    """Entry point: initialize ROS, locate the map, and spin the server.

    :param args: command-line args forwarded to rclpy.init (default None).
    """
    # set up ros configuration
    rclpy.init(args=args)
    node = None
    try:
        from ament_index_python.packages import get_package_prefix
        package_path = get_package_prefix('semantic_nav')
        map_path = pathlib.Path(package_path).joinpath('json/map.json')
        if not map_path.exists():
            logger.error("Map file not found")
            return
        node = LlmObjectSearch(map_path)
        executor = MultiThreadedExecutor()
        rclpy.spin(node, executor=executor)
    except KeyboardInterrupt:
        pass
    except ExternalShutdownException:
        sys.exit(1)
    finally:
        # The original leaked the node and never shut rclpy down; clean up
        # on every exit path (try_shutdown is safe after external shutdown).
        if node is not None:
            node.destroy_node()
        rclpy.try_shutdown()


if __name__ == "__main__":
    main()
