"""
At the command line, only need to run once to install the package via pip:

$ pip install --upgrade google-generativeai
"""

import os
import sys 
import signal
import socket
import cv2
import google.generativeai as genai
import rospy
from cv_bridge import CvBridge
import PIL.Image
import actionlib
from sensor_msgs.msg import Image
from semantic_nav.msg import NavStrAction, NavStrGoal, NavStrResult, NavStrFeedback
from llm_navigator import LlmNavigatorInMap

socket.setdefaulttimeout(180)  # global socket timeout (seconds) for REST calls to the Gemini API
IMAGE_TOPIC = "/camera/color/image_raw"  # ROS topic publishing RGB camera frames
SAMPLE_RATE = 0.6  # Hz; rate at which camera frames are sampled and sent to the model
GEMINI_KEY_NUM = 5  # NOTE(review): appears unused — keys are discovered by probing GOOGLE_API_KEY<i> env vars


def signal_handler(signum, stack):
    """Print a shutdown notice and exit the process cleanly."""
    print("接收到信号，程序即将退出...")
    sys.exit(0)


# Install the handler so Ctrl+C (SIGINT) terminates the program gracefully.
signal.signal(signal.SIGINT, signal_handler)


class GeminiModel:
    """Thin wrapper around a single Google Gemini generative model instance."""

    def __init__(self, api_key=None, instruction=""):
        """Configure the Gemini client for one API key.

        Args:
            api_key: Google API key. When omitted, falls back to the
                GOOGLE_API_KEY environment variable. The fallback is now
                resolved at call time — the original evaluated
                ``os.getenv`` once at class-definition time, so keys
                exported after import were never seen.
            instruction: system instruction applied to every request.

        Raises:
            ValueError: if no API key is available.
        """
        if api_key is None:
            api_key = os.getenv("GOOGLE_API_KEY")
        if api_key is None:
            raise ValueError("GOOGLE_API_KEY environment variable must be set.")
        # NOTE(review): transport="rest" — presumably chosen so the module-level
        # socket.setdefaulttimeout applies to requests; confirm.
        genai.configure(api_key=api_key, transport="rest")
        self.model = genai.GenerativeModel(
            model_name="models/gemini-1.5-pro-latest", system_instruction=instruction
        )

    def generate_response(self, prompts) -> str:
        """Send *prompts* (a string or list of text/image parts) and return the reply text."""
        response = self.model.generate_content(prompts)
        return response.text


class GeminiMultiModel:
    """Failover pool of GeminiModel instances backed by multiple API keys.

    Keys are read from numbered environment variables GOOGLE_API_KEY1,
    GOOGLE_API_KEY2, ... stopping at the first missing one.
    """

    def __init__(self, instruction=""):
        """Build one GeminiModel per discovered API key.

        Args:
            instruction: system instruction shared by all model instances.
        """
        api_keys = []
        index = 1
        while True:
            key = os.getenv("GOOGLE_API_KEY" + str(index))
            if key is None:
                break
            api_keys.append(key)
            index += 1
        # One model instance per key; order defines the failover sequence.
        self.gemini_array = [
            GeminiModel(api_key=key, instruction=instruction) for key in api_keys
        ]

    def invoke(self, inputs):
        """Try each model in turn and return the first successful reply.

        Returns an empty string when every key fails or none is configured.
        Instances that raised (indices < i) are rotated to the back of the
        queue so the next call starts from the one that last succeeded.
        """
        response = ""
        i = 0
        while i < len(self.gemini_array):
            try:
                response = self.gemini_array[i].generate_response(inputs)
                break
            except Exception as exc:
                # Likely quota/rate-limit errors; log instead of swallowing
                # silently (the original discarded the exception entirely).
                print("Gemini key #%d failed: %s" % (i + 1, exc), file=sys.stderr)
                i += 1
        # Move the failed instances to the end of the queue.
        self.gemini_array = self.gemini_array[i:] + self.gemini_array[:i]
        return response


class LlmObjectSearch:
    """ROS action server that navigates toward a user-described target and
    uses a Gemini vision model to decide when the target has been reached."""

    def __init__(self):
        # System prompt: constrain the model to answer strictly "yes" or "no"
        # given a camera frame and the user task. (Typos "equiped" fixed.)
        instruction = """Suppose you are a robot equipped with a camera performing user-specified tasks in the scene.
You can see the image captured by the camera and determine whether you have reached a position where you can complete the user-specified task according to your observation. 
You need to try to get as close as possible to the target objects the user wants you to search. 
Remember Only output yes or no."""
        self.gemini_model = GeminiMultiModel(instruction=instruction)
        self.navigator_in_map = LlmNavigatorInMap(
            os.path.join(os.path.dirname(__file__), "../json/map.json")
        )

        self.cv_bridge = CvBridge()
        # auto_start=False, then start() — the pattern actionlib recommends to
        # avoid goal callbacks racing with construction.
        self.nav_server = actionlib.SimpleActionServer(
            'semantic_nav', NavStrAction, self.execute_callback, False
        )
        self.nav_server.start()
        print('Started navigation server.')
        # NOTE(review): done_flag is never read in this file; kept for
        # backward compatibility — confirm no external reader before removing.
        self.done_flag = False

    def execute_callback(self, goal):
        """Execute one navigation goal.

        Hands the instruction to the map navigator, then periodically samples
        camera frames and asks Gemini whether the task position is reached.
        Terminates the goal as succeeded, preempted, or aborted.
        """
        print("User: " + goal.input)
        print("Agent:")
        if not self.navigator_in_map.invoke(goal.input):
            # The navigator could not handle this instruction.
            self.nav_server.set_aborted()
            return
        print("-----------------------------")
        rate = rospy.Rate(SAMPLE_RATE)
        # Goal-execution loop: one frame per iteration at SAMPLE_RATE Hz.
        while not rospy.is_shutdown():
            if self.nav_server.is_preempt_requested():
                # Fix: the original exited the loop on preemption without
                # terminating the goal or stopping the navigator, leaving
                # the actionlib goal active.
                self.navigator_in_map.cancel()
                self.nav_server.set_preempted()
                return
            image_msg = rospy.wait_for_message(IMAGE_TOPIC, Image)
            rate.sleep()
            self.cv_image = self.cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
            # OpenCV uses BGR channel order while Pillow expects RGB.
            rgb_image = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2RGB)
            img_pil = PIL.Image.fromarray(rgb_image)

            # Typo fix: "user-sepcified" -> "user-specified" in the LLM prompt.
            prompt = "The user-specified instruction is: " + goal.input
            response = self.gemini_model.invoke([prompt, img_pil])
            print("User: " + prompt + " Determine whether you have reached a position where you can complete the user-specified task from the image observation.")
            if response:
                if "yes" in response.lower():
                    result = NavStrResult()
                    result.result = "Robot has arrived the target object"
                    self.nav_server.set_succeeded(result)
                    print("Agent:" + result.result)
                    # Stop move_base now that the target is reached.
                    self.navigator_in_map.cancel()
                    img_pil.show(title="image observation for success")
                    return
                feedback = NavStrFeedback()
                feedback.feedback = "Robot has not arrived yet"
                self.nav_server.publish_feedback(feedback)
                print("Agent:" + feedback.feedback)
            else:
                # Empty reply means every API key failed (e.g. rate limited).
                print("Agent: The invoking frequency is too high to be processed")

            print("-----------------------------")


def main():
    """Entry point: initialise the ROS node, start the action server, spin."""
    rospy.init_node("llm_search_object", anonymous=True)
    # Bind to a name so the server object stays alive while spinning.
    _searcher = LlmObjectSearch()
    rospy.spin()


if __name__ == "__main__":
    main()
