import os
import base64
import httpx
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_core.messages import HumanMessage
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.output_parsers import StrOutputParser
import google.generativeai as genai
from semantic_nav.utils import image_to_bytes
from semantic_nav.log_utils import get_logger
logger = get_logger()


class GeminiModel:
    """Wrapper around one google-generativeai Gemini model instance.

    Configures the SDK with a single API key and exposes a minimal
    ``invoke`` method that returns the generated text.
    """

    def __init__(self, api_key=None, instruction=""):
        # NOTE: the original signature used ``api_key=os.getenv("GOOGLE_API_KEY")``,
        # which is evaluated once at import time — environment changes made after
        # the module is imported were silently ignored. Resolve at call time instead.
        if api_key is None:
            api_key = os.getenv("GOOGLE_API_KEY")
        if api_key is None:
            raise ValueError("GOOGLE_API_KEY environment variable must be set.")
        # "rest" transport avoids gRPC, which can be blocked in some environments.
        genai.configure(api_key=api_key, transport="rest")
        self.model = genai.GenerativeModel(
            model_name="models/gemini-1.5-pro-latest", system_instruction=instruction
        )

    def invoke(self, prompts) -> str:
        """Generate content for *prompts* and return the response text."""
        response = self.model.generate_content(prompts)
        return response.text


class GeminiMultiModel:
    """Pool of GeminiModel instances backed by numbered API keys.

    Keys are read from ``GOOGLE_API_KEY1``, ``GOOGLE_API_KEY2``, ... until the
    first missing one. ``invoke`` tries each model in order and rotates failed
    instances to the back of the queue.
    """

    def __init__(self, instruction=""):
        # Collect consecutively numbered API keys from the environment.
        api_keys = []
        index = 1
        while (key := os.getenv(f"GOOGLE_API_KEY{index}")) is not None:
            api_keys.append(key)
            index += 1
        # One model instance per discovered key.
        self.gemini_array = [
            GeminiModel(api_key=key, instruction=instruction) for key in api_keys
        ]

    def invoke(self, inputs):
        """Try each model until one succeeds; return "" if all fail.

        Failed instances (and none of the successful one's predecessors) are
        rotated to the end of the queue so the working model is tried first
        next time. Returning "" on total failure preserves the original
        best-effort contract for callers.
        """
        failed = 0
        response = ""
        for failed, model in enumerate(self.gemini_array):
            try:
                response = model.invoke(inputs)
                break
            except Exception:
                # Best-effort failover: move on to the next key/instance.
                continue
        else:
            # Every instance failed (or the pool is empty).
            failed = len(self.gemini_array)
        # Rotate the failed prefix to the end of the queue.
        self.gemini_array = self.gemini_array[failed:] + self.gemini_array[:failed]
        return response


class ChatChain:
    """Small convenience wrapper: prompt template -> chat model -> string output."""

    def __init__(self, prompt_template, model=None):
        # Fall back to the default Gemini chat model when none is supplied.
        self.model = ChatGoogleGenerativeAI(model="gemini-1.5-pro") if model is None else model
        self.prompt_template = prompt_template
        # Compose the runnable pipeline once at construction time.
        self.chain = prompt_template | self.model | StrOutputParser()

    def invoke(self, input):
        """Run the composed chain on *input* and return the parsed string."""
        return self.chain.invoke(input)


class LlmTranslator:
    """Translate user input between languages using the Gemini chat chain."""

    def __init__(self):
        # Prompt text fixed: "You need to translator" -> "translate", and a
        # missing space before "Remember" added. The placeholders are unchanged.
        system_template = (
            "You are a translator. You need to translate the user input from "
            "{source_language} to {target_language}. Remember only return the "
            "translation result, do not return any other information."
        )
        user_template = "The user input is: {user_input}"
        prompt_template = ChatPromptTemplate.from_messages([
            ('system', system_template),
            ('user', user_template),
        ])
        self.chain = ChatChain(prompt_template=prompt_template)

    def invoke(self, user_input, src_lang="Other languages", tgt_lang="English"):
        """Translate *user_input* from *src_lang* to *tgt_lang* and return the result."""
        return self.chain.invoke({
            "source_language": src_lang,
            "target_language": tgt_lang,
            "user_input": user_input,
        })


class EmbodiedTaskExecutor:
    """LLM chain that judges whether a camera-equipped robot has reached a task position."""

    def __init__(self):
        # Prompt text fixed: "equiped" -> "equipped". Everything else unchanged.
        system_template = """You are a robot equipped with a camera performing user-specified tasks in the scene.
You can see the image captured by the camera and determine whether you have reached a position where you can complete the user-specified task according to your observation. 
If you have reached the right position, return "yes"; otherwise return information of the current environment according to observation and the user-specified task."""
        user_template = "The user input is: {user_input}"
        prompt_template = ChatPromptTemplate.from_messages([
            ('system', system_template),
            ('user', user_template),
        ])
        self.chain = ChatChain(prompt_template=prompt_template)

    def invoke(self, user_input):
        """Evaluate *user_input* (task + observation) and return the model's verdict."""
        return self.chain.invoke({"user_input": user_input})


def to_image_message(img_arg):
    """Convert an image argument into a LangChain image_url content part.

    The image is serialized to bytes via the project helper, then embedded
    as a base64 PNG data URL.
    """
    encoded = base64.b64encode(image_to_bytes(img_arg)).decode("utf-8")
    return {"type": "image_url", "image_url": f"data:image/png;base64,{encoded}"}


def main(args=None):
    """Demo: have Gemini describe a remote image, then translate the answer to Chinese."""
    translator = LlmTranslator()
    # test image prompt invoke
    image_url = "https://bkimg.cdn.bcebos.com/pic/d788d43f8794a4c27d1e39dcc3a20cd5ad6edcc493b7?x-bce-process=image/format,f_auto/watermark,image_d2F0ZXIvYmFpa2UyNzI,g_7,xp_5,yp_5,P_20/resize,m_lfit,limit_1,h_1080"
    image_bytes = httpx.get(image_url).content
    describe_prompt = ChatPromptTemplate.from_messages([
        ('user', ["What's in this image?"]),
        MessagesPlaceholder('image'),
    ])
    describe_chain = ChatChain(prompt_template=describe_prompt)
    image_message = HumanMessage(content=[to_image_message(image_bytes)])
    description = describe_chain.invoke({'image': [image_message]})
    logger.debug(translator.invoke(description, src_lang="English", tgt_lang="Chinese"))


# Run the demo pipeline only when executed as a script, not on import.
if __name__ == "__main__":
    main()
