import argparse
import os
import platform
import time
from typing import List

from langchain import hub
from langchain_core.documents import Document
from langchain_ollama import ChatOllama
from langgraph.graph import START, StateGraph
from typing_extensions import List, TypedDict

from data import image_db


# todo: remove env setting
os.environ["LANGCHAIN_TRACING_V2"] = "true"
os.environ["LANGCHAIN_PROJECT"] = "museum.ai"
os.environ["LANGCHAIN_API_KEY"] = "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"
os.environ["LANGCHAIN_ENDPOINT"] = "https://api.smith.langchain.com"
os.environ["NOMIC_API_KEY"] = "nk-o09gqPOC9Qq-b7SCo5-tYNOvORBvBV-YUUomTg3fbjU"

db = image_db.load_img_db("./data/dunhuang_db")

# Define prompt for question-answering
prompt = hub.pull("rlm/rag-prompt")


class State(TypedDict):
    img_path: str
    question: str
    context: List[Document]
    answer: str

class TimedChatOllama(ChatOllama):
    def _create_chat_stream(
        self,
        messages,
        stop,
        **kwargs,
    ):
        import time
        chat_params = self._chat_params(messages, stop, **kwargs)

        start_time = time.time()  # Record start time
        first_token_time = None
        previous_token_time = start_time
        token_times = []

        if chat_params["stream"]:
            for i, part in enumerate(self._client.chat(**chat_params)):
                current_time = time.time()
                if i == 0:
                    first_token_time = current_time
                    print(f"1st token latency: {(first_token_time - start_time)*1000:.3f} ms")
                else:
                    token_time = current_time - previous_token_time
                    token_times.append(token_time)
                    # print(f"Generated token: {part['message']['content']}, generate time: {token_time} s")
                yield part
                previous_token_time = time.time()  # Update previous_token_time after yield

            if token_times:
                average_token_time = sum(token_times) / len(token_times)
                print(f"Avg 2+ token latency: {average_token_time*1000:.3f} ms")
        else:
            yield self._client.chat(**chat_params)    


def retrieve(state: State):
    retrieved_docs = db.similarity_search_by_image(state["img_path"], k=1)
    img_path = retrieved_docs[0].metadata["path"]
    if platform.system().lower() == "linux":
        img_path = img_path.replace("\\", "/")
    print("Similar image path:", img_path)
    txt_path = img_path.replace(".png", ".txt")
    print("Context text path:", txt_path)
    with open(txt_path, "r", encoding='utf-8') as f:
        img_content = f.read()
        return {"context": [Document(img_content)]}


def generate(state: State, llm):
    docs_content = "\n\n".join(doc.page_content for doc in state["context"])
    print("Context text:", docs_content)
    messages = prompt.invoke({"question": state["question"], "context": docs_content})
    response = llm.invoke(messages)
    return {"answer": response.content}

def main():
    parser = argparse.ArgumentParser(description="Run the museum.ai application.")
    parser.add_argument("--test_performance", action="store_true", help="Whether to test performance")
    parser.add_argument("--llm", type=str, default="llama3.1:8b", help="Model to test")

    args = parser.parse_args()

    # Initialize the LLM with Local Llama 3.1 model
    llm = TimedChatOllama(
        model=args.llm,
        temperature=0,
    )

    # Compile application and test
    graph_builder = StateGraph(State).add_sequence([retrieve, lambda state: generate(state, llm)])
    graph_builder.add_edge(START, "retrieve")
    graph = graph_builder.compile()

    img = "./data/dunhuang_raw/0254/莫高窟第254窟_主室_南壁.png"
    question = "请讲述图片里面的故事，细节越丰富越好"
    print(f"Query: Picture: {img}; Question: {question}")
    if args.test_performance:
        for i in range(2):
            if i == 0:
                print("[Warm up]:")
            else: 
                print("1st trial:")
            response = graph.invoke(
                {"img_path": img, "question": question})
    else:
        response = graph.invoke(
        {"img_path": img, "question": question})
    print("iRAG based Answer:", response["answer"])
    # reference answer on mtl 165u by default model llama3.1:8b
    #  "RAG based Answer: 这幅壁画描绘了释迦牟尼成道前被天魔波旬诱惑和威胁的故事。画面中央是释迦牟尼结跏趺坐，左手持袈裟，右手置于膝上。两侧是魔王波旬的军队和三个女儿诱惑释迦的情节。"

if __name__ == "__main__":
    main()
