from langchain_core.vectorstores import InMemoryVectorStore
from langchain import hub
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langgraph.graph import START, StateGraph
from typing_extensions import List, TypedDict
from langchain_ollama import ChatOllama
from langchain_ollama import OllamaEmbeddings
from docx import Document as DocxDocument
import os
import argparse


# todo: remove env setting
# LangSmith tracing configuration.
# NOTE(security): an API key used to be hard-coded here; a secret committed to
# source control must be treated as leaked and rotated. The key now has to be
# supplied via the LANGCHAIN_API_KEY environment variable. setdefault keeps
# any values the caller already exported.
os.environ.setdefault("LANGCHAIN_TRACING_V2", "true")
os.environ.setdefault("LANGCHAIN_PROJECT", "museum.ai")
os.environ.setdefault("LANGCHAIN_ENDPOINT", "https://api.smith.langchain.com")
if "LANGCHAIN_API_KEY" not in os.environ:
    print("Warning: LANGCHAIN_API_KEY is not set; LangSmith tracing will not work.")

# Use a local Ollama model to embed; chunks are indexed into an in-memory store.
embeddings = OllamaEmbeddings(model="nomic-embed-text:latest")
vector_store = InMemoryVectorStore(embeddings)

# Function to load and extract text from a list of .docx files to langchain Documents
def load_docx(file_paths: List[str]) -> List[Document]:
    """Read .docx files and convert each one into a LangChain Document.

    The file name (acting as a title) and all paragraph text are concatenated
    with no separator: the Dunhuang docs are small and mostly Chinese, so
    joining with '\\n' would lead to bad chunking, and the text needs no word
    gaps.

    Args:
        file_paths: Paths of the .docx files to read.

    Returns:
        One Document per input file, with the originating path recorded in
        ``metadata["source"]`` so retrieved chunks can be traced back.
    """
    documents = []
    for file_path in file_paths:
        doc = DocxDocument(file_path)
        # Seed with the file name so the title survives chunking; strip the
        # non-breaking spaces Word tends to insert into names.
        parts = [os.path.basename(file_path).replace('\xa0', '')]
        for para in doc.paragraphs:
            # Drop newlines and ASCII spaces inside paragraphs.
            parts.append(para.text.replace('\n', '').replace(' ', ''))
        docx_text = ''.join(parts)
        documents.append(
            Document(page_content=docx_text, metadata={"source": file_path})
        )
    return documents

# Function to get all .docx file paths from a directory
def get_docx_file_paths(directory: str) -> List[str]:
    """Return sorted paths of every .docx file directly inside *directory*.

    Sorting matters: ``os.listdir`` returns entries in arbitrary order, which
    would make ingestion (and therefore chunk/index order) nondeterministic
    between runs.
    """
    return sorted(
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith('.docx')
    )


# Ingest the Dunhuang cave-254 corpus at import time: load the .docx files,
# split them into overlapping chunks, and index the chunks in the vector store.
docx_dir = "./data/dunhuang_raw/0254"
docx_files = get_docx_file_paths(docx_dir)
docs = load_docx(docx_files)

# 500-char chunks with 100-char overlap so context isn't cut mid-sentence.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=100)
all_splits = text_splitter.split_documents(docs)

# Index chunks into the in-memory vector store (ids are not needed here).
_ = vector_store.add_documents(documents=all_splits)

# Define prompt for question-answering, pulled from the LangChain hub
# (requires network access at import time).
prompt = hub.pull("rlm/rag-prompt")


# Define state for application
class State(TypedDict):
    """Shared state flowing between the graph's retrieve and generate steps."""

    question: str  # the user's query
    context: List[Document]  # chunks retrieved from the vector store
    answer: str  # final answer produced by the LLM


class TimedChatOllama(ChatOllama):
    """ChatOllama that prints token-latency metrics while streaming.

    Overrides the private ``_create_chat_stream`` hook to report
    time-to-first-token and the average inter-token latency.
    NOTE(review): depends on ChatOllama internals (``_chat_params``,
    ``self._client``) — verify against the installed langchain-ollama version.
    """

    def _create_chat_stream(
        self,
        messages,
        stop,
        **kwargs,
    ):
        """Yield chat-stream parts while timing each token.

        Timing caveat: ``previous_token_time`` is updated *after* each yield,
        so whatever time the consumer spends between tokens is included in the
        measured per-token latency.
        """
        import time
        chat_params = self._chat_params(messages, stop, **kwargs)

        start_time = time.time()  # Record start time
        first_token_time = None
        previous_token_time = start_time
        token_times = []  # latencies of every token after the first

        if chat_params["stream"]:
            for i, part in enumerate(self._client.chat(**chat_params)):
                current_time = time.time()
                if i == 0:
                    # Latency until the very first token arrives.
                    first_token_time = current_time
                    print(f"1st token latency: {(first_token_time - start_time)*1000:.3f} ms")
                else:
                    token_time = current_time - previous_token_time
                    token_times.append(token_time)
                yield part
                previous_token_time = time.time()  # Update previous_token_time after yield

            if token_times:
                # Average latency over tokens 2..n (first token excluded).
                average_token_time = sum(token_times) / len(token_times)
                print(f"Avg 2+ token latency: {average_token_time*1000:.3f} ms")
        else:
            # Non-streaming: yield the single complete response as one item.
            yield self._client.chat(**chat_params)


# Graph step 1: fetch relevant chunks for the question.
def retrieve(state: State):
    """Look up the chunks most similar to the question in the vector store.

    Returns a partial state update carrying the hits under ``"context"``.
    """
    hits = vector_store.similarity_search(state["question"])
    return {"context": hits}


def generate(state: State, llm):
    """Answer the question with *llm*, grounded in the retrieved context.

    Joins the retrieved chunks, fills the RAG prompt, and returns a partial
    state update with the model's answer under ``"answer"``.
    """
    chunks = [chunk.page_content for chunk in state["context"]]
    docs_content = "\n\n".join(chunks)
    print("Context text:", docs_content)
    filled_prompt = prompt.invoke(
        {"question": state["question"], "context": docs_content}
    )
    llm_response = llm.invoke(filled_prompt)
    return {"answer": llm_response.content}

def main():
    """Parse CLI arguments, build the retrieve→generate graph, and answer a question."""
    parser = argparse.ArgumentParser(description="Run the museum.ai application.")
    parser.add_argument("--test_performance", action="store_true", help="Whether to test performance")
    parser.add_argument("--llm", type=str, default="llama3.1:8b", help="Model to test")
    # Generalized: the question used to be hard-coded; the old value stays as
    # the default so existing invocations behave identically.
    parser.add_argument("--question", type=str, default="254窟主室南壁有什么",
                        help="Question to ask the RAG pipeline")

    args = parser.parse_args()

    # Initialize the LLM with Local Llama 3.1 model (wrapped to print
    # per-token latency while streaming).
    llm = TimedChatOllama(
        model=args.llm,
        temperature=0,
    )

    # Compile application and test; the lambda binds the llm into generate.
    graph_builder = StateGraph(State).add_sequence([retrieve, lambda state: generate(state, llm)])
    graph_builder.add_edge(START, "retrieve")
    graph = graph_builder.compile()

    question = args.question
    print(f"Query: {question}")
    if args.test_performance:
        # Run twice: the first pass warms the model up, the second is measured.
        for i in range(2):
            if i == 0:
                print("[Warm up]:")
            else:
                print("1st trial:")
            response = graph.invoke({"question": question})
    else:
        response = graph.invoke({"question": question})
    print("RAG based Answer:", response["answer"])
    # reference answer on mtl 165u by default model llama3.1:8b
    #  "RAG based Answer: 莫高窟第254窟主室南壁上画有交脚菩萨和天宫伎乐九身，下部画有降魔变一铺。表现释迦成佛的故事，其中包括魔王波旬率领军队围攻释迦，以及释迦以神通力将三个魔女变成老妪的情节。"

if __name__ == "__main__":
    main()
