# pip install sentence-transformers -i https://pypi.tuna.tsinghua.edu.cn/simple/
# pip install -U langchain-unstructured
# pip install -U langchain-huggingface
# pip install -U unstructured -i https://pypi.tuna.tsinghua.edu.cn/simple/

import os
# Silence HuggingFace tokenizers' fork/parallelism warning. Must be set
# before the tokenizers library is first loaded (HuggingFaceEmbeddings below
# triggers that import).
os.environ["TOKENIZERS_PARALLELISM"] = "false"

from typing import List
from pathlib import Path
import chromadb
from langchain_chroma import Chroma
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_core.documents import Document
from langchain_community.document_loaders import UnstructuredMarkdownLoader
from langchain_community.vectorstores.utils import filter_complex_metadata
from langchain_deepseek import ChatDeepSeek
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.messages import SystemMessage

# Embedding model configuration: a locally downloaded BGE checkpoint
# (Chinese, large, v1.5) running on CPU. To use a hosted model on GPU
# instead, switch to e.g.:
# model_name = "BAAI/bge-large-en-v1.5"
# model_kwargs = {'device': 'cuda'}
model_name = "/Users/hhwang/models/bge-large-zh-v1.5"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True} # normalized vectors: dot product == cosine similarity

# Chroma persistence directory and the collection holding the RAG documents.
chroma_path = "chroma_langchain_db"
collection_name = "rag-doc"

# DeepSeek chat model used for answer generation. The API key is read from
# the DEEPSEEK_TOKEN environment variable (falls back to "" if unset, which
# will fail at request time rather than at construction).
llm = ChatDeepSeek(
    model="deepseek-chat",
    temperature=0.5,   # moderate sampling randomness
    max_tokens=4096,
    timeout=None,      # no client-side request timeout
    max_retries=2,
    streaming=False,   # single complete response, no token streaming
    api_key=os.getenv("DEEPSEEK_TOKEN", ""),
)

def format_docs(docs):
    """Render retrieved documents as one numbered string.

    Each document contributes a "文档 N:" header followed by its
    page_content; sections are separated by a blank line.
    """
    sections = []
    for number, doc in enumerate(docs, start=1):
        sections.append(f"文档 {number}:\n{doc.page_content}")
    return "\n\n".join(sections)

print(f"Loading model {model_name} ......")
# Sentence-embedding model used both to index documents and to embed queries.
# Loading the checkpoint from disk can take a while, hence the progress prints.
embed_model = HuggingFaceEmbeddings(
    model_name=model_name,
    model_kwargs=model_kwargs,
    encode_kwargs=encode_kwargs,
)
print(f"Load model {model_name} done!")

if os.path.exists(chroma_path):
    # Reuse the previously persisted index instead of re-embedding everything.
    print(f"Load vector store from local {chroma_path} ......")
    vectorstore = Chroma(
        persist_directory=chroma_path,
        embedding_function=embed_model,
        collection_name=collection_name,
        # Keep client settings consistent with the creation path below;
        # previously only the creation path disabled telemetry.
        client_settings=chromadb.Settings(anonymized_telemetry=False),
    )
    print(f"Load vector store from local {chroma_path} done!")
else:
    # First run: collect markdown files, embed them, and persist the index.
    md_path = "/Users/hhwang/code/jihulab/opencsg/opencsg-docs/docs"
    # Product areas that should not be indexed.
    exclude_dirs = [f"{md_path}/starship", f"{md_path}/autohub"]
    md_root_path = Path(md_path)
    md_files = [
        str(file) for file in md_root_path.rglob("*.md")
        if not any(str(file).startswith(exclude_dir) for exclude_dir in exclude_dirs)
    ]

    print(f"Find {len(md_files)} markdown files in {md_path}")

    md_docs: List[Document] = []
    for md_file in md_files:
        print(f"Loading markdown file: {md_file}")
        loader = UnstructuredMarkdownLoader(md_file, remove_hyperlinks=True, remove_images=True)
        # Chroma accepts only scalar metadata values; drop lists/dicts up front.
        # (extend with an empty list is a no-op, so no emptiness guard needed.)
        md_docs.extend(filter_complex_metadata(loader.load()))

    print(f"Loaded {len(md_docs)} markdown files.")
    # NOTE(review): documents are embedded whole, one vector per file. If
    # pages exceed the embedding model's context, add a text splitter here.

    print("Creating vector store ......")
    vectorstore = Chroma.from_documents(
        documents=md_docs,
        collection_name=collection_name,
        embedding=embed_model,
        persist_directory=chroma_path,
        client_settings=chromadb.Settings(anonymized_telemetry=False),
    )
    print("Vector store completed!")

# Single retriever definition shared by both branches (was duplicated).
# k is pinned here because VectorStoreRetriever reads search_kwargs; k passed
# at invoke() time is ignored by older langchain-core versions.
retriever = vectorstore.as_retriever(
    search_type="similarity",
    search_kwargs={"k": 3},
)

# The question to answer over the indexed documentation.
user_query = "如何创建代码仓库？"

# Retrieve the top matches. Recent langchain-core merges invoke-time kwargs
# into the retriever's search_kwargs, so k=3 caps the hits here — TODO confirm
# against the pinned langchain-core version.
result_docs = retriever.invoke(user_query, k=3)
# for res_doc in result_docs:
#     print(res_doc)

# Concatenate the hits into one numbered context string for the prompt.
context = format_docs(result_docs)

# System prompt: assistant role, grounding constraints, output requirements.
# Content is model-facing input and is kept verbatim.
system_prompt = """
## Role:
你是一个专业的文档助手。请基于以下检索到的文档内容回答用户的问题。

## Constraints:
如果文档中没有相关信息，请直接说明无法回答，不要编造信息。 
在最后提示用户可以在 https://opencsg.com/docs 查找更多信息。

## Output:
请提供准确、简洁的回答. """

system_message = SystemMessage(content=system_prompt)

user_prompt = ChatPromptTemplate.from_template("检索到的文档:\n\n{context}\n\n用户问题: {question}\n\nresponse:")

# .format() yields a plain string; the chat model coerces it to a human message.
user_message = user_prompt.format(context=context, question=user_query)

# "streaming" is not a RunnableConfig key, and streaming is already disabled
# on the ChatDeepSeek constructor — the bogus config override is removed.
response = llm.invoke([system_message, user_message])

print(f"answer: {response.content}")

# Debugging aid: inspect raw similarity scores for the query.
# docs_and_scores = vectorstore.similarity_search_with_score(user_query, k=3)
# for doc, score in docs_and_scores:
#     print(f"similarity score: {score}")
#     print(f"document content: {doc.page_content[:100]}...")
