import argparse
# from dataclasses import dataclass
from langchain_chroma import Chroma
from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain_ollama import OllamaEmbeddings
import psycopg2

CHROMA_PATH = "chroma"

PROMPT_TEMPLATE = """
Answer the question based only on the following context:

{context}

---

Answer the question based on the above context: {question}
"""


def main():
    """Entry point: parse an optional query from the CLI and run a pgvector
    similarity search against the local document store.

    The positional argument is optional so that running the script with no
    arguments preserves the previous hard-coded default query.
    """
    parser = argparse.ArgumentParser(description="Query the vector store.")
    parser.add_argument(
        "query_text",
        type=str,
        nargs="?",
        default="How does Alice meet the Mad Hatter?",
        help="The query text.",
    )
    args = parser.parse_args()
    query_text = args.query_text

    # Embeddings are produced by a locally running Ollama server.
    embedding_function = OllamaEmbeddings(
        base_url="http://localhost:11434", model="mxbai-embed-large"
    )

    query_pgvector(embedding_function, query_text)

def query_chroma(embedding_function: OllamaEmbeddings, query_text: str, k: int = 3):
    """Search the Chroma store for documents relevant to *query_text* and
    print the filled-in RAG prompt.

    Args:
        embedding_function: Embedding model backing the Chroma collection.
        query_text: Natural-language question to search for.
        k: Number of nearest documents to retrieve (default 3, as before).

    Returns:
        The formatted prompt string, or ``None`` when nothing matched.
    """
    db = Chroma(persist_directory=CHROMA_PATH, embedding_function=embedding_function)

    results = db.similarity_search_with_relevance_scores(query_text, k=k)
    if not results:
        # NOTE(review): a relevance threshold (results[0][1] < 0.7) was
        # previously considered here; re-enable if low-quality matches appear.
        print("Unable to find matching results.")
        return None

    # Join retrieved chunks with a visible separator so the LLM can tell
    # individual documents apart.
    context_text = "\n\n---\n\n".join(doc.page_content for doc, _score in results)
    prompt_template = ChatPromptTemplate.from_template(PROMPT_TEMPLATE)
    prompt = prompt_template.format(context=context_text, question=query_text)
    print(prompt)
    return prompt

def query_pgvector(
    embedding_function: OllamaEmbeddings,
    query_text: str,
    k: int = 3,
    distance_type: str = "l2",
):
    """Run a pgvector similarity search for *query_text* and print the top hits.

    Args:
        embedding_function: Embedding model used to embed the query text.
        query_text: Natural-language question to search for.
        k: Number of rows to return (default 3, matching previous behavior).
        distance_type: One of ``'l2'``, ``'cosine'``, or ``'inner'``.

    Raises:
        ValueError: If *distance_type* is not a supported option.
    """
    # Map distance type to the pgvector operator. NOTE: '<#>' returns the
    # *negated* inner product, so ascending order is correct for every
    # operator — the previous DESC special-case for 'inner' returned the
    # least similar rows instead of the most similar.
    operators = {"l2": "<->", "cosine": "<=>", "inner": "<#>"}
    try:
        operator = operators[distance_type]
    except KeyError:
        raise ValueError("Unsupported distance_type. Use 'l2', 'cosine', or 'inner'.")

    # Embed the query text before opening the connection, so an embedding
    # failure never leaks a DB handle.
    query_embedding = embedding_function.embed_query(query_text)

    # NOTE(review): credentials are hard-coded; move to environment
    # variables or a config file before any non-local use.
    db_params = {
        'dbname': 'vector_db',
        'user': 'postgres',
        'password': '123',
        'host': 'localhost',
        'port': '5432'
    }
    conn = psycopg2.connect(**db_params)
    try:
        # psycopg2 cursors are context managers; the connection itself is
        # closed in the finally block (``with conn`` would only manage the
        # transaction, not close the socket).
        with conn.cursor() as cur:
            query = f"""
                SELECT id, text_content, embedding {operator} %s AS distance
                FROM documents
                ORDER BY distance ASC
                LIMIT %s;
            """
            # pgvector accepts the text form "[x1, x2, ...]", which str()
            # of a Python list of floats produces.
            cur.execute(query, (str(query_embedding), k))
            results = cur.fetchall()
    finally:
        conn.close()

    print(f"Top {k} similar documents for query: '{query_text}'")
    for doc_id, text, distance in results:
        # Truncate long document text so each hit stays on one line.
        print(f"ID: {doc_id}, Text: {text[:50]}..., Distance: {distance}")

if __name__ == "__main__":
    main()
