import os

import ollama
import requests
import yaml
from langchain_community.chat_models import ChatZhipuAI, ChatTongyi, ChatOpenAI
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough

from db.database import db

# Directory containing this script.
current_dir = os.path.dirname(os.path.abspath(__file__))

# Project root directory (one level above this script).
project_root = os.path.dirname(current_dir)

# Load configuration. An explicit encoding makes the file decode the same
# way on every platform (the project contains non-ASCII text), and
# safe_load cannot execute arbitrary YAML tags while loading plain
# scalar/mapping configs identically to FullLoader.
config_path = os.path.join(project_root, "config.yaml")
with open(config_path, 'r', encoding="utf-8") as file:
    config = yaml.safe_load(file)



# Only the last assignment of ``llm`` was ever effective; the ChatZhipuAI
# and ChatTongyi clients were constructed and immediately discarded (and
# forced their config sections to exist).  The alternatives are kept as
# comments so switching providers stays a quick change.
#
# llm = ChatZhipuAI(
#     model=config["glm4"]["model_name"],
#     api_key=config["glm4"]["api_key"],
#     context_window=config["glm4"]["context_window"],
# )
#
# llm = ChatTongyi(
#     model=config["qwen"]["model_name"],
#     api_key=config["qwen"]["api_key"],
#     context_window=config["qwen"]["context_window"],
# )

# Shared chat model used by the helpers below: DeepSeek-R1 served through
# an OpenAI-compatible endpoint (Aliyun), per the config section name.
llm = ChatOpenAI(
    model=config["deepseek-r1-ali"]["model_name"],
    api_key=config["deepseek-r1-ali"]["api_key"],
    base_url=config["deepseek-r1-ali"]["base_url"],
)


# llm分词
# LLM-based keyword extraction.
def llm_split(query: str):
    """Ask the LLM to extract keywords from *query*, space-separated.

    Returns the raw model output; the prompt instructs the model to
    return an empty string when no keywords are found.
    """
    template = "从问题中提取关键词，并将返回的关键词用空格分隔，没有提取到关键词就返回空字符串，不要造词，否则会对你惩罚。\n----------\n问题：{query}"
    prompt = ChatPromptTemplate.from_messages([("human", template)])
    pipeline = {"query": RunnablePassthrough()} | prompt | llm | StrOutputParser()
    keywords = pipeline.invoke(query)
    print(keywords)
    return keywords


# Embedding generation.
def create_vector(query: str, model: str = config["ollama"]["embed"]):
    """Embed *query* with the given Ollama model and return the vector.

    A fixed seed keeps the embedding deterministic across calls.
    """
    response = ollama.embed(model, query, options={"seed": 0})
    return response["embeddings"][0]

# Preset-question matching via vector similarity search.
def pre_question_search(collection: str, query: str, certainty: float, limit: int):
    """Find documents in *collection* that are similar to *query*.

    Embeds the query, runs a near-vector search (at most *limit* hits with
    certainty >= *certainty*), and converts each hit into a Document: the
    ``text`` property becomes the page content, everything else becomes
    metadata.
    """
    query_vector = create_vector(query)

    client = db.get_client()
    target = client.collections.get(collection)
    result = target.query.near_vector(
        near_vector=query_vector,
        limit=limit,
        certainty=certainty,
    )

    documents = []
    for obj in result.objects:
        props = dict(obj.properties)
        text = props.pop('text', "")
        documents.append(Document(page_content=text, metadata=props))

    return documents

def rerank(query: str, nodes: list[Document], limit: int = 0, top_n: int = 5):
    """Score *nodes* against *query* via the external reranker service and
    return the *top_n* highest-scoring documents, each prefixed with a
    ``[[citation:N]]`` marker (one citation number per distinct URL).

    Side effects: each kept document's ``metadata["score"]`` is set and
    its ``page_content`` is rewritten with the citation prefix.

    :param query: user question the documents are scored against.
    :param nodes: candidate documents to rerank.
    :param limit: forwarded unchanged to the reranker service.
    :param top_n: number of documents to keep after sorting.
    :raises requests.HTTPError: when the reranker returns an error status.
    """
    response = requests.post(
        config["model"]["reranker"]["url"],
        json={"query": query, "context": [node.page_content for node in nodes], "limit": limit},
        timeout=30,  # fail fast instead of hanging forever if the service is down
    )
    response.raise_for_status()  # surface HTTP errors before parsing the body

    # zip() stops at the shorter sequence, so if the service returns fewer
    # scores than documents the tail keeps no "score" key; .get() below
    # lets those documents sort as 0 instead of raising KeyError.
    for score, doc in zip(response.json(), nodes):
        doc.metadata["score"] = score

    # Highest score first; missing/None/0 scores all rank as 0.
    top_nodes = sorted(nodes, key=lambda d: d.metadata.get("score") or 0, reverse=True)[:top_n]

    # Add citations: one number per distinct URL, in order of first use.
    urls = {}
    for node in top_nodes:
        # A blank URL (single space) falls back to the document title.
        if node.metadata["url"] == " ":
            node.metadata["url"] = node.metadata["title"]

        if node.metadata["url"] not in urls:
            urls[node.metadata["url"]] = len(urls) + 1
        node.page_content = f"[[citation:{urls[node.metadata['url']]}]] {node.page_content}"

    return top_nodes