# -*- coding: utf-8 -*-
"""
@Author: zyx
@Date: 2024/11/28 19:10
@FileName: utils.py
@Description: 处理工具类
"""

import os
import uuid
from configs import get_tags, TAGS_DICT
import chromadb
from chromadb.config import Settings
from typing import Optional, List
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_chroma import Chroma
from langchain_core.documents import Document


def text_loader_splitter(
    file_path: str,
    chunk_size: Optional[int] = 500,
    chunk_overlap: Optional[int] = 0,
    separators: Optional[List[str]] = None,
) -> List[Document]:
    """Load a PDF document and split it into text chunks.

    Args:
        file_path: Path of the document, loaded with PyMuPDFLoader.
        chunk_size: Maximum characters per chunk.
        chunk_overlap: Number of overlapping characters between adjacent chunks.
        separators: Separators tried in order when splitting; ``None`` selects
            a default list covering ASCII and CJK punctuation.

    Returns:
        The list of split ``Document`` chunks.
    """
    # NOTE: the default is built inside the function instead of being a
    # mutable default argument, so callers can never share/mutate one list.
    if separators is None:
        separators = [
            "\n\n",
            "\n",
            " ",
            ".",
            ",",
            "\u200b",  # Zero-width space
            "\uff0c",  # Fullwidth comma
            "\u3001",  # Ideographic comma
            "\uff0e",  # Fullwidth full stop
            "\u3002",  # Ideographic full stop
            "",
        ]
    loader = PyMuPDFLoader(file_path)
    docs = loader.load()
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=chunk_size,
        chunk_overlap=chunk_overlap,
        separators=separators,
    )
    return text_splitter.split_documents(docs)


def get_vectordb(coll_path: str, coll: str, is_reset: bool = False) -> Chroma:
    """Open a persistent Chroma vector store for the given collection.

    Args:
        coll_path: On-disk directory for the persistent chromadb client.
        coll: Collection name to open or create.
        is_reset: When True, wipe the client's stored data first.

    Returns:
        A LangChain ``Chroma`` wrapper bound to the collection.
    """
    # Imported lazily so the embedding model is only touched when needed.
    from langchain_community.embeddings.text2vec import Text2vecEmbeddings

    # allow_reset=True is required for client.reset() to be permitted.
    chroma_client = chromadb.PersistentClient(
        path=coll_path,
        settings=Settings(allow_reset=True),
    )
    if is_reset:
        chroma_client.reset()
    # Use cosine distance for similarity computation.
    chroma_client.get_or_create_collection(coll, metadata={"hnsw:space": "cosine"})
    embeddings = Text2vecEmbeddings(
        model_name_or_path="shibing624/text2vec-base-chinese-sentence")
    return Chroma(
        collection_name=coll,
        client=chroma_client,
        embedding_function=embeddings)


def _build_tag_chroma(coll_path: str, coll: str) -> None:
    """Rebuild the vector store for one tag collection from scratch."""
    tags = get_tags(coll)
    # Reset first so stale tag entries from a previous build are dropped.
    db = get_vectordb(coll_path=coll_path, coll=coll, is_reset=True)
    db.add_texts(tags)


def build_tag_chromas(root_path: str) -> None:
    """Build vector stores for every tag category plus the combined one.

    Creates one collection per key of ``TAGS_DICT`` (e.g. molecular design,
    reaction prediction), followed by an aggregate "all_tags" collection,
    each persisted under its own subdirectory of ``root_path``.
    """
    for name in list(TAGS_DICT) + ["all_tags"]:
        _build_tag_chroma(coll_path=os.path.join(root_path, name), coll=name)


def build_history(chat_history: List[dict]) -> str:
    """Render chat history as a "Human:"/"AI:" transcript string.

    Messages are expected to alternate Human/AI; each dict carries the
    message text under the ``content`` key.

    Args:
        chat_history: Alternating message dicts; may be empty.

    Returns:
        One line per message, e.g. ``"Human:hi\\nAI:hello\\n"``. An empty
        history yields a default greeting exchange.
    """
    if not chat_history:
        # Seed the prompt with a minimal greeting when there is no history.
        return "Human:你好\nAI:你好\n"
    lines = []
    for i in range(0, len(chat_history), 2):
        lines.append("Human:" + chat_history[i]['content'] + "\n")
        # Guard the AI turn: a trailing Human message (odd-length history)
        # previously raised IndexError here.
        if i + 1 < len(chat_history):
            lines.append("AI:" + chat_history[i + 1]['content'] + "\n")
    # join() avoids quadratic string concatenation.
    return "".join(lines)


def get_similar_docs_by_query(query: str,
                              tag_vectordb: Chroma,
                              knowledge_dbs: List[Chroma],
                              tag_k: int = 5,
                              know_k: int = 10):
    """Find documents similar to the query, filtered by matched tags.

    First looks up the ``tag_k`` closest tags in ``tag_vectordb``, then
    searches every knowledge store restricted to those tags and merges the
    results by score.

    Args:
        query: The user question.
        tag_vectordb: Vector store holding tag strings.
        knowledge_dbs: Knowledge vector stores to search.
        tag_k: Number of tags to match.
        know_k: Number of documents to return overall.

    Returns:
        Up to ``know_k`` ``(Document, score)`` pairs, best first.
    """
    # Tags whose embeddings are closest to the query.
    tag_hits = tag_vectordb.similarity_search(query, tag_k)
    matched_tags = [hit.page_content for hit in tag_hits]
    tag_filter = {"tag": {"$in": matched_tags}}
    # With cosine distance, score = 1 - cosine similarity, so a smaller
    # score means a closer match — sort ascending and keep the head.
    scored_docs = []
    for db in knowledge_dbs:
        scored_docs.extend(
            db.similarity_search_with_score(query, know_k, filter=tag_filter))
    scored_docs.sort(key=lambda pair: pair[1])
    return scored_docs[:know_k]

def get_session():
    """Return a fresh random session identifier (a UUID4 string)."""
    return f"{uuid.uuid4()}"