import os
from typing import Union

from langchain_community.document_loaders.pdf import PyPDFLoader
from langchain_community.document_loaders.text import TextLoader
from langchain_community.document_loaders.word_document import Docx2txtLoader
from langchain_core.documents import Document
from langchain_core.embeddings import Embeddings
from langchain_text_splitters import RecursiveCharacterTextSplitter
import dashscope



import db_opeartor


def create_knowledge_base(knowledge_base_name: str):
    """Create a new knowledge-base collection and return the operator's result."""
    result = db_opeartor.create_collection(knowledge_base_name)
    return result


def create_knowledge_base_data(knowledge_base_name: str, filepath: Union[list[str], str], embedding: Embeddings):
    """
    Load, chunk, and store documents into the named knowledge base.

    Creates the collection first when it does not exist yet.

    :param knowledge_base_name: target collection name
    :param filepath: a single directory path, or a list of full file paths
    :param embedding: embedding model used to vectorize the chunks
    """
    document_list = load_documents(filepath, knowledge_base_name, embedding)
    chunked_document_list = chunked_documents(document_list)

    # The original if/else duplicated the get-db + add-documents calls in
    # both branches; only collection creation differed, so hoist the add.
    if not db_opeartor.client.collection_exists(knowledge_base_name):
        db_opeartor.create_collection(knowledge_base_name)
    db = db_opeartor.get_collection_db(knowledge_base_name, embedding=embedding)
    db.add_documents(documents=chunked_document_list)


def delete_knowledge_base(knowledge_base_name: str):
    """Delete the named knowledge-base collection and return the operator's result."""
    result = db_opeartor.delete_collection(knowledge_base_name)
    return result


def load_documents(file_paths: Union[list[str], str], collection_name: str, embedding: Embeddings) -> list[Document]:
    """
    Load supported files (.pdf, .docx, .txt) into langchain Documents.

    Files already vectorized into the collection are skipped. For every file
    that IS loaded, one extra filename-marker Document is appended so later
    runs can detect that the file has been vectorized.

    :param file_paths: a single directory path, or a list of full file paths
    :param collection_name: collection the files will be loaded into
    :param embedding: embedding model, needed for the already-stored check
    :return: loaded documents plus one filename-marker Document per loaded file
    """
    if isinstance(file_paths, str):  # a directory was passed in
        file_list = [os.path.join(file_paths, name) for name in os.listdir(file_paths)]
    else:
        file_list = file_paths

    documents: list[Document] = []
    for file in file_list:
        # Bug fix: use os.path.basename instead of split("\\") so the file
        # name is extracted correctly on non-Windows path separators too.
        filename = os.path.basename(file)
        if check_document_isin_collection(filename, collection_name, embedding):
            continue  # already vectorized; skip
        if file.endswith('.pdf'):
            loader = PyPDFLoader(file)
        elif file.endswith('.docx'):
            loader = Docx2txtLoader(file)
        elif file.endswith('.txt'):
            loader = TextLoader(file, encoding="utf-8")
        else:
            # Bug fix: the original appended a filename marker even for
            # unsupported file types whose content was never loaded.
            continue
        documents.extend(loader.load())

        # Store the filename so later runs can tell the file is vectorized.
        name_doc = Document(page_content=collection_name,
                            metadata={"title": filename, "type": "filename", "source": file})
        documents.append(name_doc)
    return documents


def chunked_documents(documents: list[Document]):
    """
    Split Documents into chunks for embedding and vector storage.

    :param documents: documents to split
    :return: chunked documents (chunk_size=200, chunk_overlap=10)
    """
    splitter = RecursiveCharacterTextSplitter(chunk_size=200, chunk_overlap=10)
    return splitter.split_documents(documents)


def check_document_isin_collection(filename, collection_name: str, embedding: Embeddings) -> bool:
    """
    Check whether a file has already been vectorized into the collection.

    :param filename: base name of the file (matched against metadata "title")
    :param collection_name: collection to look in
    :param embedding: embedding model for the collection db
    :return: True if a filename-marker document with this title exists
    """
    if not db_opeartor.client.collection_exists(collection_name):
        return False
    db = db_opeartor.get_collection_db(collection_name, embedding)
    docs = db.similarity_search(collection_name, filter={"type": 'filename'})
    # Bug fix: the original fell through (returning None) when marker docs
    # existed but none matched the filename; return an explicit bool instead.
    return any(doc.metadata['title'] == filename for doc in docs)


def get_knowledge() -> list[str]:
    """Return the names of all collections in the vector database."""
    collections = db_opeartor.client.get_collections().collections
    return [collection.name for collection in collections]


def get_knowledge_list() -> list[list[str]]:
    """
    Return collection names wrapped one-per-row (e.g. for a table/grid widget).

    :return: list of single-element lists, one per collection name
    """
    # Renamed the local from ``list`` — the original shadowed the builtin.
    names = get_knowledge()
    return [[name] for name in names]


def get_knowledge_base_file(collection_name: str, embedding: Embeddings):
    """
    List the source paths of files stored in the collection.

    :param collection_name: collection to inspect
    :param embedding: embedding model for the collection db
    :return: "source" metadata of every filename-marker doc, or [] when none
    """
    db = db_opeartor.get_collection_db(collection_name, embedding)
    marker_docs = db.similarity_search(collection_name, filter={"type": 'filename'})
    return [doc.metadata['source'] for doc in marker_docs] if marker_docs else []