import os
from typing import List
from langchain_ollama import OllamaEmbeddings
from langchain_chroma import Chroma
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter


def list_txt_text(directory: str, extension: str = '.txt') -> List[str]:
    """
    Recursively collect paths of text files under a directory.

    Generalized: the file extension is now a parameter (defaults to
    ``'.txt'``, so existing callers are unaffected).

    Args:
        directory (str): The directory to search (searched recursively).
        extension (str): File-name suffix to match. Defaults to ``'.txt'``.

    Returns:
        List[str]: Matching file paths, with backslashes normalized to
        forward slashes (the rest of this module splits paths on "/").
    """
    text_files: List[str] = []
    for root, _, files in os.walk(directory):
        for file in files:
            if file.endswith(extension):
                # Normalize Windows separators so downstream "/"-splitting works.
                text_files.append(os.path.join(root, file).replace("\\", "/"))
    return text_files


def load_text_db(db: str):
    """
    Open the persisted Chroma text collection, creating it if absent.

    Args:
        db (str): Directory where the Chroma database is persisted.

    Returns:
        Chroma: Vector store backed by a local Ollama embedding model.
    """
    # Embeddings are computed by a locally-served Ollama model.
    embeddings = OllamaEmbeddings(model="nomic-embed-text")
    return Chroma(
        collection_name="dunhuang_text_db",
        embedding_function=embeddings,
        persist_directory=db,
    )


def append_text_db(db: str, text_dir: str):
    """
    Index all .txt files under ``text_dir`` into the persisted text DB.

    Each file is loaded whole, chunked, and every chunk is prefixed with
    "<cave number>窟 <file title>. " so that short queries naming a cave
    can still match individual chunks.

    Args:
        db (str): Directory where the Chroma database is persisted.
        text_dir (str): Directory whose .txt files are appended to the DB.

    Returns:
        Chroma: The vector store the documents were added to.
    """
    vector_store = load_text_db(db)
    # vector_store.reset_collection()

    txt_files = list_txt_text(text_dir)
    print(txt_files)

    # Load each whole file as one Document. Fix: open files via a context
    # manager so handles are closed deterministically (the previous
    # comprehension leaked one open file per document).
    docs = []
    for txt_file in txt_files:
        with open(txt_file, "r", encoding='utf-8') as f:
            docs.append(Document(page_content=f.read(),
                                 metadata={"path": txt_file}))

    # Chunk, then tag each chunk with its cave/file provenance so the
    # metadata is searchable inside the chunk text itself.
    print("Chunking..")
    text_splitter = RecursiveCharacterTextSplitter(
        chunk_size=500, chunk_overlap=100)
    all_splits = text_splitter.split_documents(docs)

    tagged_splits = []
    for doc in all_splits:
        path = doc.metadata['path']
        # Parent directory name is the cave number, e.g. ".../0254/file.txt".
        cave = path.split("/")[-2]
        title = os.path.basename(path).replace("_", "").replace(".txt", "")
        tagged_splits.append(Document(
            page_content=cave + "窟 " + title + ". " + doc.page_content,
            metadata=doc.metadata))

    # Index the tagged chunks.
    vector_store.add_documents(tagged_splits)
    print(
        f"Append all txt in {text_dir} to {vector_store._collection_name} done")
    return vector_store


def main():
    """
    Quick build-and-query smoke test; run from the project root.
    """
    db_path = "./data/dunhuang_text_db"

    # Reuse an existing non-empty DB directory, otherwise build it fresh
    # from the raw text corpus.
    db_exists = (os.path.exists(db_path)
                 and os.path.isdir(db_path)
                 and bool(os.listdir(db_path)))
    if db_exists:
        vector_store = load_text_db(db_path)
    else:
        vector_store = append_text_db(db_path, "./data/dunhuang_raw")

    # Sample queries (kept for reference):
    # query_text = open("./data/dunhuang_raw/0023/莫高窟第023窟.txt",
    #                   "r", encoding='utf-8').read()
    # query_text = "莫高窟第023窟甬道顶部有什么"  # recalls well: source text is short, high hit rate
    # query_text = "254窟主室南壁有什么"  # this query recalls only one of the splits
    query_text = "254窟主室南壁"  # this query recalls both splits
    print("Query text:", query_text)

    results = vector_store.similarity_search_with_score(query_text, k=5)
    for doc, score in results:
        print(
            f"\nResult text {doc.metadata, doc.page_content}, relevance score {score}")


if __name__ == "__main__":
    # Run the quick build-and-query smoke test when executed directly.
    main()
