from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.schema import Document
from PyCmpltrtok.common import sep, get_dir_name_ext
from PyCmpltrtok.common_np import normalize_one
import os
from text_splitter.chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
from x010000_vs_build_and_search_qwen_api import QwenWorker, MyEmbeddings

if '__main__' == __name__:
    # Interactive search loop over a FAISS vector store previously built by
    # x010000_vs_build_and_search_qwen_api.py, using Qwen-API embeddings.

    # --- Configuration -------------------------------------------------
    # Choose between the large and the small demo corpus. The corresponding
    # index must already exist on disk (built with the same settings).
    is_large = False
    # IMPORTANT: these two settings must match the ones used at index-build
    # time — they are baked into the save-file name below, so a mismatch
    # simply fails to find the index.
    normalize_L2 = True  # alternative: False
    distance_strategy = "EUCLIDEAN_DISTANCE"  # alternative: "METRIC_INNER_PRODUCT"

    sep('My embeddings (qwen-api)')
    worker = QwenWorker()
    embeddings = MyEmbeddings(worker=worker)

    sep('load vs')
    # Only the directory is used; the name/extension parts are intentionally unused.
    xdir, _xbase, _xext = get_dir_name_ext(os.path.abspath(__file__))
    if is_large:
        xfile = '韩寒新作《光荣日》.utf8-unix.tmp.txt'
    else:
        xfile = '20岁，在心里-1.utf8-unix.txt'
    # The join was duplicated in both branches; hoisted here once.
    xpath = os.path.join(xdir, xfile)
    print(f'path={xpath}')
    save_path = os.path.join(xdir, f'x010000_vs_build_and_search_qwen_api.{xfile}.normalize_L2-{normalize_L2}.{distance_strategy}.tmp.faiss')
    print(f'loading from {save_path}')
    # NOTE(review): newer langchain releases require
    # allow_dangerous_deserialization=True here because load_local unpickles
    # data from disk — confirm against the installed langchain version.
    vector_store = FAISS.load_local(
        save_path,
        embeddings,
        normalize_L2=normalize_L2,
        distance_strategy=distance_strategy,
    )

    sep('try to search')
    # Read queries from stdin until the user quits with 'q'/'Q'.
    while True:
        sep()
        print('Seek what? (q for quit)')
        query = input().strip()
        if query in {'q', 'Q'}:  # was set(list([...])) — redundant wrappers
            break
        query_vector = embeddings.embed_query(query)
        # query_vector = normalize_one(query_vector)  # not needed
        result = vector_store.similarity_search_with_score_by_vector(
            query_vector,
            k=4,  # top-4 nearest neighbours
            score_threshold=1.0,
        )
        # enumerate(start=1) replaces the manual `n = i + 1` counter.
        for n, (doc, score) in enumerate(result, start=1):
            print(n, score, doc)

    sep('All over')