from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.schema import Document
from PyCmpltrtok.common import sep, get_dir_name_ext
from PyCmpltrtok.common_np import normalize_one
import os
import time
from text_splitter.chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
from threading import Thread, Lock, Event

if '__main__' == __name__:
    # --- configuration -----------------------------------------------------
    is_large = False                          # pick the small or the large corpus
    normalize_L2 = True                       # FAISS L2-normalization toggle (False in earlier runs)
    distance_strategy = "EUCLIDEAN_DISTANCE"  # alternative: "METRIC_INNER_PRODUCT"

    # --- embedding model ---------------------------------------------------
    sep('embedding model')
    embed_model_path = "/home/yunpeng/models/hf/m3e-base"
    embeddings = HuggingFaceEmbeddings(model_name=embed_model_path, model_kwargs={'device': 'cuda:0'})

    # --- load the previously built vector store ----------------------------
    sep('load vs')
    xdir, xbase, xext = get_dir_name_ext(os.path.abspath(__file__))
    xfile = (
        '韩寒新作《光荣日》.utf8-unix.tmp.txt'
        if is_large
        else '20岁，在心里-1.utf8-unix.txt'
    )
    xpath = os.path.join(xdir, xfile)
    print(f'path={xpath}')
    # The index file name encodes corpus + normalization + metric so that
    # incompatible indexes never get mixed up.
    save_path = os.path.join(xdir, f'x008000_vs_build_and_search.{xfile}.normalize_L2-{normalize_L2}.{distance_strategy}.tmp.faiss')
    vector_store = FAISS.load_local(
        save_path,
        embeddings,
        normalize_L2=normalize_L2,
        distance_strategy=distance_strategy,
    )

    sep('try to search (multi-thread)')

    # --- synchronization primitives shared by the worker threads -----------
    mutex_embedding = Lock()            # serializes GPU embedding calls
    mutex_print = Lock()                # serializes result printing
    print_at_last = Event()             # gate: main thread opens printing
    start_to_search_together = Event()  # gate: all workers search simultaneously
    counter_mutex = Lock()              # guards n_workers
    n_workers = 0                       # number of workers that finished embedding
    
    def query_it(query: str, world_size: int):
        """Worker: embed ``query``, barrier-sync with the other workers so all
        searches hit the vector store at the same time, then wait for the main
        thread's go-ahead before printing results.

        Args:
            query: natural-language query to embed and search for.
            world_size: total number of worker threads (the barrier size).
        """
        global n_workers

        # Embedding is serialized: concurrent calls into the single-GPU model
        # are not assumed safe. Using `with` guarantees the lock is released
        # even if embed_query raises (the original acquire()/release() pair
        # leaked the lock on exception).
        with mutex_embedding:
            sep(f'E: {query}')
            query_vector = embeddings.embed_query(query)
            # query_vector = normalize_one(query_vector)  # not needed

        # Hand-rolled barrier: the last thread to arrive releases everyone.
        # Decide "am I last?" while still holding the lock — the original
        # read n_workers after releasing counter_mutex, which is a data race.
        with counter_mutex:
            n_workers += 1
            i_am_last = n_workers >= world_size
        if i_am_last:
            start_to_search_together.set()
        else:
            start_to_search_together.wait()

        sep(f'Q: {query}')
        # Deliberately concurrent: this exercises FAISS search thread-safety.
        result = vector_store.similarity_search_with_score_by_vector(query_vector, 4, score_threshold=0.6)

        # Defer printing until the main thread opens the gate, then serialize
        # so each query's result block stays contiguous in the output.
        print_at_last.wait()
        with mutex_print:
            sep(f'A: {query}')
            for i, (doc, score) in enumerate(result):
                print(i + 1, score, doc)
        
            
    queries = [
        '茉茉是谁？',
        '老师是谁？',
        '谁近视了？',
        '有一天，茉茉发现一位中年妇人打开了那间地窖的门，拿了笤帚里里外外地打扫。她大惊失色，下午上学把你召来开会。',
        '你有了一个一米半高的书架，里面摆满了各种少儿读物。你还有几只大纸箱娃娃画报。',
    ]

    # One worker per query; each thread embeds, searches, then blocks on the
    # print gate.
    th_list = [Thread(target=query_it, args=(query, len(queries))) for query in queries]
    for th in th_list:
        th.start()

    # Give the searches time to complete, then open the print gate and drain
    # all the workers.
    time.sleep(2.0)
    print_at_last.set()
    for th in th_list:
        th.join()

    sep('All over')