from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from langchain.vectorstores.faiss import FAISS
from langchain.schema import Document
from PyCmpltrtok.common import sep, get_dir_name_ext
from PyCmpltrtok.common_np import normalize_one
import os
from text_splitter.chinese_recursive_text_splitter import ChineseRecursiveTextSplitter

# ---------------------------------------------------------------------------
# Experiment knobs.
# ---------------------------------------------------------------------------
is_large = False  # True -> index the large demo novel; False -> the short essay
normalize_L2 = True  # L2-normalize vectors before they enter the FAISS index
distance_strategy = "EUCLIDEAN_DISTANCE"  # alternative: "METRIC_INNER_PRODUCT"

sep('Embeddings (m3e-base)')
# Load the m3e-base sentence-embedding model from a local path onto GPU 0.
embed_model_path = "/home/yunpeng/models/hf/m3e-base"
embeddings = HuggingFaceEmbeddings(
    model_name=embed_model_path,
    model_kwargs={'device': 'cuda:0'},
)

sep('build empty vector store')
# FAISS.from_documents() requires at least one document, so seed the store
# with a throwaway placeholder and delete it immediately, leaving an empty
# index that carries the desired normalize_L2 / distance_strategy settings.
seed_doc = Document(page_content="init", metadata={})
vector_store = FAISS.from_documents(
    [seed_doc],
    embeddings,
    normalize_L2=normalize_L2,
    distance_strategy=distance_strategy,
)
# NOTE(review): reaches into the private `_dict` of the docstore to discover
# the seed id — there is no public "list all ids" API on this store.
seed_ids = list(vector_store.docstore._dict.keys())
vector_store.delete(seed_ids)

sep('split doc')
# Locate the demo text file that sits next to this script.
xdir, xbase, xext = get_dir_name_ext(os.path.abspath(__file__))
xfile = (
    '韩寒新作《光荣日》.utf8-unix.tmp.txt' if is_large
    else '20岁，在心里-1.utf8-unix.txt'
)
# Hoisted: the join was duplicated in both branches of the original if/else.
xpath = os.path.join(xdir, xfile)
print(f'path={xpath}')

# Chunk the text with a splitter aware of Chinese punctuation/separators.
text_splitter = ChineseRecursiveTextSplitter(
    keep_separator=True,
    is_separator_regex=True,
    chunk_size=250,
    chunk_overlap=50,
)
with open(xpath, 'r', encoding='utf8') as f:
    text = f.read()
chunks = text_splitter.split_text(text)

sep('check split')
# Eyeball the first and last 5 chunks (for a short input these two slices
# may overlap, so some chunks can print twice — acceptable for a debug dump).
for n, chunk in enumerate(chunks[:5] + chunks[-5:], start=1):
    print(n, f'|{chunk}|')

sep('do embedding')
# Embed every chunk in one batch; each Document records the source file so
# search results can be traced back to where the text came from.
docs = [Document(page_content=c, metadata={'source': xpath}) for c in chunks]
vectors = embeddings.embed_documents(chunks)
list_text_and_embedding = list(zip(chunks, vectors))

sep('Add to vs')
# add_embeddings() takes (text, vector) pairs plus a parallel metadata list.
metadatas = [d.metadata for d in docs]
ids = vector_store.add_embeddings(list_text_and_embedding, metadatas)

sep('Save vs')
# Encode the run configuration into the directory name so indexes built with
# different settings do not overwrite each other.
save_name = f'{xbase}.{xfile}.normalize_L2-{normalize_L2}.{distance_strategy}.tmp.faiss'
save_path = os.path.join(xdir, save_name)
print(f'Saving to |{save_path}|.')
vector_store.save_local(save_path)
print(f'Saved to |{save_path}|.')

sep('try to search')
# Interactive smoke test: embed each query and show the 4 nearest chunks
# whose score passes the threshold.
while True:
    sep()
    print('Seek what? (q for quit)')
    query = input().strip()
    if query in ('q', 'Q'):  # simplified: was `query in set(list(['q', 'Q']))`
        break
    query_vector = embeddings.embed_query(query)
    # No manual normalization of the query vector is needed here: when the
    # store was built with normalize_L2=True it normalizes internally.
    result = vector_store.similarity_search_with_score_by_vector(
        query_vector, 4, score_threshold=0.6
    )
    for n, (doc, score) in enumerate(result, start=1):
        print(n, score, doc)

sep('All over')