from langchain.embeddings.base import Embeddings
from langchain.vectorstores.faiss import FAISS
from langchain.schema import Document
from PyCmpltrtok.common import sep, get_dir_name_ext
from PyCmpltrtok.common_np import normalize_one
import os
from typing import List
from text_splitter.chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
from server.model_workers.qwen import QwenWorker
from server.model_workers.base import ApiEmbeddingsParams


class MyEmbeddings(Embeddings):
    """LangChain ``Embeddings`` adapter backed by a model-worker API.

    Delegates all embedding requests to ``worker.do_embeddings`` and
    unpacks the ``'data'`` field of the worker's response.
    """

    def __init__(self, worker):
        # Worker object exposing do_embeddings(ApiEmbeddingsParams) -> dict.
        self.worker = worker

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of texts via the worker API.

        NOTE(review): assumes the response dict's 'data' entry is a list of
        float vectors aligned with *texts* — confirm against the worker.
        """
        response = self.worker.do_embeddings(ApiEmbeddingsParams(texts=texts))
        return response['data']

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string (first row of a one-element batch)."""
        return self.embed_documents([text])[0]


if __name__ == '__main__':
    # --- configuration -------------------------------------------------
    is_large = False          # False: small sample text; True: the larger novel
    normalize_L2 = True       # L2-normalize vectors before indexing
    # With L2-normalized vectors, Euclidean distance is monotonic in cosine
    # similarity, so EUCLIDEAN_DISTANCE behaves like cosine search.
    distance_strategy = "EUCLIDEAN_DISTANCE"  # alternative: "METRIC_INNER_PRODUCT"

    sep('My embeddings (qwen-api)')
    worker = QwenWorker()
    embeddings = MyEmbeddings(worker=worker)

    sep('build empty vector store')
    # FAISS.from_documents needs at least one document; seed with a
    # placeholder and delete it right away to obtain an empty store.
    doc = Document(page_content="init", metadata={})
    vector_store = FAISS.from_documents(
        [doc],
        embeddings,
        normalize_L2=normalize_L2,
        distance_strategy=distance_strategy,
    )
    vector_store.delete(list(vector_store.docstore._dict.keys()))

    sep('split doc')
    xdir, xbase, xext = get_dir_name_ext(os.path.abspath(__file__))
    # Pick the corpus file; both live next to this script.
    if is_large:
        xfile = '韩寒新作《光荣日》.utf8-unix.tmp.txt'
    else:
        xfile = '20岁，在心里-1.utf8-unix.txt'
    xpath = os.path.join(xdir, xfile)
    print(f'path={xpath}')
    text_splitter = ChineseRecursiveTextSplitter(
        keep_separator=True,
        is_separator_regex=True,
        chunk_size=250,
        chunk_overlap=50,
    )
    with open(xpath, 'r', encoding='utf8') as f:
        text = f.read()
    chunks = text_splitter.split_text(text)

    sep('check split')
    # Sanity check: show the first and last few chunks.
    for n, chunk in enumerate(chunks[:5] + chunks[-5:], start=1):
        print(n, f'|{chunk}|')

    sep('do embedding')
    docs = [Document(page_content=chunk, metadata={'source': xpath}) for chunk in chunks]
    vectors = embeddings.embed_documents(chunks)
    list_text_and_embedding = list(zip(chunks, vectors))

    sep('Add to vs')
    # Add precomputed (text, embedding) pairs so we don't embed twice.
    ids = vector_store.add_embeddings(list_text_and_embedding, [d.metadata for d in docs])

    sep('Save vs')
    save_path = os.path.join(xdir, f'{xbase}.{xfile}.normalize_L2-{normalize_L2}.{distance_strategy}.tmp.faiss')
    print(f'Saving to |{save_path}|.')
    vector_store.save_local(save_path)
    print(f'Saved to |{save_path}|.')

    sep('try to search')
    # Interactive loop: embed the query, then search the store by vector.
    while True:
        sep()
        print('Seek what? (q for quit)')
        query = input().strip()
        if query in {'q', 'Q'}:
            break
        query_vector = embeddings.embed_query(query)
        result = vector_store.similarity_search_with_score_by_vector(query_vector, 4, score_threshold=1.0)
        for n, (hit, score) in enumerate(result, start=1):
            print(n, score, hit)

    sep('All over')