from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import Chroma
from tqdm import tqdm
import pickle
import json
import path


def get_from_textdb(prompt, pace_list, chromadb_path, pickle_path):
    """Retrieve context windows around the chunk most similar to *prompt*.

    The Chroma store is queried for the single best-matching chunk; the
    pickled list of raw text chunks is then used to rebuild one context
    window per entry in *pace_list*, each spanning ``pace`` chunks on
    either side of the matched chunk.

    Args:
        prompt: Query text used for the similarity search.
        pace_list: Iterable of ints; each value is the half-width (in
            chunks) of one context window to return.
        chromadb_path: Persist directory of the Chroma vector store.
        pickle_path: Path to the pickled ``list[str]`` of text chunks,
            in the same order they were indexed.

    Returns:
        A list of concatenated context strings, one per value in
        *pace_list*, in the same order.

    Raises:
        IndexError: If the similarity search returns no documents.
        ValueError: If the matched chunk is not present in the pickle.
    """
    db = Chroma(
        persist_directory=chromadb_path,
        embedding_function=HuggingFaceEmbeddings(model_name=path.MODEL_NAME),
    )
    docs = db.similarity_search(prompt)
    specific_doc = docs[0].page_content  # IndexError if the store is empty

    with open(pickle_path, "rb") as f:
        # NOTE(review): pickle.load is only safe on trusted local caches.
        textdb = pickle.load(f)
    # ValueError if the stored chunk text drifted from the pickle contents.
    specific_doc_index = textdb.index(specific_doc)

    result_list = []
    for pace in pace_list:
        # Slicing clamps the upper bound for free; only the lower bound
        # must be clamped so a negative index doesn't wrap to the end.
        start = max(0, specific_doc_index - pace)
        stop = specific_doc_index + pace + 1
        result_list.append("".join(textdb[start:stop]))
    return result_list


def clean_text(text):
    """Delete layout noise from *text*: newlines, carriage returns, tabs,
    and the ■/□ placeholder glyphs left over from OCR/extraction."""
    return text.translate(str.maketrans("", "", "\n\r\t■□"))


retrieve_answer_path = "../answer/answer_v6_80_234.json"

# For every question, fetch three context windows of increasing width
# (pace 2, 3, 4) and store them as answer_1..answer_3 on the question dict.
with open(path.QUESTION_PATH, "r", encoding="utf-8") as question_file, \
        open(retrieve_answer_path, "w", encoding="utf-8") as answer_file:
    questions_list = json.load(question_file)
    for question in tqdm(questions_list):
        retrieved = get_from_textdb(
            question['question'], [2, 3, 4], path.VDB_CACHE, path.PICKLE_PATH)
        for slot, passage in enumerate(retrieved, start=1):
            question[f"answer_{slot}"] = clean_text(passage)
    json.dump(questions_list, answer_file, ensure_ascii=False, indent=4)
