from configs import *

import gc
import json
import logging
import os
from datetime import datetime

import pandas as pd
import torch
from langchain.embeddings import HuggingFaceBgeEmbeddings
# from langchain_community.embeddings import HuggingFaceBgeEmbeddings
from langchain.vectorstores import FAISS
from langchain.vectorstores.base import VectorStore
from langchain.docstore.document import Document
# Stdlib logging goes to debug.log; loguru handles the API log sink.
logging.basicConfig(filename='debug.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
from loguru import logger

# NOTE: logger.configure(handlers=...) replaces ALL previously registered
# handlers, so the old duplicate `logger.add("api_nohup.log")` call that
# preceded it was redundant and has been removed — the final handler set
# (a single api_nohup.log sink) is unchanged.
logger.configure(handlers=[
        {"sink": "api_nohup.log", }
    ])
class FaissSearch:
    """FAISS-backed similarity search over texts embedded with a BGE model.

    The embedding model is selected via ``emb_model_type`` (falling back to
    the module-level ``EMB_MODEL_TYPE`` / ``EMB_MODEL_DICT`` from ``configs``).
    The index is persisted under ``./Faiss/<model>/...`` and the timestamp of
    the last index update is tracked in ``./params.json``.
    """

    def __init__(self, type=None, emb_model_type=None):
        """
        Args:
            type: optional index namespace; when given, the store lives in a
                ``<type>_vector_store`` directory.  (The parameter shadows the
                builtin, but the name is kept for backward compatibility with
                existing callers.)
            emb_model_type: key into ``EMB_MODEL_DICT``; defaults to the
                global ``EMB_MODEL_TYPE``.
        """
        self.emb_model_type = EMB_MODEL_TYPE if emb_model_type is None else emb_model_type
        self.emb_model = None  # populated by load()
        self.emb_model_path = EMB_MODEL_DICT[self.emb_model_type]

        self.last_update_date = None  # datetime of the last index update, or None
        self.config_path = "./params.json"

        self.vector_store = None  # populated by load()
        self.type = type
        if type is None:
            self.vector_store_path = f"./Faiss/{self.emb_model_type}/vector_store"
        else:
            self.vector_store_path = f"./Faiss/{self.emb_model_type}/{type}_vector_store"

    def load(self):
        """Load the embedding model and the FAISS index, creating an empty
        index on first use.

        Raises:
            Exception: if the embedding model type is not supported, or if
                loading/creating the FAISS store fails.
        """
        # Best-effort restore of the last update timestamp; a missing or
        # malformed params file is not fatal, but is no longer silently
        # swallowed by a bare `except: pass`.
        try:
            if os.path.exists(self.config_path):
                with open(self.config_path, "r") as f:
                    self.last_update_date = json.load(f)[self.emb_model_type]["last_update_date"]
            if self.last_update_date is not None:
                self.last_update_date = datetime.fromisoformat(self.last_update_date)
        except (OSError, KeyError, TypeError, ValueError) as e:
            logger.warning(f"could not restore last_update_date: {e}")
            self.last_update_date = None

        print("1. 加载Embedding模型中...")
        #### Embedding ####
        if "bge" in self.emb_model_type:
            # normalize_embeddings=True so inner-product search behaves like
            # cosine similarity; device 0 pins the model to the first GPU.
            encode_kwargs = {'normalize_embeddings': True, "device": 0}
            self.emb_model = HuggingFaceBgeEmbeddings(model_name=self.emb_model_path,
                                                      encode_kwargs=encode_kwargs,
                                                      query_instruction="为这个句子生成表示以用于检索相关文章：")
        else:
            raise Exception(f"{self.emb_model_type}暂不支持")

        print("2. 加载FAISS库中...")
        #### Faiss ####
        # The old `except Exception as e: raise Exception(e)` wrapper only
        # destroyed the original traceback; errors now propagate unchanged.
        if os.path.exists(self.vector_store_path) and os.path.isdir(self.vector_store_path):
            self.vector_store = FAISS.load_local(self.vector_store_path, self.emb_model)
        else:
            # FAISS cannot be built from zero texts, so bootstrap with one
            # dummy text and immediately delete it to get an empty store.
            self.vector_store = FAISS.from_texts([""], self.emb_model)
            self.delete_all()
            self.vector_store.save_local(self.vector_store_path)
        print("3. Embedding模型 & FAISS库 加载完成")

    def save_params(self):
        """Persist ``last_update_date`` (ISO string, or None) for this model
        type in ``params.json``, preserving entries for other model types.
        """
        if os.path.exists(self.config_path):
            with open(self.config_path, "r") as f:
                params = json.load(f)
        else:
            params = {}
        # Serialize exactly once up front.  setdefault fixes the KeyError the
        # old code hit when the file existed but had no entry for this model
        # type yet; it also avoids temporarily storing a raw datetime object.
        date_str = None if self.last_update_date is None else self.last_update_date.isoformat()
        params.setdefault(self.emb_model_type, {})["last_update_date"] = date_str
        with open(self.config_path, "w") as f:
            f.write(json.dumps(params, ensure_ascii=False, indent=4))

    def get_size(self):
        """Return the number of vectors currently in the index."""
        return len(self.vector_store.index_to_docstore_id)

    def get_info(self):
        """Return a summary dict describing the store and embedding model."""
        # Embed a tiny probe text once to discover the embedding dimension.
        emb_dim = len(self.emb_model.embed_documents(["test"])[0])
        return {"vector_store_path": self.vector_store_path,
                "vector_store_type": "FAISS",
                "vector_store_size": len(self.vector_store.index_to_docstore_id),
                "emb_model_path": self.emb_model_path,
                "emb_model": self.emb_model_type,
                "emb_dim": emb_dim,
                }

    def add_texts(self, texts, metadatas=None):
        """Embed ``texts`` (optionally with per-text ``metadatas``) and add
        them to the index, saving after each batch so progress survives an
        interruption.

        Args:
            texts: list of strings to index.
            metadatas: optional list of metadata dicts, aligned with texts;
                when None each text gets ``{"id": None}``.
        """
        # Newlines hurt BGE-style sentence embeddings; flatten them.
        texts = [t.replace("\n", " ") for t in texts]

        start_ind = 0
        texts_batch_size = 102400  # texts per encode/save checkpoint cycle
        logger.info(f"{start_ind}/{len(texts)} of data has been saved successfully")
        while start_ind < len(texts):
            sliced_texts = texts[start_ind:start_ind + texts_batch_size]
            if metadatas is None:
                sliced_metadatas = ({"id": None} for _ in range(len(sliced_texts)))
            else:
                sliced_metadatas = iter(metadatas[start_ind:start_ind + texts_batch_size])
            # Encode through the underlying sentence-transformers client
            # directly so we control batch size and get a progress bar.
            sliced_embeddings = self.emb_model.client.encode(sliced_texts,
                                                             batch_size=128,
                                                             show_progress_bar=True,
                                                             normalize_embeddings=True)

            sliced_text_embeddings = list(zip(sliced_texts, sliced_embeddings))

            self.vector_store.add_embeddings(sliced_text_embeddings, sliced_metadatas)
            self.vector_store.save_local(self.vector_store_path)

            start_ind += texts_batch_size
            # min() keeps the progress report from exceeding 100% on the
            # final partial batch (the old code could log e.g. 102400/5000).
            logger.info(f"{min(start_ind, len(texts))}/{len(texts)} of data has been saved successfully")
            # Release GPU and Python memory between large batches.
            torch.cuda.empty_cache()
            gc.collect()
        logger.info(f"{len(texts)} of data has been saved finished successfully")
        logger.info("===================================================")

    def update_check_file(self):
        """Dump every indexed document (index, content, metadata) to a
        human-readable check file for manual inspection."""
        with open(f"./Faiss/{self.emb_model_type}/check_file.txt", 'w') as file:
            for ind, doc_key in self.vector_store.index_to_docstore_id.items():
                doc = self.vector_store.docstore.search(doc_key)
                # repr() keeps each record on one line even if the document
                # content contains newlines.
                string = repr(f"index={ind}, page_content={doc.page_content}, metadata={doc.metadata}")
                file.write(string + '\n')

    def delete_all(self):
        """Remove every document from the index, persist the now-empty store
        and reset the last-update timestamp."""
        # NOTE(review): reaches into the private docstore._dict — no public
        # "list all ids" API in this langchain version.
        all_ids = list(self.vector_store.docstore._dict.keys())
        self.vector_store.delete(all_ids)
        self.vector_store.save_local(self.vector_store_path)
        self.last_update_date = None
        self.save_params()

    def similarity_search(self, query, top_k):
        """Return the ``top_k`` most similar texts for ``query`` as
        ``{page_content: {"score": float, **metadata}}``.

        NOTE: results with identical page_content collapse into a single
        dict key (the later duplicate wins), same as the original behavior.
        """
        logger.info(f"start search for: {query}: ")
        result_docs = self.vector_store.similarity_search_with_score(query=query, k=top_k)
        results = {}
        for doc, score in result_docs:
            results[doc.page_content] = {'score': float(score), **doc.metadata}
        return results



