import json
import os
import sys
import time
from typing import List

# Make the parent directory importable BEFORE any project-local import.
# (The original appended to sys.path *after* importing playgroud.test_split,
# so the path hack never actually helped that import resolve.)
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from playgroud.test_split import get_chunks, chunks_to_documents

from langchain_community.document_loaders import UnstructuredMarkdownLoader, TextLoader, JSONLoader
from langchain_core.documents import Document
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough, RunnableLambda
from langchain_text_splitters import RecursiveCharacterTextSplitter, RecursiveJsonSplitter
from langchain_chroma import Chroma
from langchain_community.embeddings import ModelScopeEmbeddings
from langchain.embeddings import SentenceTransformerEmbeddings, HuggingFaceBgeEmbeddings

import ssl

def timeit(func):
    """Decorator that prints the wall-clock duration of each call to *func*.

    Returns the wrapped function's own return value unchanged.
    """
    from functools import wraps

    @wraps(func)  # preserve func's __name__/__doc__ for introspection and logging
    def wrapper(*args, **kwargs):
        start_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()

        print(f"{func.__name__} finished in {end_time - start_time:.6f} seconds.")

        return result

    return wrapper

class VectorData:
    """Hybrid (dense + BM25 sparse) Milvus vector store over a source data file.

    Parameters
    ----------
    file_path : str
        Path of the source data file consumed by ``init_vector_data``.
    embedding_path : str, optional
        Local path of a HuggingFace embedding model; when ``None`` no dense
        embedding function is configured.
    collection_name : str
        Name of the Milvus collection to use.
    milvus_uri : str, optional
        Milvus server URI; defaults to the original hard-coded endpoint.
    """

    # Default endpoint preserved from the original hard-coded value.
    DEFAULT_MILVUS_URI = "http://8.146.204.90:19530"

    def __init__(self, file_path, embedding_path=None,
                 collection_name="demo_collection", milvus_uri=None):
        self.file_path = file_path
        self.collection_name = collection_name
        self.milvus_uri = milvus_uri if milvus_uri is not None else self.DEFAULT_MILVUS_URI
        # Deferred import: the heavy embedding dependency is only needed when
        # the class is actually instantiated. (A redundant duplicate import of
        # langchain_chroma.Chroma — already imported at module level and unused
        # here — has been removed.)
        from langchain_huggingface import HuggingFaceEmbeddings
        if embedding_path is not None:
            self.embedding = HuggingFaceEmbeddings(model_name=embedding_path)
        else:
            self.embedding = None
        self.vector_store = self.get_vector_store()

    def get_vector_store(self):
        """Build a Milvus store with a dense vector field plus a BM25 sparse field."""
        from langchain_milvus import Milvus, BM25BuiltInFunction
        return Milvus(
            collection_name=self.collection_name,
            embedding_function=self.embedding,
            builtin_function=BM25BuiltInFunction(),
            vector_field=["dense", "sparse"],
            connection_args={
                "uri": self.milvus_uri,
            },
            consistency_level="Strong",
            drop_old=False,
            auto_id=True,
        )

    def docs_to_vector(self, docs: List[Document]):
        """Insert *docs* into the underlying vector store."""
        self.vector_store.add_documents(docs)

    def init_vector_data(self):
        """Split the source file into documents and load them into the store.

        NOTE(review): ``MarkdownSplit`` is not defined or imported anywhere in
        this file — calling this method will raise ``NameError`` unless it is
        injected elsewhere. Confirm the intended splitter (perhaps
        ``get_chunks``/``chunks_to_documents`` from playgroud.test_split).
        """
        docs = MarkdownSplit(self.file_path)
        self.docs_to_vector(docs)

    def vector_search(self, query: str, k=5):
        """Run a hybrid dense+sparse similarity search for *query*.

        NOTE(review): the *k* parameter is ignored — the call always requests
        3 results with equal dense/sparse weighting. Preserved as-is for
        backward compatibility; confirm whether ``k=k`` was intended.
        """
        return self.vector_store.similarity_search(
            query, k=3, ranker_type="weighted", ranker_params={"weights": [0.5, 0.5]}
        )

if __name__ == '__main__':
    # Local path of the bge-base-zh-v1.5 embedding model (Windows layout).
    embedding_path = r"D:\huggingface\modelscope\hub\bge-base-zh-v1.5"
    # Connects to the Milvus server and wires up the embedding model.
    vectordata = VectorData('data/demo.json', embedding_path=embedding_path, collection_name="p8_1")
    # Issue a hybrid search and print the raw Document hits.
    print(vectordata.vector_search("香草布里欧修吐司 少油少糖的详细做法"))