import chromadb
# Initialize with your API key
import os
from dotenv import load_dotenv, find_dotenv
from embedding_model import EmbeddingClient
from MixSpliter import LegalChunkingPipeline,DataChunkPipeline
from llama_index.core import SimpleDirectoryReader

from llama_index.core.extractors import (
    TitleExtractor,
    SummaryExtractor, 
)
from llama_index.extractors.entity import EntityExtractor

from llama_index.core import StorageContext,VectorStoreIndex,load_index_from_storage
from llama_index.core import Settings
from llama_index.vector_stores.chroma import ChromaVectorStore
from CONSTANT import LEGAL_METADATA
from interface_for_llm import LlmaindexDeepseekClientWrapper
from llama_index.core.ingestion import IngestionPipeline
import random



_ = load_dotenv(find_dotenv(),override=True)


def jina_embedding_client():
    """Build an ``EmbeddingClient`` backed by the Jina embedding API.

    Reads ``JINA_API_KEY`` and ``JINA_API_MODEL_NAME`` from the environment
    (raises ``KeyError`` if either is unset).

    Returns:
        EmbeddingClient: client configured in "jina" mode.
    """
    return EmbeddingClient(
        mode="jina",
        api_key=os.environ['JINA_API_KEY'],
        model_name=os.environ['JINA_API_MODEL_NAME'],
    )

def zhipuai_embedding_client():
    """Build an ``EmbeddingClient`` backed by the ZhipuAI embedding API.

    Reads ``ZHIPUAI_API_KEY`` and ``ZHIPUAI_EMBEDDING_MODEL`` from the
    environment (raises ``KeyError`` if either is unset).

    Returns:
        EmbeddingClient: client configured in "zhipu" mode.
    """
    return EmbeddingClient(
        mode="zhipu",
        api_key=os.environ['ZHIPUAI_API_KEY'],
        model_name=os.environ['ZHIPUAI_EMBEDDING_MODEL'],
    )

def my_embedding_client():
    """Build an ``EmbeddingClient`` for the custom ("myapi") endpoint.

    Reads ``MY_API_KEY``, ``MY_MODEL_NAME`` and ``MY_MODEL_API_URL`` from the
    environment (raises ``KeyError`` if any is unset); the endpoint URL is
    forwarded to the client via ``model_kwargs``.

    Returns:
        EmbeddingClient: client configured in "myapi" mode.
    """
    endpoint_url = os.environ['MY_MODEL_API_URL']
    return EmbeddingClient(
        mode="myapi",
        api_key=os.environ['MY_API_KEY'],
        model_name=os.environ['MY_MODEL_NAME'],
        model_kwargs={'api_url': endpoint_url},
    )


def test_embedding():
    """Chunk the Civil Code PDF, build a vector index, and persist it.

    Loads the PDF, tags each loaded document with ``LEGAL_METADATA``, runs
    the legal chunking pipeline, indexes the resulting nodes with
    ``VectorStoreIndex``, persists the index to ``./persist_data``, and
    prints the first few chunks for a quick manual check.
    """
    file_path = "./基础PDF文件/民法典.pdf"
    legal_documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
    for legal_document in legal_documents:
        # BUG FIX: was `legal_document.matadata` (typo), which silently set a
        # useless attribute and left the real document metadata untouched.
        legal_document.metadata = LEGAL_METADATA

    embedding_model = my_embedding_client().get_embedding_model()
    chunking_pipeline = LegalChunkingPipeline(embedding_model=embedding_model)
    # Make the embedding model the global default so the index uses it too.
    Settings.embed_model = embedding_model
    print("start chunking")
    # Perform the chunking step.
    chunked_nodes = chunking_pipeline.chunk_documents(legal_documents)

    print("end chunking")

    print("start persist")
    # (Removed a stray no-op `VectorStoreIndex.from_documents` attribute
    # access that called nothing and had no effect.)
    index = VectorStoreIndex(chunked_nodes)

    index.storage_context.persist("persist_data")
    print("end persist")

    # Inspect the results (first three chunks as a sample).
    for i, node in enumerate(chunked_nodes[:3]):
        print(f"块 {i+1}:")
        print(f"  元数据: {node.metadata}")
        print(f"  内容: {node.text[:100]}...")


def test_metadata():
    """Run the ingestion pipeline on the Civil Code PDF and sample metadata.

    Loads the PDF, tags each document with ``LEGAL_METADATA``, wires the
    DeepSeek LLM and the custom embedding model into llama-index
    ``Settings``, runs a ``DataChunkPipeline`` through an
    ``IngestionPipeline``, and prints the metadata of up to five randomly
    sampled resulting nodes.
    """
    file_path = "./基础PDF文件/民法典.pdf"
    legal_documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
    for legal_document in legal_documents:
        legal_document.metadata = LEGAL_METADATA

    clientWrapper = LlmaindexDeepseekClientWrapper()
    llm = clientWrapper.get_client()
    embedding_model = my_embedding_client().get_embedding_model()

    # Register the models globally so pipeline components pick them up.
    Settings.embed_model = embedding_model
    Settings.llm = llm
    dcp = DataChunkPipeline(embedding_model=embedding_model)

    transformations = [dcp]
    pipeline = IngestionPipeline(transformations=transformations)

    nodes = pipeline.run(documents=legal_documents)

    # BUG FIX: random.sample raises ValueError when the pipeline yields
    # fewer than 5 nodes; clamp the sample size to the population size.
    samples = random.sample(nodes, min(5, len(nodes)))
    for node in samples:
        print(node.metadata)

# Script entry point: run the metadata ingestion check.
if __name__ == "__main__":
    test_metadata()