import json
import os
import time

import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, StorageContext, Settings
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.extractors import TitleExtractor
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import TokenTextSplitter, SentenceSplitter
from llama_index.core.postprocessor import SentenceTransformerRerank
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import QueryFusionRetriever
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.readers.file import PyMuPDFReader
from llama_index.vector_stores.chroma import ChromaVectorStore
from pydantic.v1 import BaseModel


def _configure_openai_env() -> None:
    """Map the Zhihu-proxy credentials onto the variables the OpenAI SDK reads.

    Raises:
        KeyError: if OPENAI_API_KEY_ZHIHU / OPENAI_API_BASE_ZHIHU are not set.
    """
    os.environ["OPENAI_API_KEY"] = os.environ["OPENAI_API_KEY_ZHIHU"]
    os.environ["OPENAI_API_BASE"] = os.environ["OPENAI_API_BASE_ZHIHU"]
    # Newer openai clients read OPENAI_BASE_URL instead of OPENAI_API_BASE;
    # set both so either client version hits the proxy endpoint.
    os.environ["OPENAI_BASE_URL"] = os.environ["OPENAI_API_BASE_ZHIHU"]


def _build_index() -> VectorStoreIndex:
    """Load local documents and embed them into a fresh Chroma-backed index.

    Returns:
        A VectorStoreIndex persisted in ./chroma_db under a new collection.
    """
    # Persistent ChromaDB client, stored on local disk.
    chroma_client = chromadb.PersistentClient(path="./chroma_db")

    # 1. Global LLM and embedding models.
    Settings.llm = OpenAI(temperature=0, model="gpt-4o")
    Settings.embed_model = OpenAIEmbedding(model="text-embedding-3-small", dimensions=512)
    # 2. Global ingestion pipeline: sentence-aware chunking.
    Settings.transformations = [SentenceSplitter(chunk_size=300, chunk_overlap=100)]

    # 3. Load local documents; PDFs are parsed with PyMuPDF.
    documents = SimpleDirectoryReader(
        "./llamaIndexDatas", file_extractor={".pdf": PyMuPDFReader()}
    ).load_data()

    # 4. New collection per run — the hex timestamp keeps runs isolated.
    collection_name = hex(int(time.time()))
    chroma_collection = chroma_client.get_or_create_collection(collection_name)

    # 5./6. Wrap the collection as a Vector Store and index into it.
    vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    return VectorStoreIndex.from_documents(documents, storage_context=storage_context)


def _build_chat_engine(index: VectorStoreIndex) -> CondenseQuestionChatEngine:
    """Assemble reranker + RAG-fusion retriever into a multi-turn chat engine."""
    # 7. Post-retrieval reranking model.
    # NOTE(review): hard-coded local Windows model path — parameterize (env var
    # or CLI arg) before running this anywhere else.
    reranker = SentenceTransformerRerank(
        model="C:/Users/adind/.cache/modelscope/hub/quietnight/bge-reranker-large", top_n=2
    )

    # 8. RAG-Fusion retriever: generate query variants and fuse their results.
    fusion_retriever = QueryFusionRetriever(
        [index.as_retriever()],
        similarity_top_k=5,  # top-k results recalled per query
        num_queries=3,  # number of generated queries
        use_async=True,
        # query_gen_prompt="...",  # optional custom query-generation prompt
    )

    # 9. Single-turn query engine with reranking applied after retrieval.
    query_engine = RetrieverQueryEngine.from_args(
        fusion_retriever,
        node_postprocessors=[reranker]
    )

    # 10. Conversational engine: condenses chat history + follow-up into a
    # standalone question before querying.
    return CondenseQuestionChatEngine.from_defaults(
        query_engine=query_engine,
        # condense_question_prompt=... # optional custom condense prompt
    )


def _chat_loop(chat_engine: CondenseQuestionChatEngine) -> None:
    """Interactive REPL; an empty or whitespace-only line exits."""
    while True:
        question = input("User:")
        if not question.strip():
            break
        response = chat_engine.chat(question)
        print(f"AI: {response}")


if __name__ == "__main__":
    _configure_openai_env()
    _chat_loop(_build_chat_engine(_build_index()))
