#from langchain_qdrant import QdrantVectorStore
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, StorageContext, ServiceContext
from llama_index.core.base.embeddings.base import similarity
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM
from qdrant_client import QdrantClient
import os

from qdrant_client.http.models import VectorParams, Distance

# ----------- Key fix area -----------
# Blank out any OpenAI-related environment variables so no component
# accidentally falls back to calling the OpenAI API.
os.environ["OPENAI_API_KEY"] = ""
os.environ["OPENAI_API_BASE"] = ""

# Offline document pipeline: load documents, split them, embed and store the vectors.
def ingest_to_db():
    """Offline ingestion: load local markdown docs, embed them, persist the
    vectors into a Qdrant collection, then run one sample query.

    Side effects: connects to a local Qdrant server, creates the "rag_3"
    collection if missing, sets global ``Settings.embed_model`` and
    ``Settings.llm``, and prints the query response.
    """
    # Connect to the locally running Qdrant server.
    db_client = QdrantClient(host="localhost", port=6333, timeout=30)

    # Local sentence-transformers embedding model, forced onto CPU.
    Settings.embed_model = HuggingFaceEmbedding(
        model_name=r"D:\self\python\AIModel\all-MiniLM-L6-v2",
        device="cpu",
    )
    # Determine the embedding dimension through the public API instead of
    # the private `_model` attribute, which breaks across library versions.
    vector_size = len(Settings.embed_model.get_text_embedding("dimension probe"))

    # Only create the collection when it does not exist yet; a bare
    # create_collection() raises on every re-run of this script.
    if not db_client.collection_exists("rag_3"):
        db_client.create_collection(
            collection_name="rag_3",
            vectors_config=VectorParams(size=vector_size, distance=Distance.COSINE),
        )

    # Bridge LlamaIndex to Qdrant and route all index storage through it.
    vector_store = QdrantVectorStore(collection_name="rag_3", client=db_client)
    storage_context = StorageContext.from_defaults(vector_store=vector_store)

    # Local Qwen instruct model; registering it globally means every query
    # uses it (the original loaded it into a dead local and then set
    # Settings.llm = None, which makes queries resolve the OpenAI default).
    Settings.llm = HuggingFaceLLM(
        model_name=r"D:\self\python\AIModel\Qwen2.5-0.5B-Instruct",
        tokenizer_name=r"D:\self\python\AIModel\Qwen2.5-0.5B-Instruct",
        model_kwargs={"trust_remote_code": True},
        tokenizer_kwargs={"trust_remote_code": True},
    )

    # Load only .md files from ./data into memory.
    documents = SimpleDirectoryReader(input_dir="./data", required_exts=[".md"]).load_data()

    # BUG FIX: from_documents() takes storage_context=, not vector_store=.
    # The original call silently indexed into the default in-memory store,
    # so nothing was ever written to Qdrant.
    index = VectorStoreIndex.from_documents(
        documents,
        storage_context=storage_context,
        show_progress=True,
    )

    # "hybrid" mode needs sparse vectors (QdrantVectorStore(enable_hybrid=True))
    # which this dense-only collection lacks; use the default dense retrieval.
    query_engine = index.as_query_engine()

    response = query_engine.query("数据分析工具链")
    print(f"{response}")



def main():
    """Configure the global embedding model and LLM that LlamaIndex
    queries will use (no indexing is performed here)."""
    # Multilingual sentence-transformers model used to turn text into vectors.
    Settings.embed_model = HuggingFaceEmbedding(
        model_name=r"D:\self\python\AIModel\paraphrase-multilingual-MiniLM-L12-v2"
    )

    # Local Qwen instruct model; registering it on Settings makes every
    # subsequent query use this model.
    qwen_path = r"D:\self\python\AIModel\Qwen2.5-0.5B-Instruct"
    Settings.llm = HuggingFaceLLM(
        model_name=qwen_path,
        tokenizer_name=qwen_path,
        model_kwargs={"trust_remote_code": True},
        tokenizer_kwargs={"trust_remote_code": True},
    )


if __name__ == '__main__':
    # Script entry point: run the offline ingestion pipeline.
    ingest_to_db()