
import os
import dotenv
from langchain_community.document_loaders import TextLoader
from langchain_core.documents import Document
from langchain_openai import OpenAIEmbeddings
from langchain_text_splitters import CharacterTextSplitter
from langchain_redis import RedisVectorStore

def fun_1():
    """Load a local TXT file, split it into chunks, embed the chunks, and
    store the resulting vectors in the Redis index ``my_documents``."""
    dotenv.load_dotenv()

    # Embedding client is configured entirely from environment variables.
    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY"),
    )

    # NOTE: RedisVectorStore takes the keyword ``embeddings`` (plural),
    # not ``embedding``.
    vector_store = RedisVectorStore(
        redis_url="redis://localhost:6379",
        embeddings=embedding_model,
        index_name="my_documents",
    )

    # Read the source document and cut it into small overlapping chunks.
    raw_documents = TextLoader(file_path="F:/data/1.txt", encoding="utf-8").load()
    splitter = CharacterTextSplitter(chunk_size=100, chunk_overlap=10)
    chunks = splitter.split_documents(raw_documents)

    # Persist every chunk (text + vector) into Redis.
    vector_store.add_documents(chunks)
    print("文档已成功添加到Redis向量存储")
    print(f"向量数据库中存储了 {len(chunks)} 个向量")
#fun_1()

def fun_2():
    """Search the ``my_documents`` Redis index through a retriever and print
    the documents most relevant to a fixed query string."""
    dotenv.load_dotenv()

    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY"),
    )

    # NOTE: keyword is ``embeddings`` (plural), not ``embedding``.
    vector_store = RedisVectorStore(
        redis_url="redis://localhost:6379",
        embeddings=embedding_model,
        index_name="my_documents",
    )

    query = "协和"
    # Wrap the store in a retriever and run the semantic search.
    relevant_docs = vector_store.as_retriever().invoke(query)

    print(f"\n查询: {query}")
    print("相关文档:")
    for rank, doc in enumerate(relevant_docs, start=1):
        print(f"{rank}. {doc.page_content}")
        print(f"   来源: {doc.metadata}")
#fun_2()

def fun_3():
    """Embed a set of hand-written sample documents and store them in the
    Redis index ``my_documents_1``, declaring which metadata fields can be
    used for filtering."""
    # (page_content, metadata) pairs for the sample corpus.
    samples = [
        ("近日，中国科学家在量子计算领域取得重大突破，实现了100量子比特的纠缠态操控。",
         {"source": "science_news.txt", "category": "科技新闻", "date": "2024-01-15"}),
        ("《三体》是刘慈欣创作的科幻小说三部曲，包括《三体》、《黑暗森林》和《死神永生》。",
         {"source": "literature_review.txt", "category": "文学作品", "author": "刘慈欣"}),
        ("人工智能在医疗诊断中的应用越来越广泛，特别是在影像识别方面表现突出。",
         {"source": "ai_research.txt", "category": "学术研究", "field": "医疗AI"}),
        ("巴黎奥运会筹备工作进入冲刺阶段，预计将有超过200个国家和地区参与。",
         {"source": "sports_news.txt", "category": "体育新闻", "event": "2024巴黎奥运会"}),
        ("区块链技术不仅应用于加密货币，还在供应链管理、数字身份验证等领域展现潜力。",
         {"source": "tech_trends.txt", "category": "技术趋势", "topic": "区块链"}),
        ("《红楼梦》是中国古典四大名著之一，描写了贾宝玉与林黛玉的爱情悲剧。",
         {"source": "classic_literature.txt", "category": "古典文学", "dynasty": "清朝"}),
    ]
    documents = [
        Document(page_content=text, metadata=meta) for text, meta in samples
    ]

    dotenv.load_dotenv()

    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY"),
    )

    # Embed a throwaway string just to show the vector dimensionality.
    sample_embedding = embedding_model.embed_query("示例文本")
    print(f"嵌入维度: {len(sample_embedding)}")

    # NOTE: keyword is ``embeddings`` (plural). ``metadata_schema``
    # declares which metadata fields are indexed as filterable tags.
    vector_store = RedisVectorStore(
        redis_url="redis://localhost:6379",
        embeddings=embedding_model,
        index_name="my_documents_1",
        metadata_schema=[
            {"name": "category", "type": "tag"},
            {"name": "source", "type": "tag"},
        ],
    )

    vector_store.add_documents(documents)
    print(f"向量数据库中存储了 {len(documents)} 个向量")
#fun_3()

#使用相似性检索
def fun_4():
    dotenv.load_dotenv()
    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY")
    )
    redis_url = "redis://localhost:6379"
    vector_store = RedisVectorStore(
        redis_url=redis_url,
        embeddings=embedding_model,
        index_name="my_documents"
    )
    query = "小说"

    retriever = vector_store.as_retriever()
    relevant_docs = vector_store.similarity_search( query , k=2)
    print(f"\n查询: {query}")
    print("相关文档:")
    for i, doc in enumerate(relevant_docs):
        print(f"{i + 1}. {doc.page_content}")
        print(f"   来源: {doc.metadata}")

#fun_4()

#直接使用问题的向量查询
def fun_5():
    dotenv.load_dotenv()
    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY")
    )
    redis_url = "redis://localhost:6379"
    vector_store = RedisVectorStore(
        redis_url=redis_url,
        embeddings=embedding_model,
        index_name="my_documents"
    )
    query = "小说"
    query_embedding = embedding_model.embed_query(query)
    relevant_docs = vector_store.similarity_search_by_vector(query_embedding , k=2, metadata_filter={"category": "科技新闻"})
    print(f"\n查询: {query}")
    print("相关文档:")
    for i, doc in enumerate(relevant_docs):
        print(f"{i + 1}. {doc.page_content}")
        print(f"   来源: {doc.metadata}")

#fun_5()

#过滤元数据 filter的查询
def fun_6():
    dotenv.load_dotenv()
    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY")
    )
    redis_url = "redis://localhost:6379"
    vector_store = RedisVectorStore(
        redis_url=redis_url,
        embeddings=embedding_model
        ,index_name="my_documents"
    )
    query = "小说"
    query_embedding = embedding_model.embed_query(query)
    relevant_docs = vector_store.similarity_search_by_vector(query_embedding, k=3 , metadata_filter={"category": "科技新闻"})
    print(f"\n查询: {query}")
    print("相关文档:")
    for i, doc in enumerate(relevant_docs):
        print(f"{i + 1}. {doc.page_content}")
        print(f"   来源: {doc.metadata}")

#fun_6()

#通过余弦相似度分数进行查询
def fun_7():
    dotenv.load_dotenv()
    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY")
    )
    redis_url = "redis://localhost:6379"
    vector_store = RedisVectorStore(
        redis_url=redis_url,
        embeddings=embedding_model,
        index_name="my_documents"
    )
    query = "小说"
    relevant_docs = vector_store.similarity_search_with_relevance_scores(query)
    print(f"\n查询: {query}")
    print("相关文档:")
    for i, (doc, score) in enumerate(relevant_docs):
        print(f"{i + 1}. {doc.page_content}")
        print(f"   来源: {doc.metadata}")

#fun_7()


#langchain-redis底层没有实现评分机制
def fun_8():
    dotenv.load_dotenv()
    embedding_model = OpenAIEmbeddings(
        model=os.getenv("CLOSE_EMBEDDING_MODEL"),
        base_url=os.getenv("CLOSE_BASE_URL"),
        api_key=os.getenv("CLOSE_API_KEY")
    )
    redis_url = "redis://localhost:6379"
    vector_store = RedisVectorStore(
        redis_url=redis_url,
        embeddings=embedding_model,
        index_name="my_documents_1"
    )
    query = "小说"
    #创建检索器，并过滤元数据
    # 创建向量搜索器
    # 获取带分数的结果
    docs_and_scores = vector_store.similarity_search_with_relevance_scores(query, k=3)

    # 手动设置阈值并过滤
    threshold = 0.1
    filtered_results = [(doc, score) for doc, score in docs_and_scores if score >= threshold]

    print(f"\n查询: {query}")
    print("相关文档:")
    for i, (doc, score) in enumerate(filtered_results):
        print(f"{i + 1}. {doc.page_content}")
        print(f"   相似度得分: {score}")


# Guard the demo call so importing this module does not trigger network I/O.
if __name__ == "__main__":
    fun_8()
