import streamlit as st
from langchain_postgres import PGVector
from langchain_core.documents import Document
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import json
import asyncio

# 异步操作在 Streamlit 中的限制：使用模拟方式
# Streamlit 的单线程特性和 greenlet 限制使得直接异步操作不可行

# -------------------------------
# 1. Initialization & configuration
# -------------------------------
# Database credentials are read from Streamlit's secrets store
# (.streamlit/secrets.toml), section [postgresql].
POSTGRES_USER = st.secrets["postgresql"]["username"]
POSTGRES_PASSWORD = st.secrets["postgresql"]["password"]
POSTGRES_HOST = st.secrets["postgresql"]["host"]
POSTGRES_PORT = st.secrets["postgresql"]["port"]
POSTGRES_DB = st.secrets["postgresql"]["database"]

# Embedding client: DashScope's text-embedding-v3 model, keyed from secrets.
embeddings = DashScopeEmbeddings(
    model="text-embedding-v3",
    dashscope_api_key=st.secrets["dashscope"]["key"]
)

# PGVector store setup. The connection URL uses the psycopg (v3) driver
# through SQLAlchemy.
collection_name = "test_vectorstore"
connection = f"postgresql+psycopg://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}"
vector_store = PGVector(
    embeddings=embeddings,
    collection_name=collection_name,
    connection=connection,
    use_jsonb=True,  # metadata stored as JSONB so it can be filtered server-side
    distance_strategy="cosine",  # supported: cosine, euclidean, max_inner_product
)

# Async workaround: Streamlit's single-threaded execution model conflicts with
# asyncio/greenlet, so "async" operations are simulated synchronously.
def simulate_async_operation(operation_name, func, *args, **kwargs):
    """Run *func* synchronously behind a spinner to mimic an async call.

    A short artificial delay makes the spinner visible so the UI behaves as
    if the operation were awaited.
    """
    import time

    with st.spinner(f"正在执行{operation_name}..."):
        time.sleep(0.5)  # artificial latency so the spinner is noticeable
        result = func(*args, **kwargs)
    return result

# Raw SQLAlchemy access used by the management/statistics queries below.
engine = create_engine(connection)
Session = sessionmaker(bind=engine)
# NOTE(review): `session` is never used or closed anywhere in this file — it
# pins a DB connection for the whole script run; consider removing or scoping it.
session = Session()

st.title("🔍 LangChain + PGVector 测试与管理面板")
st.markdown("**增强功能**: 元数据过滤 | MMR搜索 | 异步操作 | 高级配置")
st.markdown("---")

# Read-only view of the current vector-store and database configuration.
with st.expander("⚙️ **向量库配置信息**"):
    store_info = {
        "collection_name": collection_name,
        "distance_strategy": "cosine",
        "embedding_model": "text-embedding-v3",
        "use_jsonb": True,
    }
    db_info = {
        "database": POSTGRES_DB,
        "host": POSTGRES_HOST,
        "port": POSTGRES_PORT,
        "username": POSTGRES_USER,
    }
    left, right = st.columns(2)
    with left:
        st.json(store_info)
    with right:
        st.json(db_info)

# ===================================================================
# 1️⃣ Embedding tests
# ===================================================================
st.subheader("📌 Embedding 测试")

# Single-text embedding: show the vector dimension and a preview of values.
text_input = st.text_input("输入单条文本生成向量", "This is a test document.")
if st.button("生成单条向量"):
    vector = embeddings.embed_query(text_input)
    st.write(f"向量维度: {len(vector)}")
    st.write(f"前 5 个向量值: {vector[:5]}")

# Batch embedding, one text per line.
docs_input = st.text_area("输入批量文本（每行一条）", "foo\nbar\nbaz")
if st.button("生成批量向量"):
    # Skip blank lines: the old split("\n") embedded empty strings, wasting
    # API calls (and possibly erroring on empty input).
    doc_list = [line for line in docs_input.splitlines() if line.strip()]
    if not doc_list:
        st.warning("请输入至少一条非空文本")
    else:
        vectors = embeddings.embed_documents(doc_list)
        for i, vec in enumerate(vectors, start=1):
            st.write(f"{i}: 前 5 个向量值: {vec[:5]}")

st.markdown("---")

# ===================================================================
# 2️⃣ Document management
# ===================================================================
st.subheader("📌 文档管理")

# Seed the collection with a small set of known documents for manual testing.
if st.button("添加测试文档"):
    sample_rows = [
        (1, "there are cats in the pond", "animals"),
        (2, "ducks are also found in the pond", "animals"),
        (3, "fresh apples are available at the market", "food"),
        (4, "the market also sells fresh oranges", "food"),
        (5, "the new art exhibit is fascinating", "art"),
    ]
    docs = [
        Document(page_content=text, metadata={"id": doc_id, "topic": topic})
        for doc_id, text, topic in sample_rows
    ]
    vector_store.add_documents(docs, ids=[d.metadata["id"] for d in docs])
    st.success("✅ 已添加测试文档！")

# Delete a single document by ID. Validate the input first: the original code
# called delete(ids=[""]) when the button was pressed with an empty field.
delete_id = st.text_input("输入要删除的文档 ID")
if st.button("删除文档"):
    doc_id = delete_id.strip()
    if not doc_id:
        st.error("请输入要删除的文档 ID")
    else:
        vector_store.delete(ids=[doc_id])
        st.warning(f"已删除 ID={doc_id} 的文档")

# Bulk import documents from an uploaded JSON file.
# Expected format: a list of {"page_content": str, "metadata": {...}} objects.
uploaded_file = st.file_uploader("上传 JSON 文档批量导入", type=["json"])
if uploaded_file is not None:
    try:
        data = json.load(uploaded_file)
        docs = [Document(page_content=item["page_content"], metadata=item.get("metadata", {})) for item in data]
        ids = [doc.metadata.get("id") for doc in docs]
        # Only pass explicit ids when every record carries one; a None entry
        # in the ids list would otherwise break or misassign the insert.
        if any(i is None for i in ids):
            ids = None
        vector_store.add_documents(docs, ids=ids)
        st.success(f"✅ 成功导入 {len(docs)} 条文档！")
    except Exception as e:
        st.error(f"导入失败: {e}")

# Document update: implemented as delete-then-reinsert under the same ID
# (re-embedding the new content in the process).
st.write("🔄 **文档更新**")
update_id = st.text_input("要更新的文档 ID")
if update_id:
    new_content = st.text_area("新内容", key="update_content")
    new_metadata_str = st.text_area("新元数据 (JSON 格式)", '{"topic": "updated"}', key="update_metadata")
    if st.button("更新文档"):
        try:
            parsed_metadata = json.loads(new_metadata_str)
        except json.JSONDecodeError:
            st.error("元数据格式错误，请使用正确的 JSON 格式")
        else:
            try:
                vector_store.delete(ids=[update_id])
                vector_store.add_documents(
                    [Document(page_content=new_content, metadata=parsed_metadata)],
                    ids=[update_id],
                )
                st.success(f"✅ 文档 ID={update_id} 更新成功！")
            except Exception as e:
                st.error(f"更新失败: {e}")

st.markdown("---")

# ===================================================================
# 3️⃣ Similarity search
# ===================================================================
st.subheader("📌 相似度搜索")
query = st.text_input("输入搜索问题", "cats")
k = st.slider("返回数量 k", 1, 10, 3)

# Optional metadata filter. The key/value inputs only exist (and are only
# read) when the checkbox is ticked.
st.write("🔍 **元数据过滤选项**")
toggle_col, fields_col = st.columns(2)
with toggle_col:
    use_filter = st.checkbox("启用元数据过滤")
with fields_col:
    if use_filter:
        filter_key = st.text_input("过滤字段", "topic")
        filter_value = st.text_input("过滤值", "animals")

def _active_filter():
    """Return the metadata filter dict, or None when filtering is off/incomplete.

    Relies on short-circuiting: filter_key/filter_value are only referenced
    when use_filter is True, which is exactly when they are defined.
    """
    if use_filter and filter_key and filter_value:
        return {filter_key: filter_value}
    return None

if st.button("普通搜索"):
    for doc in vector_store.similarity_search(query, k=k, filter=_active_filter()):
        st.write(f"📄 {doc.page_content} | Metadata={doc.metadata}")

if st.button("带分数搜索"):
    for doc, score in vector_store.similarity_search_with_score(query=query, k=k, filter=_active_filter()):
        st.write(f"[SIM={score:.3f}] {doc.page_content} | Metadata={doc.metadata}")

# Search by a pre-computed embedding vector instead of raw query text.
st.write("🚫 **阻容阻抗模式搜索**")
if st.button("阻容搜索 (similarity_search_by_vector)"):
    query_embedding = embeddings.embed_query(query)
    hits = vector_store.similarity_search_by_vector(query_embedding, k=k)
    for doc in hits:
        st.write(f"🔍 {doc.page_content} | Metadata={doc.metadata}")

st.markdown("---")

# ===================================================================
# 4️⃣ MMR & Retriever tests
# ===================================================================
st.subheader("📌 MMR & Retriever 测试")

# Maximal Marginal Relevance: trades off relevance against result diversity.
st.write("🔄 **MMR (最大边际相关性) 搜索**")
k_col, fetch_col, lambda_col = st.columns(3)
with k_col:
    mmr_k = st.slider("MMR 返回数量", 1, 10, 4, key="mmr_k")
with fetch_col:
    fetch_k = st.slider("获取数量", 5, 50, 20, key="fetch_k")
with lambda_col:
    lambda_mult = st.slider("多样性参数", 0.0, 1.0, 0.5, key="lambda_mult")

if st.button("MMR 搜索"):
    mmr_hits = vector_store.max_marginal_relevance_search(
        query=query, k=mmr_k, fetch_k=fetch_k, lambda_mult=lambda_mult
    )
    for rank, doc in enumerate(mmr_hits, start=1):
        st.write(f"📌 {rank}: {doc.page_content} | Metadata={doc.metadata}")

st.markdown("---")

# Retriever interface: wraps the vector store for use in LangChain chains.
st.write("🔍 **Retriever 测试**")
search_type = st.selectbox("选择搜索方式", ["similarity", "mmr"])
search_kwargs = (
    {"k": k, "fetch_k": fetch_k, "lambda_mult": lambda_mult}
    if search_type == "mmr"
    else {"k": k}
)

retriever = vector_store.as_retriever(search_type=search_type, search_kwargs=search_kwargs)
if st.button("执行 Retriever"):
    for doc in retriever.invoke(query):
        st.write(f"📌 {doc.page_content} | Metadata={doc.metadata}")

st.markdown("---")

# ===================================================================
# 5️⃣ Web page loading & vectorization
# ===================================================================
st.subheader("🌐 网页加载 & 文档入库")
page_url = st.text_input("输入网页 URL", "https://python.langchain.ac.cn/docs/how_to/chatbots_memory/")
if st.button("加载网页并向量化"):
    # Network fetch can fail (bad URL, timeout); the original let the
    # exception crash the script run. Report it and skip the pipeline.
    try:
        loader = WebBaseLoader(web_paths=[page_url])
        docs = loader.load()
    except Exception as e:
        st.error(f"网页加载失败: {e}")
        docs = []
    if docs:
        st.write(f"共加载 {len(docs)} 篇文档")
        splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
        split_docs = splitter.split_documents(docs)
        st.write(f"分块后文档数量: {len(split_docs)}")
        vector_store.add_documents(split_docs)
        st.success("✅ 网页内容已分块并写入向量库！")

st.markdown("---")

# ===================================================================
# 6️⃣ Vector store management panel
# ===================================================================
st.subheader("📦 向量库管理")
tab1, tab2, tab3 = st.tabs(["📄 查看文档", "📊 集合统计", "⚠️ 高级操作"])

# TAB1: paginated browsing & CSV export
with tab1:
    st.write("🔹 浏览向量库中已存储的文档")
    # Renamed from `query` — the original shadowed the search query defined
    # above, which is confusing even though nothing read it afterwards.
    # collection_name is a module-level constant, so the f-string is safe here.
    docs_sql = f"""
        SELECT id, document, cmetadata
        FROM langchain_pg_embedding
        WHERE collection_id = (
            SELECT uuid FROM langchain_pg_collection
            WHERE name = '{collection_name}'
        )
        ORDER BY id ASC
    """
    df = pd.read_sql(docs_sql, engine)
    if df.empty:
        st.info("📭 当前集合中没有文档")
    else:
        page_size = st.slider("每页显示数量", 5, 50, 10, key="page_size")
        # Ceiling division. The original `len(df) // page_size + 1` produced
        # an extra empty page whenever len(df) was an exact multiple of
        # page_size.
        total_pages = (len(df) + page_size - 1) // page_size
        page = st.number_input("选择页码", 1, total_pages, 1, key="page_num")
        start_idx = (page - 1) * page_size
        end_idx = start_idx + page_size
        st.dataframe(df.iloc[start_idx:end_idx])
        # Cast UUID ids to str so to_csv/download works cleanly.
        export_df = df.copy()
        if 'id' in export_df.columns:
            export_df['id'] = export_df['id'].astype(str)
        st.download_button("📥 导出 CSV", export_df.to_csv(index=False).encode("utf-8"), "vectorstore_export.csv", "text/csv")

# TAB2: collection statistics
with tab2:
    try:
        count_query = f"""
            SELECT COUNT(*) FROM langchain_pg_embedding
            WHERE collection_id = (
                SELECT uuid FROM langchain_pg_collection
                WHERE name = '{collection_name}'
            )
        """
        count = pd.read_sql(count_query, engine).iloc[0,0]
        # NOTE(review): fallback dimension of 1536 looks suspect — DashScope
        # text-embedding-v3 is commonly 1024-dim; confirm before trusting this.
        dim = getattr(embeddings, "embedding_dimensions", None) or 1536
        # Collection row (name, uuid, collection-level metadata).
        collection_info_query = f"""
            SELECT 
                name,
                uuid,
                cmetadata
            FROM langchain_pg_collection 
            WHERE name = '{collection_name}'
        """
        collection_df = pd.read_sql(collection_info_query, engine)
        
        # Per-key frequency of document metadata. The store was created with
        # use_jsonb=True, so cmetadata is JSONB; the original used
        # json_object_keys(), which does not exist for jsonb in Postgres, so
        # this query always failed and was silently swallowed below.
        metadata_stats_query = f"""
            SELECT 
                jsonb_object_keys(cmetadata) as metadata_key,
                COUNT(*) as count
            FROM langchain_pg_embedding e
            JOIN langchain_pg_collection c ON e.collection_id = c.uuid
            WHERE c.name = '{collection_name}'
            GROUP BY jsonb_object_keys(cmetadata)
            ORDER BY count DESC
        """
        try:
            metadata_df = pd.read_sql(metadata_stats_query, engine)
        except Exception:
            metadata_df = pd.DataFrame()
        st.json({"collection_name": collection_name, "文档数量": count, "embedding维度": dim})
        
        if not collection_df.empty:
            st.write("**集合详情:**")
            # Cast UUID column to str so st.dataframe can render it.
            collection_display = collection_df.copy()
            if 'uuid' in collection_display.columns:
                collection_display['uuid'] = collection_display['uuid'].astype(str)
            st.dataframe(collection_display)
        
        if not metadata_df.empty:
            st.write("**元数据统计:**")
            st.dataframe(metadata_df)
    except Exception as e:
        st.error(f"统计失败: {e}")

# TAB3: advanced / dangerous operations
with tab3:
    st.warning("⚠️ 危险操作，请谨慎使用！")

    # Demonstrate "async-looking" searches via the synchronous simulation helper.
    st.subheader("🚀 模拟异步操作测试")
    st.info("💡 **在 Streamlit 中模拟异步操作**\n\n由于 Streamlit 的单线程特性，直接异步操作会遇到 greenlet 问题。\n这里使用同步方法模拟异步操作的效果。")

    async_query = st.text_input("模拟异步搜索查询", "cats", key="async_query")

    left_col, right_col = st.columns(2)

    with left_col:
        if st.button("🚀 模拟异步相似度搜索"):
            hits = simulate_async_operation(
                "异步相似度搜索", vector_store.similarity_search, async_query, k=3
            )
            st.write("**模拟异步搜索结果:**")
            for rank, doc in enumerate(hits, start=1):
                st.write(f"⚙️ {rank}: {doc.page_content} | Metadata={doc.metadata}")

    with right_col:
        if st.button("🚀 模拟异步带分数搜索"):
            scored_hits = simulate_async_operation(
                "异步带分数搜索", vector_store.similarity_search_with_score, async_query, k=3
            )
            st.write("**模拟异步带分数搜索结果:**")
            for rank, (doc, score) in enumerate(scored_hits, start=1):
                st.write(f"⚙️ {rank}: [SIM={score:.3f}] {doc.page_content} | Metadata={doc.metadata}")
    
    # User-facing technical notes on why real asyncio is avoided in this app.
    with st.expander("📄 关于 Streamlit 中的异步操作"):
        st.markdown("""
        **为什么在 Streamlit 中异步操作困难？**
        
        1. **单线程环境**: Streamlit 运行在单线程中，与异步的事件循环冲突
        2. **Greenlet 限制**: SQLAlchemy 的异步支持依赖于 greenlet，在 Streamlit 中不可用
        3. **事件循环管理**: Streamlit 的内部事件循环与 asyncio 冲突
        
        **替代方案：**
        - 使用同步方法加上加载动画模拟异步效果
        - 在独立的 FastAPI 服务中使用真正的异步操作
        - 使用线程池或进程池处理计算密集型任务
        """)
    
    st.markdown("---")
    
    # Danger: wipes the collection's contents with no confirmation step.
    if st.button("🗑 清空集合"):
        try:
            # NOTE(review): PGVector.delete(ids=None) semantics vary across
            # langchain versions — some delete every embedding in the
            # collection, others raise. Confirm against the installed
            # langchain_postgres release before relying on this.
            vector_store.delete(ids=None)
            st.success("✅ 已清空集合！")
        except Exception as e:
            st.error(f"清空失败: {e}")
