import streamlit as st
from langchain_postgres import PGVector
from langchain_core.documents import Document
from langchain_community.embeddings import DashScopeEmbeddings
from langchain_community.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine, text
import json
import asyncio
from service.logger_service import get_logger_service


# ======================== 全局变量 ========================
logger = get_logger_service()

# BUG FIX: the original literal "pages_test\vector.py" contained "\v",
# which Python interprets as a vertical-tab control character and which
# corrupted the module path written to the log. Use a forward slash.
logger.log_system_event("pages_test/vector.py", "加载中", level="info")

# Streamlit page metadata — must run before any other st.* call on the page.
st.set_page_config(
    page_title="向量服务测试",
    page_icon="🧪",
    layout="centered"
)

with st.spinner("初始化向量数据库..."):
    try:
        def _db_secret(key, default):
            """Return a DB setting, preferring [postgresql] over
            [connections.sql] in st.secrets, else *default*.

            Mirrors the original ``a or b`` fallback: a falsy value in
            [postgresql] (empty string, 0) also falls through.
            """
            value = st.secrets.get("postgresql", {}).get(key)
            if value:
                return value
            return st.secrets.get("connections", {}).get("sql", {}).get(key, default)

        # 获取数据库配置（两级回退：postgresql -> connections.sql -> 默认值）
        db_config = {
            'user': _db_secret("username", "docker"),
            'password': _db_secret("password", "docker"),
            'host': _db_secret("host", "localhost"),
            'port': _db_secret("port", 5432),
            'database': _db_secret("database", "streamlit_generic"),
        }
        # 连接字符串 (psycopg v3 driver)
        connection_string = (
            f"postgresql+psycopg://{db_config['user']}:"
            f"{db_config['password']}@{db_config['host']}:"
            f"{db_config['port']}/{db_config['database']}"
        )
        engine = create_engine(connection_string)
        Session = sessionmaker(bind=engine)
        session = Session()
        st.success("数据库连接成功")
    except Exception as e:
        logger.log_error("获取数据库连接失败", e)
        st.error("数据库连接失败")
        st.stop()


with st.spinner("获取知识库列表..."):
    try:
        # 查询所有集合及其统计信息（文档数量、首/末文档 ID）
        collections_query = text("""
            SELECT 
                c.name as collection_name,
                c.uuid as collection_id,
                c.cmetadata as metadata,
                COUNT(e.id) as document_count,
                MIN(e.id) as first_doc_id,
                MAX(e.id) as last_doc_id
            FROM langchain_pg_collection c
            LEFT JOIN langchain_pg_embedding e ON c.uuid = e.collection_id
            WHERE c.name IS NOT NULL 
            GROUP BY c.name, c.uuid
            ORDER BY c.name
        """)
        with engine.connect() as conn:
            rows = conn.execute(collections_query).fetchall()
        # 将每行 (name, uuid, metadata, count, first_id, last_id) 映射为字典；
        # first/last doc id 当前页面未使用，故丢弃。
        collections_list = [
            {
                "name": name,
                "uuid": str(coll_uuid),
                "metadata": metadata or {},
                "doc_count": int(doc_count or 0),
                "is_empty": (doc_count or 0) == 0,
            }
            for name, coll_uuid, metadata, doc_count, _first, _last in rows
        ]
    except Exception as e:
        logger.log_system_event("get_all_collections_error", f"获取集合列表失败: {e}", level="error")
        st.error(f"获取集合列表失败: {e}")
        st.stop()

# 如果没有找到任何集合，提示并停止页面渲染
if not collections_list:
    logger.log_system_event("get_all_collections_warning", "未找到任何集合", level="warning")
    st.warning("未找到任何集合")
    # BUG FIX: 原代码此处继续执行，下方的删除按钮引用 collection_id，
    # 而该变量只在存在集合时才会被赋值，点击按钮会抛 NameError。
    st.stop()

st.title("集合列表")
# 下拉选项显示 "集合名 (N 文档)"，选中后展示该集合的详细信息
collection_options = [
    f"{col['name']} ({col['doc_count']} 文档)" for col in collections_list
]
selected_index = st.selectbox(
    "选择知识库：",
    range(len(collection_options)),
    format_func=lambda x: collection_options[x],
    index=0,
    help="选择要用于检索的向量集合"
)
collection_data = collections_list[selected_index]
st.write(f"name: {collection_data['name']}")
st.write(f"metadata: {collection_data['metadata']}")
st.write(f"doc_count: {collection_data['doc_count']}")
collection_id = collection_data["uuid"]
st.write(f"uuid: {collection_id}")

if st.button("删除集合文档"):
    # 删除所选集合下的全部向量文档（集合记录本身保留）
    sql = text("DELETE FROM langchain_pg_embedding WHERE collection_id = :cid")
    try:
        result = session.execute(sql, {"cid": collection_id})
        session.commit()
        st.success(f"已删除 {result.rowcount} 条文档")
    except Exception as e:
        # 回滚，避免 session 停留在已中止的事务中影响后续操作
        session.rollback()
        logger.log_error("删除集合文档失败", e)
        st.error(f"删除集合文档失败: {e}")


if st.button("删除集合"):
    # 在同一事务中先删 embedding 再删集合记录：
    # 若外键没有 ON DELETE CASCADE，只删集合会违反约束；
    # 即使有级联，显式删除也避免留下孤儿 embedding 行的歧义。
    try:
        session.execute(
            text("DELETE FROM langchain_pg_embedding WHERE collection_id = :cid"),
            {"cid": collection_id},
        )
        session.execute(
            text("DELETE FROM langchain_pg_collection WHERE uuid = :uid"),
            {"uid": collection_id},
        )
        session.commit()
        st.success("集合及其文档已删除")
    except Exception as e:
        # 回滚，避免 session 停留在已中止的事务中
        session.rollback()
        logger.log_error("删除集合失败", e)
        st.error(f"删除集合失败: {e}")

# with st.button("获取知识库文档内容"):
#     st.write("🔹 浏览向量库中已存储的文档")
#     query = f"""
#         SELECT id, document, cmetadata
#         FROM langchain_pg_embedding
#         WHERE collection_id = {collection_id}
#         ORDER BY id ASC
#     """
#     df = pd.read_sql(query, engine)
#     if df.empty:
#         st.info("📭 当前集合中没有文档")
#     else:
#         page_size = st.slider("每页显示数量", 5, 50, 10, key="page_size")
#         total_pages = (len(df) // page_size) + 1
#         page = st.number_input("选择页码", 1, total_pages, 1, key="page_num")
#         start_idx = (page - 1) * page_size
#         end_idx = start_idx + page_size
#         st.dataframe(df.iloc[start_idx:end_idx])


# # -------------------------------
# # 1. 初始化配置
# # -------------------------------
# POSTGRES_USER = st.secrets["postgresql"]["username"]
# POSTGRES_PASSWORD = st.secrets["postgresql"]["password"]
# POSTGRES_HOST = st.secrets["postgresql"]["host"]
# POSTGRES_PORT = st.secrets["postgresql"]["port"]
# POSTGRES_DB = st.secrets["postgresql"]["database"]

# with st.spinner("初始化向量服务..."):
#     try:
#         embeddings = DashScopeEmbeddings(
#             model="text-embedding-v3",
#             dashscope_api_key=st.secrets["dashscope"]["key"]
#         )
#     except Exception as e:
#         st.error(f"初始化向量服务失败：{e}")
#         st.stop()


#     engine = create_engine(connection)
#     Session = sessionmaker(bind=engine)
#     session = Session()



# @st.cache_data(ttl=60)  # 缓存1分钟
# def get_all_collections() -> List[Dict[str, Any]]:
#     """
#     获取数据库中所有向量集合的详细信息
    
#     Returns:
#         List[Dict[str, Any]]: 包含集合信息的列表，每个元素包含：
#         - name: 集合名称
#         - uuid: 集合UUID
#         - metadata: 集合元数据
#         - doc_count: 文档数量
#         - first_doc_id: 第一个文档ID
#         - last_doc_id: 最后一个文档ID
#         - is_empty: 是否为空集合
#     """
#     logger = get_logger_service()
    
#     try:
#         # 获取数据库配置
#         db_config = {
#             'user': st.secrets.get("postgresql", {}).get("username") or st.secrets.get("connections", {}).get("sql", {}).get("username", "docker"),
#             'password': st.secrets.get("postgresql", {}).get("password") or st.secrets.get("connections", {}).get("sql", {}).get("password", "docker"),
#             'host': st.secrets.get("postgresql", {}).get("host") or st.secrets.get("connections", {}).get("sql", {}).get("host", "localhost"),
#             'port': st.secrets.get("postgresql", {}).get("port") or st.secrets.get("connections", {}).get("sql", {}).get("port", 5432),
#             'database': st.secrets.get("postgresql", {}).get("database") or st.secrets.get("connections", {}).get("sql", {}).get("database", "streamlit_generic")
#         }
        
#         # 连接字符串
#         connection_string = (
#             f"postgresql+psycopg://{db_config['user']}:"
#             f"{db_config['password']}@{db_config['host']}:"
#             f"{db_config['port']}/{db_config['database']}"
#         )
        
#         # 创建数据库连接
#         engine = create_engine(connection_string)
        
#         # 查询所有集合及其详细信息
#         query = text("""
#             SELECT 
#                 c.name as collection_name,
#                 c.uuid as collection_id,
#                 c.cmetadata as metadata,
#                 COUNT(e.id) as document_count,
#                 MIN(e.id) as first_doc_id,
#                 MAX(e.id) as last_doc_id
#             FROM langchain_pg_collection c
#             LEFT JOIN langchain_pg_embedding e ON c.uuid = e.collection_id
#             WHERE c.name IS NOT NULL 
#             GROUP BY c.name, c.uuid
#             ORDER BY c.name
#         """)
        
#         collections_data = []
#         with engine.connect() as conn:
#             result = conn.execute(query)
#             for row in result.fetchall():
#                 collections_data.append({
#                     "name": row[0],
#                     "uuid": str(row[1]),
#                     "metadata": row[2] or {},
#                     "doc_count": int(row[3] or 0),
#                     "first_doc_id": row[4],
#                     "last_doc_id": row[5],
#                     "is_empty": (row[3] or 0) == 0
#                 })
        
#         # 如果没有找到任何集合，返回空列表
#         if not collections_data:
#             logger.log_system_event("get_all_collections_warning", "未找到任何集合", level="warning")
            
#         logger.log_system_event("get_all_collections_success", f"成功获取 {len(collections_data)} 个集合")
#         return collections_data
        
#     except Exception as e:
#         logger.log_system_event("get_all_collections_error", f"获取集合列表失败: {e}", level="error")
#         return []





# # PGVector 初始化
# collection_name = "test_vectorstore"
# connection = f"postgresql+psycopg://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DB}"
# vector_store = PGVector(
#     embeddings=embeddings,
#     collection_name=collection_name,
#     connection=connection,
#     use_jsonb=True,
#     distance_strategy="cosine",  # 支持: cosine, euclidean, max_inner_product
# )

# # 异步操作配置 - 在 Streamlit 中使用同步方法模拟异步
# # 由于 Streamlit 的单线程特性，直接异步操作会遇到 greenlet 问题
# def simulate_async_operation(operation_name, func, *args, **kwargs):
#     """模拟异步操作的辅助函数"""
#     import time
#     with st.spinner(f"正在执行{operation_name}..."):
#         time.sleep(0.5)  # 模拟异步延迟
#         return func(*args, **kwargs)



# st.title("🔍 LangChain + PGVector 测试与管理面板")
# st.markdown("**增强功能**: 元数据过滤 | MMR搜索 | 异步操作 | 高级配置")
# st.markdown("---")

# # 配置面板
# with st.expander("⚙️ **向量库配置信息**"):
#     col1, col2 = st.columns(2)
#     with col1:
#         st.json({
#             "collection_name": collection_name,
#             "distance_strategy": "cosine",
#             "embedding_model": "text-embedding-v3",
#             "use_jsonb": True
#         })
#     with col2:
#         st.json({
#             "database": POSTGRES_DB,
#             "host": POSTGRES_HOST,
#             "port": POSTGRES_PORT,
#             "username": POSTGRES_USER
#         })

# # ===================================================================
# # 1️⃣ Embedding 测试
# # ===================================================================
# st.subheader("📌 Embedding 测试")

# text_input = st.text_input("输入单条文本生成向量", "This is a test document.")
# if st.button("生成单条向量"):
#     vector = embeddings.embed_query(text_input)
#     st.write(f"向量维度: {len(vector)}")
#     st.write(f"前 5 个向量值: {vector[:5]}")

# docs_input = st.text_area("输入批量文本（每行一条）", "foo\nbar\nbaz")
# if st.button("生成批量向量"):
#     doc_list = docs_input.split("\n")
#     vectors = embeddings.embed_documents(doc_list)
#     for i, vec in enumerate(vectors):
#         st.write(f"{i+1}: 前 5 个向量值: {vec[:5]}")

# st.markdown("---")

# # ===================================================================
# # 2️⃣ 文档管理
# # ===================================================================
# st.subheader("📌 文档管理")

# # 添加示例文档
# if st.button("添加测试文档"):
#     docs = [
#         Document(page_content="there are cats in the pond", metadata={"id": 1, "topic": "animals"}),
#         Document(page_content="ducks are also found in the pond", metadata={"id": 2, "topic": "animals"}),
#         Document(page_content="fresh apples are available at the market", metadata={"id": 3, "topic": "food"}),
#         Document(page_content="the market also sells fresh oranges", metadata={"id": 4, "topic": "food"}),
#         Document(page_content="the new art exhibit is fascinating", metadata={"id": 5, "topic": "art"}),
#     ]
#     vector_store.add_documents(docs, ids=[doc.metadata["id"] for doc in docs])
#     st.success("✅ 已添加测试文档！")

# # 删除文档
# delete_id = st.text_input("输入要删除的文档 ID")
# if st.button("删除文档"):
#     vector_store.delete(ids=[delete_id])
#     st.warning(f"已删除 ID={delete_id} 的文档")

# # 批量导入 JSON 文档
# uploaded_file = st.file_uploader("上传 JSON 文档批量导入", type=["json"])
# if uploaded_file is not None:
#     try:
#         data = json.load(uploaded_file)
#         docs = [Document(page_content=item["page_content"], metadata=item.get("metadata", {})) for item in data]
#         vector_store.add_documents(docs, ids=[doc.metadata.get("id") for doc in docs])
#         st.success(f"✅ 成功导入 {len(docs)} 条文档！")
#     except Exception as e:
#         st.error(f"导入失败: {e}")

# # 文档更新功能
# st.write("🔄 **文档更新**")
# update_id = st.text_input("要更新的文档 ID")
# if update_id:
#     new_content = st.text_area("新内容", key="update_content")
#     new_metadata_str = st.text_area("新元数据 (JSON 格式)", '{"topic": "updated"}', key="update_metadata")
#     if st.button("更新文档"):
#         try:
#             new_metadata = json.loads(new_metadata_str)
#             # 先删除旧文档，再添加新文档
#             vector_store.delete(ids=[update_id])
#             new_doc = Document(page_content=new_content, metadata=new_metadata)
#             vector_store.add_documents([new_doc], ids=[update_id])
#             st.success(f"✅ 文档 ID={update_id} 更新成功！")
#         except json.JSONDecodeError:
#             st.error("元数据格式错误，请使用正确的 JSON 格式")
#         except Exception as e:
#             st.error(f"更新失败: {e}")

# st.markdown("---")

# # ===================================================================
# # 3️⃣ 相似度搜索
# # ===================================================================
# st.subheader("📌 相似度搜索")
# query = st.text_input("输入搜索问题", "cats")
# k = st.slider("返回数量 k", 1, 10, 3)

# # 元数据过滤
# st.write("🔍 **元数据过滤选项**")
# col1, col2 = st.columns(2)
# with col1:
#     use_filter = st.checkbox("启用元数据过滤")
# with col2:
#     if use_filter:
#         filter_key = st.text_input("过滤字段", "topic")
#         filter_value = st.text_input("过滤值", "animals")

# if st.button("普通搜索"):
#     filter_dict = {filter_key: filter_value} if use_filter and filter_key and filter_value else None
#     results = vector_store.similarity_search(query, k=k, filter=filter_dict)
#     for doc in results:
#         st.write(f"📄 {doc.page_content} | Metadata={doc.metadata}")

# if st.button("带分数搜索"):
#     filter_dict = {filter_key: filter_value} if use_filter and filter_key and filter_value else None
#     results = vector_store.similarity_search_with_score(query=query, k=k, filter=filter_dict)
#     for doc, score in results:
#         st.write(f"[SIM={score:.3f}] {doc.page_content} | Metadata={doc.metadata}")

# # 按向量搜索（先用 embedding 模型生成查询向量，再直接按向量检索）
# st.write("🔍 **按向量搜索**")
# if st.button("按向量搜索 (similarity_search_by_vector)"):
#     query_vector = embeddings.embed_query(query)
#     results = vector_store.similarity_search_by_vector(query_vector, k=k)
#     for doc in results:
#         st.write(f"🔍 {doc.page_content} | Metadata={doc.metadata}")

# st.markdown("---")

# # ===================================================================
# # 4️⃣ MMR & Retriever 测试
# # ===================================================================
# st.subheader("📌 MMR & Retriever 测试")

# # MMR 搜索
# st.write("🔄 **MMR (最大边际相关性) 搜索**")
# col1, col2, col3 = st.columns(3)
# with col1:
#     mmr_k = st.slider("MMR 返回数量", 1, 10, 4, key="mmr_k")
# with col2:
#     fetch_k = st.slider("获取数量", 5, 50, 20, key="fetch_k")
# with col3:
#     lambda_mult = st.slider("多样性参数", 0.0, 1.0, 0.5, key="lambda_mult")

# if st.button("MMR 搜索"):
#     results = vector_store.max_marginal_relevance_search(
#         query=query, 
#         k=mmr_k, 
#         fetch_k=fetch_k, 
#         lambda_mult=lambda_mult
#     )
#     for i, doc in enumerate(results):
#         st.write(f"📌 {i+1}: {doc.page_content} | Metadata={doc.metadata}")

# st.markdown("---")

# # Retriever 测试
# st.write("🔍 **Retriever 测试**")
# search_type = st.selectbox("选择搜索方式", ["similarity", "mmr"])
# if search_type == "mmr":
#     search_kwargs = {"k": k, "fetch_k": fetch_k, "lambda_mult": lambda_mult}
# else:
#     search_kwargs = {"k": k}

# retriever = vector_store.as_retriever(search_type=search_type, search_kwargs=search_kwargs)
# if st.button("执行 Retriever"):
#     results = retriever.invoke(query)
#     for doc in results:
#         st.write(f"📌 {doc.page_content} | Metadata={doc.metadata}")

# st.markdown("---")

# # ===================================================================
# # 5️⃣ 网页加载 & 向量化
# # ===================================================================
# st.subheader("🌐 网页加载 & 文档入库")
# page_url = st.text_input("输入网页 URL", "https://python.langchain.ac.cn/docs/how_to/chatbots_memory/")
# if st.button("加载网页并向量化"):
#     loader = WebBaseLoader(web_paths=[page_url])
#     docs = loader.load()
#     st.write(f"共加载 {len(docs)} 篇文档")
#     splitter = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50)
#     split_docs = splitter.split_documents(docs)
#     st.write(f"分块后文档数量: {len(split_docs)}")
#     vector_store.add_documents(split_docs)
#     st.success("✅ 网页内容已分块并写入向量库！")

# st.markdown("---")

# # ===================================================================
# # 6️⃣ 向量库管理面板
# # ===================================================================
# st.subheader("📦 向量库管理")
# tab1, tab2, tab3 = st.tabs(["📄 查看文档", "📊 集合统计", "⚠️ 高级操作"])

# # TAB1: 分页浏览 & 导出
# with tab1:
#     st.write("🔹 浏览向量库中已存储的文档")
#     query = f"""
#         SELECT id, document, cmetadata
#         FROM langchain_pg_embedding
#         WHERE collection_id = (
#             SELECT uuid FROM langchain_pg_collection
#             WHERE name = '{collection_name}'
#         )
#         ORDER BY id ASC
#     """
#     df = pd.read_sql(query, engine)
#     if df.empty:
#         st.info("📭 当前集合中没有文档")
#     else:
#         page_size = st.slider("每页显示数量", 5, 50, 10, key="page_size")
#         total_pages = (len(df) // page_size) + 1
#         page = st.number_input("选择页码", 1, total_pages, 1, key="page_num")
#         start_idx = (page - 1) * page_size
#         end_idx = start_idx + page_size
#         st.dataframe(df.iloc[start_idx:end_idx])
#         # 修复导出 CSV 问题
#         export_df = df.copy()
#         if 'id' in export_df.columns:
#             export_df['id'] = export_df['id'].astype(str)
#         st.download_button("📥 导出 CSV", export_df.to_csv(index=False).encode("utf-8"), "vectorstore_export.csv", "text/csv")

# # TAB2: 集合统计
# with tab2:
#     try:
#         count_query = f"""
#             SELECT COUNT(*) FROM langchain_pg_embedding
#             WHERE collection_id = (
#                 SELECT uuid FROM langchain_pg_collection
#                 WHERE name = '{collection_name}'
#             )
#         """
#         count = pd.read_sql(count_query, engine).iloc[0,0]
#         dim = getattr(embeddings, "embedding_dimensions", None) or 1536
#         # 获取更多统计信息
#         collection_info_query = f"""
#             SELECT 
#                 name,
#                 uuid,
#                 cmetadata
#             FROM langchain_pg_collection 
#             WHERE name = '{collection_name}'
#         """
#         collection_df = pd.read_sql(collection_info_query, engine)
        
#         # 获取元数据统计
#         metadata_stats_query = f"""
#             SELECT 
#                 json_object_keys(cmetadata) as metadata_key,
#                 COUNT(*) as count
#             FROM langchain_pg_embedding e
#             JOIN langchain_pg_collection c ON e.collection_id = c.uuid
#             WHERE c.name = '{collection_name}'
#             GROUP BY json_object_keys(cmetadata)
#             ORDER BY count DESC
#         """
#         try:
#             metadata_df = pd.read_sql(metadata_stats_query, engine)
#         except Exception:
#             metadata_df = pd.DataFrame()
#         st.json({"collection_name": collection_name, "文档数量": count, "embedding维度": dim})
        
#         if not collection_df.empty:
#             st.write("**集合详情:**")
#             # 修复 UUID 类型显示问题
#             collection_display = collection_df.copy()
#             if 'uuid' in collection_display.columns:
#                 collection_display['uuid'] = collection_display['uuid'].astype(str)
#             st.dataframe(collection_display)
        
#         if not metadata_df.empty:
#             st.write("**元数据统计:**")
#             st.dataframe(metadata_df)
#     except Exception as e:
#         st.error(f"统计失败: {e}")

# # TAB3: 高级操作
# with tab3:
#     st.warning("⚠️ 危险操作，请谨慎使用！")
    
#     # 模拟异步操作测试
#     st.subheader("🚀 模拟异步操作测试")
#     st.info("💡 **在 Streamlit 中模拟异步操作**\n\n由于 Streamlit 的单线程特性，直接异步操作会遇到 greenlet 问题。\n这里使用同步方法模拟异步操作的效果。")
    
#     async_query = st.text_input("模拟异步搜索查询", "cats", key="async_query")
    
#     col1, col2 = st.columns(2)
    
#     with col1:
#         if st.button("🚀 模拟异步相似度搜索"):
#             results = simulate_async_operation(
#                 "异步相似度搜索",
#                 vector_store.similarity_search,
#                 async_query, k=3
#             )
#             st.write("**模拟异步搜索结果:**")
#             for i, doc in enumerate(results):
#                 st.write(f"⚙️ {i+1}: {doc.page_content} | Metadata={doc.metadata}")
    
#     with col2:
#         if st.button("🚀 模拟异步带分数搜索"):
#             results = simulate_async_operation(
#                 "异步带分数搜索",
#                 vector_store.similarity_search_with_score,
#                 async_query, k=3
#             )
#             st.write("**模拟异步带分数搜索结果:**")
#             for i, (doc, score) in enumerate(results):
#                 st.write(f"⚙️ {i+1}: [SIM={score:.3f}] {doc.page_content} | Metadata={doc.metadata}")
    
#     # 异步操作的技术说明
#     with st.expander("📄 关于 Streamlit 中的异步操作"):
#         st.markdown("""
#         **为什么在 Streamlit 中异步操作困难？**
        
#         1. **单线程环境**: Streamlit 运行在单线程中，与异步的事件循环冲突
#         2. **Greenlet 限制**: SQLAlchemy 的异步支持依赖于 greenlet，在 Streamlit 中不可用
#         3. **事件循环管理**: Streamlit 的内部事件循环与 asyncio 冲突
        
#         **替代方案：**
#         - 使用同步方法加上加载动画模拟异步效果
#         - 在独立的 FastAPI 服务中使用真正的异步操作
#         - 使用线程池或进程池处理计算密集型任务
#         """)
    
#     st.markdown("---")
    
#     if st.button("🗑 清空集合"):
#         try:
#             vector_store.delete(ids=None)
#             st.success("✅ 已清空集合！")
#         except Exception as e:
#             st.error(f"清空失败: {e}")
