# """
# 向量服务测试页面
# 测试 VectorService 的各种功能
# """
import streamlit as st
import json
import time
from typing import Dict, Any, List
from langchain_core.documents import Document
from service import get_vector_service, create_vector_service
import pandas as pd

# Page-level Streamlit configuration and header banner.
_PAGE_SETTINGS = {
    "page_title": "向量服务测试",
    "page_icon": "🧪",
    "layout": "centered",
}
st.set_page_config(**_PAGE_SETTINGS)

st.title("🧪 向量服务 (VectorService) 测试页面")
st.markdown("**测试基于服务层封装的向量数据库功能**")
st.markdown("---")

# # ===================================================================
# # 服务配置区域
# # ===================================================================
st.subheader("1.获取集合列表")

from service.vector_service import get_all_collections

with st.spinner("正在获取集合列表..."):
    collections = get_all_collections()
    if collections is None:
        st.error("获取集合列表失败！")
        st.stop()
    st.success("获取集合列表成功！")
    collection_names = [col["name"] for col in collections]
    collection_options = []
    for col in collections:
        option_text = f"{col['name']} ({col['doc_count']} 文档)"
        collection_options.append(option_text)
    selected_index = st.selectbox(
        "选择知识库：",
        range(len(collection_options)),
        format_func=lambda x: collection_options[x],
        index=0,
        help="选择要用于检索的向量集合"
    )
    selected_collection = collection_names[selected_index]
    collection_data = collections[selected_index]
    st.write(f"name: {collection_data['name']}")
    st.write(f"uuid: {collection_data['uuid']}")
    st.write(f"metadata: {collection_data['metadata']}")
    st.write(f"doc_count: {collection_data['doc_count']}")

collection_id = collection_data["uuid"]
st.write(f"uuid: {collection_id}")

st.write("🔹 浏览向量库中已存储的文档")
query = f"""
    SELECT id, document, cmetadata
    FROM langchain_pg_embedding
    WHERE collection_id = '{collection_id}'
    ORDER BY id ASC
"""
df = pd.read_sql(query, engine)
if df.empty:
    st.info("📭 当前集合中没有文档")
else:
    page_size = st.slider("每页显示数量", 5, 50, 10, key="page_size")
    total_pages = (len(df) // page_size) + 1
    page = st.number_input("选择页码", 1, total_pages, 1, key="page_num")
    start_idx = (page - 1) * page_size
    end_idx = start_idx + page_size
    st.dataframe(df.iloc[start_idx:end_idx])

# st.subheader("1.删除集合")
# if st.button("删除集合"):
#     delete_collection(selected_collection)








# col1, col2, col3 = st.columns(3)
# with col1:
#     collection_name = st.text_input("集合名称", "vector_service_test", key="collection_name")
# with col2:
#     embeddings_model = st.selectbox(
#         "嵌入模型", 
#         ["text-embedding-v3", "text-embedding-v4"], 
#         key="embeddings_model"
#     )
# with col3:
#     distance_strategy = st.selectbox(
#         "距离策略",
#         ["cosine", "euclidean", "max_inner_product"],
#         key="distance_strategy"
#     )

# # 初始化服务
# @st.cache_data
# def get_service_config():
#     return {
#         "collection_name": collection_name,
#         "embeddings_model": embeddings_model,
#         "distance_strategy": distance_strategy
#     }

# try:
#     vector_service = get_vector_service(
#         collection_name=collection_name,
#         embeddings_model=embeddings_model,
#         distance_strategy=distance_strategy
#     )
#     st.success(f"✅ 向量服务初始化成功: {collection_name}")
# except Exception as e:
#     st.error(f"❌ 向量服务初始化失败: {e}")
#     st.stop()

# # 显示服务状态
# with st.expander("📊 服务状态信息"):
#     status = vector_service.get_service_status()
#     col1, col2 = st.columns(2)
#     with col1:
#         st.json({k: v for k, v in status.items() if k != "database_config"})
#     with col2:
#         if "database_config" in status:
#             st.json(status["database_config"])

# st.markdown("---")

# # ===================================================================
# # 功能测试选项卡
# # ===================================================================
# tab1, tab2, tab3, tab4, tab5 = st.tabs([
#     "🏗️ 集合管理", 
#     "📝 文档操作", 
#     "🔍 搜索功能", 
#     "📊 数据管理", 
#     "🧪 综合测试"
# ])

# # ===================================================================
# # TAB1: 集合管理
# # ===================================================================
# with tab1:
#     st.subheader("🏗️ 集合管理测试")
    
#     col1, col2 = st.columns(2)
    
#     with col1:
#         st.write("**创建集合**")
#         metadata_input = st.text_area(
#             "集合元数据 (JSON)",
#             '{"description": "测试集合", "version": "1.0"}',
#             key="collection_metadata"
#         )
        
#         if st.button("创建集合", key="create_collection"):
#             try:
#                 metadata = json.loads(metadata_input) if metadata_input else None
#                 success = vector_service.create_collection(metadata)
#                 if success:
#                     st.success("✅ 集合创建成功！")
#                 else:
#                     st.error("❌ 集合创建失败")
#             except json.JSONDecodeError:
#                 st.error("❌ 元数据格式错误，请检查JSON格式")
#             except Exception as e:
#                 st.error(f"❌ 创建失败: {e}")
    
#     with col2:
#         st.write("**集合信息**")
#         if st.button("获取集合信息", key="get_collection_info"):
#             info = vector_service.get_collection_info()
#             st.json({
#                 "名称": info.name,
#                 "UUID": info.uuid,
#                 "文档数量": info.document_count,
#                 "元数据": info.metadata
#             })
    
#     st.markdown("---")
    
#     # 清空集合
#     st.write("**⚠️ 危险操作**")
#     col1, col2 = st.columns([1, 3])
#     with col1:
#         if st.button("🗑️ 清空集合", type="secondary"):
#             success = vector_service.clear_collection()
#             if success:
#                 st.success("✅ 集合已清空")
#             else:
#                 st.error("❌ 清空失败")
#     with col2:
#         st.warning("此操作将删除集合中的所有文档，请谨慎使用！")

# # ===================================================================
# # TAB2: 文档操作
# # ===================================================================
# with tab2:
#     st.subheader("📝 文档操作测试")
    
#     # 添加示例文档
#     st.write("**批量添加示例文档**")
#     if st.button("添加测试文档集", key="add_sample_docs"):
#         sample_docs = [
#             Document(
#                 page_content="人工智能是计算机科学的一个分支，致力于创造能够执行通常需要人类智能的任务的机器。",
#                 metadata={"category": "AI", "language": "zh", "source": "sample", "id": "doc_1"}
#             ),
#             Document(
#                 page_content="机器学习是人工智能的一个子领域，专注于开发能够从数据中学习的算法。",
#                 metadata={"category": "ML", "language": "zh", "source": "sample", "id": "doc_2"}
#             ),
#             Document(
#                 page_content="深度学习使用神经网络来模拟人脑的学习过程，在图像识别和自然语言处理方面取得了巨大成功。",
#                 metadata={"category": "DL", "language": "zh", "source": "sample", "id": "doc_3"}
#             ),
#             Document(
#                 page_content="Natural language processing (NLP) enables computers to understand and generate human language.",
#                 metadata={"category": "NLP", "language": "en", "source": "sample", "id": "doc_4"}
#             ),
#             Document(
#                 page_content="Computer vision allows machines to interpret and understand visual information from the world.",
#                 metadata={"category": "CV", "language": "en", "source": "sample", "id": "doc_5"}
#             )
#         ]
        
#         ids = [doc.metadata["id"] for doc in sample_docs]
#         success = vector_service.add_documents(sample_docs, ids)
#         if success:
#             st.success(f"✅ 已添加 {len(sample_docs)} 个测试文档")
#         else:
#             st.error("❌ 文档添加失败")
    
#     st.markdown("---")
    
#     # 添加自定义文档
#     col1, col2 = st.columns(2)
    
#     with col1:
#         st.write("**添加自定义文档**")
#         doc_content = st.text_area("文档内容", key="custom_doc_content")
#         doc_metadata = st.text_area(
#             "文档元数据 (JSON)", 
#             '{"category": "custom", "author": "user"}',
#             key="custom_doc_metadata"
#         )
#         doc_id = st.text_input("文档ID (可选)", key="custom_doc_id")
        
#         if st.button("添加文档", key="add_custom_doc"):
#             if doc_content:
#                 try:
#                     metadata = json.loads(doc_metadata) if doc_metadata else {}
#                     doc = Document(page_content=doc_content, metadata=metadata)
#                     ids = [doc_id] if doc_id else None
#                     success = vector_service.add_documents([doc], ids)
#                     if success:
#                         st.success("✅ 文档添加成功")
#                     else:
#                         st.error("❌ 文档添加失败")
#                 except json.JSONDecodeError:
#                     st.error("❌ 元数据格式错误")
#                 except Exception as e:
#                     st.error(f"❌ 添加失败: {e}")
#             else:
#                 st.warning("请输入文档内容")
    
#     with col2:
#         st.write("**添加文本列表**")
#         texts_input = st.text_area(
#             "文本列表 (每行一个)",
#             "机器人学习如何与人类交互\n数据科学帮助我们理解复杂的数据模式\n云计算提供了可扩展的计算资源",
#             key="texts_input"
#         )
        
#         if st.button("添加文本列表", key="add_texts"):
#             if texts_input:
#                 texts = [t.strip() for t in texts_input.split('\n') if t.strip()]
#                 metadatas = [{"type": "batch_text", "index": i} for i in range(len(texts))]
#                 success = vector_service.add_texts(texts, metadatas)
#                 if success:
#                     st.success(f"✅ 已添加 {len(texts)} 个文本")
#                 else:
#                     st.error("❌ 文本添加失败")
    
#     st.markdown("---")
    
#     # 更新和删除文档
#     col1, col2 = st.columns(2)
    
#     with col1:
#         st.write("**更新文档**")
#         update_id = st.text_input("要更新的文档ID", key="update_doc_id")
#         update_content = st.text_area("新内容", key="update_doc_content")
#         update_metadata = st.text_area(
#             "新元数据 (JSON)", 
#             '{"updated": true}',
#             key="update_doc_metadata"
#         )
        
#         if st.button("更新文档", key="update_doc"):
#             if update_id and update_content:
#                 try:
#                     metadata = json.loads(update_metadata) if update_metadata else {}
#                     success = vector_service.update_document(update_id, update_content, metadata)
#                     if success:
#                         st.success("✅ 文档更新成功")
#                     else:
#                         st.error("❌ 文档更新失败")
#                 except json.JSONDecodeError:
#                     st.error("❌ 元数据格式错误")
#                 except Exception as e:
#                     st.error(f"❌ 更新失败: {e}")
#             else:
#                 st.warning("请输入文档ID和内容")
    
#     with col2:
#         st.write("**删除文档**")
#         delete_ids = st.text_input(
#             "要删除的文档ID (多个用逗号分隔)",
#             key="delete_doc_ids"
#         )
        
#         if st.button("删除文档", key="delete_docs"):
#             if delete_ids:
#                 ids = [id.strip() for id in delete_ids.split(',') if id.strip()]
#                 success = vector_service.delete_documents(ids)
#                 if success:
#                     st.success(f"✅ 已删除 {len(ids)} 个文档")
#                 else:
#                     st.error("❌ 文档删除失败")
#             else:
#                 st.warning("请输入要删除的文档ID")

# # ===================================================================
# # TAB3: 搜索功能
# # ===================================================================
# with tab3:
#     st.subheader("🔍 搜索功能测试")
    
#     # 搜索配置
#     col1, col2, col3 = st.columns(3)
#     with col1:
#         search_query = st.text_input("搜索查询", "人工智能", key="search_query")
#     with col2:
#         search_k = st.slider("返回结果数", 1, 10, 3, key="search_k")
#     with col3:
#         use_filter = st.checkbox("使用元数据过滤", key="use_search_filter")
    
#     # 元数据过滤
#     if use_filter:
#         col1, col2 = st.columns(2)
#         with col1:
#             filter_key = st.text_input("过滤字段", "category", key="filter_key")
#         with col2:
#             filter_value = st.text_input("过滤值", "AI", key="filter_value")
    
#     # 搜索按钮组
#     col1, col2, col3, col4 = st.columns(4)
    
#     with col1:
#         if st.button("🔍 相似度搜索", key="similarity_search"):
#             if search_query:
#                 filter_dict = None
#                 if use_filter and filter_key and filter_value:
#                     filter_dict = {filter_key: filter_value}
                
#                 start_time = time.time()
#                 results = vector_service.similarity_search(search_query, search_k, filter_dict)
#                 end_time = time.time()
                
#                 st.write(f"**搜索结果** (耗时: {end_time - start_time:.3f}s)")
#                 for i, result in enumerate(results, 1):
#                     with st.expander(f"结果 {i}: {result.content[:50]}..."):
#                         st.write(f"**内容**: {result.content}")
#                         st.json(result.metadata)
    
#     with col2:
#         if st.button("📊 带分数搜索", key="similarity_search_score"):
#             if search_query:
#                 filter_dict = None
#                 if use_filter and filter_key and filter_value:
#                     filter_dict = {filter_key: filter_value}
                
#                 start_time = time.time()
#                 results = vector_service.similarity_search_with_score(search_query, search_k, filter_dict)
#                 end_time = time.time()
                
#                 st.write(f"**带分数搜索结果** (耗时: {end_time - start_time:.3f}s)")
#                 for i, result in enumerate(results, 1):
#                     score_color = "🟢" if result.score < 0.3 else "🟡" if result.score < 0.7 else "🔴"
#                     with st.expander(f"{score_color} 结果 {i} (相似度: {result.score:.3f}): {result.content[:50]}..."):
#                         st.write(f"**相似度分数**: {result.score:.4f}")
#                         st.write(f"**内容**: {result.content}")
#                         st.json(result.metadata)
    
#     with col3:
#         if st.button("🔄 MMR搜索", key="mmr_search"):
#             if search_query:
#                 # MMR参数
#                 fetch_k = st.session_state.get('mmr_fetch_k', 20)
#                 lambda_mult = st.session_state.get('mmr_lambda', 0.5)
                
#                 start_time = time.time()
#                 results = vector_service.max_marginal_relevance_search(
#                     search_query, search_k, fetch_k, lambda_mult
#                 )
#                 end_time = time.time()
                
#                 st.write(f"**MMR搜索结果** (耗时: {end_time - start_time:.3f}s)")
#                 for i, result in enumerate(results, 1):
#                     with st.expander(f"🔄 结果 {i}: {result.content[:50]}..."):
#                         st.write(f"**内容**: {result.content}")
#                         st.json(result.metadata)
    
#     with col4:
#         if st.button("⚡ 向量搜索", key="vector_search"):
#             if search_query:
#                 # 先生成查询向量
#                 query_vector = vector_service.embeddings.embed_query(search_query)
                
#                 start_time = time.time()
#                 results = vector_service.similarity_search_by_vector(query_vector, search_k)
#                 end_time = time.time()
                
#                 st.write(f"**向量搜索结果** (耗时: {end_time - start_time:.3f}s)")
#                 st.info(f"查询向量维度: {len(query_vector)}")
#                 for i, result in enumerate(results, 1):
#                     with st.expander(f"⚡ 结果 {i}: {result.content[:50]}..."):
#                         st.write(f"**内容**: {result.content}")
#                         st.json(result.metadata)
    
#     # MMR 参数配置
#     if st.checkbox("显示MMR参数配置", key="show_mmr_config"):
#         col1, col2 = st.columns(2)
#         with col1:
#             fetch_k = st.slider("Fetch K (候选文档数)", 5, 50, 20, key="mmr_fetch_k")
#         with col2:
#             lambda_mult = st.slider("Lambda (多样性参数)", 0.0, 1.0, 0.5, key="mmr_lambda")

# # ===================================================================
# # TAB4: 数据管理
# # ===================================================================
# with tab4:
#     st.subheader("📊 数据管理测试")
    
#     # 文档浏览
#     col1, col2 = st.columns([2, 1])
    
#     with col1:
#         st.write("**文档浏览**")
#         page_size = st.slider("每页显示数量", 5, 50, 10, key="page_size")
#     with col2:
#         if st.button("刷新文档列表", key="refresh_docs"):
#             st.rerun()
    
#     # 获取文档列表
#     docs_df = vector_service.get_documents(limit=page_size * 3)  # 获取更多用于分页
    
#     if not docs_df.empty:
#         total_docs = len(docs_df)
#         total_pages = (total_docs - 1) // page_size + 1
        
#         col1, col2, col3 = st.columns([1, 2, 1])
#         with col2:
#             current_page = st.number_input(
#                 f"页码 (共 {total_pages} 页, {total_docs} 个文档)", 
#                 1, total_pages, 1, key="current_page"
#             )
        
#         start_idx = (current_page - 1) * page_size
#         end_idx = min(start_idx + page_size, total_docs)
#         page_docs = docs_df.iloc[start_idx:end_idx]
        
#         st.dataframe(
#             page_docs,
#             use_container_width=True,
#             hide_index=True
#         )
        
#         # 导出功能
#         col1, col2 = st.columns(2)
#         with col1:
#             if st.button("📥 导出所有文档", key="export_docs"):
#                 export_data = vector_service.export_documents()
#                 st.download_button(
#                     "下载JSON文件",
#                     export_data,
#                     f"{collection_name}_export.json",
#                     "application/json"
#                 )
        
#         with col2:
#             # 上传文档
#             uploaded_file = st.file_uploader("📤 上传JSON文档", type=["json"], key="upload_docs")
#             if uploaded_file is not None:
#                 try:
#                     data = json.load(uploaded_file)
#                     if isinstance(data, list):
#                         documents = []
#                         ids = []
#                         for item in data:
#                             doc = Document(
#                                 page_content=item.get("page_content", ""),
#                                 metadata=item.get("metadata", {})
#                             )
#                             documents.append(doc)
#                             if "id" in item:
#                                 ids.append(str(item["id"]))
                        
#                         success = vector_service.add_documents(documents, ids if ids else None)
#                         if success:
#                             st.success(f"✅ 成功导入 {len(documents)} 个文档")
#                             st.rerun()
#                         else:
#                             st.error("❌ 文档导入失败")
#                     else:
#                         st.error("❌ 文件格式错误，需要JSON数组")
#                 except json.JSONDecodeError:
#                     st.error("❌ JSON文件格式错误")
#                 except Exception as e:
#                     st.error(f"❌ 导入失败: {e}")
#     else:
#         st.info("📭 当前集合中没有文档")
    
#     st.markdown("---")
    
#     # 统计信息
#     col1, col2 = st.columns(2)
    
#     with col1:
#         st.write("**集合统计**")
#         collection_info = vector_service.get_collection_info()
#         st.metric("文档总数", collection_info.document_count)
#         st.metric("嵌入维度", vector_service.get_embedding_dimension())
    
#     with col2:
#         st.write("**元数据统计**")
#         metadata_stats = vector_service.get_metadata_stats()
#         if not metadata_stats.empty:
#             st.dataframe(metadata_stats, use_container_width=True)
#         else:
#             st.info("暂无元数据统计")

# # ===================================================================
# # TAB5: 综合测试
# # ===================================================================
# with tab5:
#     st.subheader("🧪 综合测试")
    
#     # 性能测试
#     st.write("**⚡ 性能测试**")
    
#     col1, col2, col3 = st.columns(3)
#     with col1:
#         perf_docs_count = st.number_input("测试文档数量", 10, 1000, 100, key="perf_docs_count")
#     with col2:
#         perf_query_count = st.number_input("查询次数", 1, 50, 10, key="perf_query_count")
#     with col3:
#         perf_k = st.number_input("每次查询返回数", 1, 20, 5, key="perf_k")
    
#     if st.button("🚀 开始性能测试", key="start_perf_test"):
#         progress_bar = st.progress(0)
#         status_text = st.empty()
        
#         # 创建测试文档
#         status_text.text("生成测试文档...")
#         test_docs = []
#         for i in range(perf_docs_count):
#             content = f"这是测试文档 {i+1}，包含一些关于人工智能、机器学习和数据科学的内容。"
#             metadata = {"test_id": i+1, "batch": "performance_test"}
#             test_docs.append(Document(page_content=content, metadata=metadata))
#             progress_bar.progress((i+1) / perf_docs_count * 0.3)
        
#         # 添加文档
#         status_text.text("添加测试文档...")
#         start_time = time.time()
#         vector_service.add_documents(test_docs)
#         add_time = time.time() - start_time
#         progress_bar.progress(0.5)
        
#         # 执行查询测试
#         status_text.text("执行查询测试...")
#         queries = ["人工智能", "机器学习", "数据科学", "深度学习", "自然语言处理"]
        
#         query_times = []
#         for i in range(perf_query_count):
#             query = queries[i % len(queries)]
#             start_time = time.time()
#             results = vector_service.similarity_search(query, perf_k)
#             query_time = time.time() - start_time
#             query_times.append(query_time)
#             progress_bar.progress(0.5 + (i+1) / perf_query_count * 0.5)
        
#         # 显示结果
#         progress_bar.progress(1.0)
#         status_text.text("测试完成！")
        
#         col1, col2, col3, col4 = st.columns(4)
#         with col1:
#             st.metric("文档添加时间", f"{add_time:.2f}s")
#         with col2:
#             st.metric("平均查询时间", f"{sum(query_times)/len(query_times):.3f}s")
#         with col3:
#             st.metric("最快查询", f"{min(query_times):.3f}s")
#         with col4:
#             st.metric("最慢查询", f"{max(query_times):.3f}s")
        
#         # 清理测试数据
#         if st.button("清理测试数据", key="cleanup_test_data"):
#             # 删除测试文档
#             test_ids = [str(i+1) for i in range(perf_docs_count)]
#             vector_service.delete_documents(test_ids)
#             st.success("✅ 测试数据已清理")
    
#     st.markdown("---")
    
#     # 错误处理测试
#     st.write("**🛠️ 错误处理测试**")
    
#     col1, col2 = st.columns(2)
    
#     with col1:
#         if st.button("测试无效查询", key="test_invalid_query"):
#             try:
#                 results = vector_service.similarity_search("", k=5)
#                 st.info(f"空查询返回 {len(results)} 个结果")
#             except Exception as e:
#                 st.error(f"错误: {e}")
    
#     with col2:
#         if st.button("测试无效文档ID", key="test_invalid_id"):
#             try:
#                 success = vector_service.delete_documents(["nonexistent_id_12345"])
#                 if success:
#                     st.info("删除不存在的ID操作成功")
#                 else:
#                     st.warning("删除操作返回失败")
#             except Exception as e:
#                 st.error(f"错误: {e}")
    
#     # 服务状态监控
#     st.write("**📊 服务监控**")
#     if st.button("刷新服务状态", key="refresh_service_status"):
#         status = vector_service.get_service_status()
        
#         col1, col2 = st.columns(2)
#         with col1:
#             st.json({
#                 "集合名称": status.get("collection_name"),
#                 "文档数量": status.get("document_count"),
#                 "嵌入模型": status.get("embeddings_model"),
#                 "距离策略": status.get("distance_strategy"),
#                 "服务状态": status.get("status")
#             })
        
#         with col2:
#             if "database_config" in status:
#                 st.json(status["database_config"])

# # ===================================================================
# # 底部信息
# # ===================================================================
# st.markdown("---")
# st.info("""
# 💡 **使用提示**:
# - 建议先在"集合管理"标签页创建集合并添加测试数据
# - 在"文档操作"标签页可以管理文档的增删改
# - "搜索功能"标签页提供多种搜索方式的测试
# - "数据管理"标签页用于浏览和导入导出数据
# - "综合测试"标签页包含性能测试和错误处理测试
# """)