import streamlit as st
import os
import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
from PIL import Image
import time
import glob
import zipfile
import io

from models.evaluator import Evaluator
from utils.helpers import get_products_from_data_dir, get_product_stats, create_experiment_log, load_evaluation_results, get_experiment_logs

def show_evaluation():
    """Render the system evaluation page.

    The page has three tabs:
      1. Run an evaluation over the selected product datasets, showing
         live per-image progress (image, RAG retrievals, prompt, verdict)
         and per-product metrics.
      2. Browse saved evaluation results (overall summary or per product).
      3. Visualize result charts and download them.

    Requires a VLLM model, a CLIP model and a knowledge base to have been
    loaded into ``st.session_state`` by the other pages; otherwise it
    warns and returns early.
    """

    st.title("系统测评")

    # Make sure every session-state key this page reads exists.
    if "vllm_model" not in st.session_state:
        st.session_state.vllm_model = None

    if "knowledge_base" not in st.session_state:
        st.session_state.knowledge_base = None

    if "clip_model" not in st.session_state:
        st.session_state.clip_model = None

    # Abort early if prerequisites are missing.
    if st.session_state.vllm_model is None:
        st.warning("请先在'缺陷检测'或'对话模式'页面配置VLLM模型")
        return

    if st.session_state.knowledge_base is None:
        st.warning("请先在'知识库管理'页面加载CLIP模型和知识库")
        return

    # The CLIP model name is read below (info banner and experiment
    # params); guard it like the others to avoid an AttributeError when
    # the knowledge-base page was never visited.
    if st.session_state.clip_model is None:
        st.warning("请先在'知识库管理'页面加载CLIP模型和知识库")
        return

    # Evaluation input/output directories.
    data_dir = "./data"
    result_dir = "./evaluation_results"
    os.makedirs(result_dir, exist_ok=True)

    # Discover available product datasets.
    products = get_products_from_data_dir(data_dir)

    if not products:
        st.warning("未找到可用的产品数据集。请检查数据目录。")
        return

    # Page layout: three tabs.
    tab1, tab2, tab3 = st.tabs(["运行评估", "查看结果", "结果可视化"])

    # ---- Tab 1: run an evaluation -------------------------------------
    with tab1:
        st.header("运行系统评估")

        # Show which models are currently loaded.
        col1, col2 = st.columns(2)
        with col1:
            st.info(f"当前CLIP模型: {st.session_state.clip_model.model_name}")
        with col2:
            st.info(f"当前VLLM模型: {st.session_state.vllm_model.model_name}")

        # Product selection.
        st.subheader("选择产品")

        # Select all products by default.
        all_products = st.checkbox("选择所有产品", value=True)

        if all_products:
            selected_products = products
            st.success(f"已选择所有 {len(products)} 个产品")
        else:
            selected_products = st.multiselect("选择要评估的产品", products)

        if not selected_products:
            # NOTE(review): this return also skips rendering tabs 2 and 3
            # for the current rerun — preserved from the original behavior.
            st.warning("请至少选择一个产品")
            return

        # Evaluation configuration.
        st.subheader("测评配置")

        # Experiment name (defaults to a timestamped one).
        experiment_name = st.text_input("实验名称", value=f"实验_{datetime.now().strftime('%Y%m%d_%H%M%S')}")

        # Advanced parameters.
        eval_params = {}
        with st.expander("高级配置"):
            eval_params["auto_save_interval"] = st.slider("自动保存间隔(评估产品数)", 1, 5, 1)
            eval_params["show_detailed_progress"] = st.checkbox("显示详细进度", value=True)

        # Kick off the evaluation.
        if st.button("开始评估", key="start_evaluation", type="primary"):
            # Create the evaluator.
            evaluator = Evaluator(data_dir=data_dir, result_dir=result_dir)

            # Detection function passed into the evaluator: delegates to
            # the loaded VLLM model with the RAG retrieval results.
            def detect_function(image_path, product_name, retrieval_results):
                return st.session_state.vllm_model.detect_defect(
                    image_path=image_path,
                    product_name=product_name,
                    knowledge_results=retrieval_results
                )

            # Record experiment parameters.
            params = {
                "clip_model": st.session_state.clip_model.model_name,
                "vllm_model": st.session_state.vllm_model.model_name,
                "products": selected_products,
                "timestamp": datetime.now().isoformat()
            }

            # Persist the experiment log entry.
            log_file = create_experiment_log(experiment_name, params, result_dir)

            # Overall progress widgets.
            progress_bar = st.progress(0)
            status_text = st.empty()

            # Wall-clock start of the whole run.
            total_start_time = time.time()

            # Per-product metrics accumulator.
            all_metrics = {}

            # Live progress area: current product, current image, RAG
            # retrievals, prompt and model verdict.
            current_eval_container = st.container()
            with current_eval_container:
                st.subheader("当前测评进度")

                # Current product banner.
                current_product_info = st.empty()

                # Side-by-side: image on the left, results on the right.
                current_image_col, current_results_col = st.columns([1, 2])

                with current_image_col:
                    current_image_holder = st.empty()

                with current_results_col:
                    current_rag_holder = st.empty()
                    current_prompt_holder = st.empty()
                    current_result_holder = st.empty()

                # Live metrics table.
                metrics_container = st.container()
                with metrics_container:
                    st.subheader("实时评估指标")
                    metrics_placeholder = st.empty()

            # Callback invoked per evaluated image to refresh the live
            # progress area.
            def display_evaluation_progress(img_path, retrieval_results, prompt, result, product_name=None):
                # Current product name, if provided.
                if product_name:
                    current_product_info.info(f"正在评估产品: **{product_name}**")

                # Current test image.
                try:
                    image = Image.open(img_path)
                    current_image_holder.image(image, caption=f"当前测试图片: {os.path.basename(img_path)}", use_column_width=True)
                except Exception as e:
                    current_image_holder.error(f"无法加载图片: {str(e)}")

                # Top-3 RAG retrievals.
                rag_html = "<h4>知识库检索结果:</h4><ul>"
                for i, res in enumerate(retrieval_results[:3]):  # only show the top 3
                    similarity = res.get("similarity", 0.0)
                    label = res.get("label", "未知")
                    path = res.get("path", "")
                    filename = os.path.basename(path)
                    # FIX: interpolate the retrieved file name (the original
                    # emitted a literal "(unknown)" placeholder).
                    rag_html += f"<li>{label} ({similarity:.4f}): {filename}</li>"
                rag_html += "</ul>"
                current_rag_holder.markdown(rag_html, unsafe_allow_html=True)

                # The prompt sent to the VLLM.
                with current_prompt_holder.expander("查看Prompt"):
                    st.code(prompt, language="markdown")

                # Model verdict. The response may arrive as a raw string
                # containing JSON; extract and parse it if so.
                response = result.get("response", {})
                if isinstance(response, str):
                    try:
                        # Extract the JSON object embedded in the text.
                        start = response.find("{")
                        end = response.rfind("}") + 1
                        if start >= 0 and end > start:
                            json_str = response[start:end]
                            response = json.loads(json_str)
                    except (ValueError, TypeError):
                        # Unparseable response: fall back to an empty verdict.
                        response = {"has_defect": None, "defect_type": None, "confidence": None}

                # Pull out the key fields (with safe defaults).
                has_defect = response.get("has_defect", None)
                defect_type = response.get("defect_type", "unknown")
                confidence = response.get("confidence", 0.0)
                analysis = response.get("analysis", "")

                # Ground-truth label is the parent directory name; "good"
                # means defect-free.
                true_label = os.path.basename(os.path.dirname(img_path))
                true_has_defect = true_label.lower() != "good"

                # Correct iff the prediction matches ground truth.
                is_correct = (has_defect == true_has_defect) if has_defect is not None else False

                # Render the verdict panel.
                result_html = f"<h4>判别结果:</h4>"
                result_html += f"<p>真实标签: <b>{true_label}</b> ({'有缺陷' if true_has_defect else '无缺陷'})</p>"
                result_html += f"<p>预测结果: <b>{defect_type}</b> ({'有缺陷' if has_defect else '无缺陷'})</p>"
                result_html += f"<p>置信度: <b>{confidence:.4f}</b></p>"
                result_html += f"<p>判断: <b style='color:{'green' if is_correct else 'red'};'>{'✓ 正确' if is_correct else '✗ 错误'}</b></p>"
                result_html += f"<p>分析: {analysis}</p>"

                current_result_holder.markdown(result_html, unsafe_allow_html=True)

                # Brief pause so the user can see each update.
                time.sleep(0.2)

            # Per-product evaluation loop.
            for i, product in enumerate(selected_products):
                # Update overall progress.
                status_text.text(f"正在评估产品 {i+1}/{len(selected_products)}: {product}...")
                progress_bar.progress((i) / len(selected_products))

                # Reuse an existing knowledge base for this product if one
                # is available.
                product_kb_exists = False
                try:
                    if product in st.session_state.knowledge_base.get_product_list():
                        st.info(f"知识库 {product} 已存在，将直接使用")
                        product_kb_exists = st.session_state.knowledge_base.load_or_create(product)
                except Exception as e:
                    st.warning(f"加载知识库 {product} 失败: {str(e)}")

                # Build the knowledge base from the training images if it
                # does not exist yet.
                if not product_kb_exists:
                    with st.spinner(f"为 {product} 构建知识库..."):
                        train_dir = os.path.join(data_dir, product, "train")
                        st.session_state.knowledge_base.build_from_folder(
                            product_name=product,
                            data_dir=train_dir
                        )

                # Product-specific callback binds the product name into the
                # shared display callback.
                def product_callback(img_path, retrieval_results, prompt, result):
                    display_evaluation_progress(img_path, retrieval_results, prompt, result, product)

                # Evaluate; RAG is restricted to this product's own
                # knowledge base.
                with st.spinner(f"评估 {product} 的性能..."):
                    metrics = evaluator.evaluate_product(
                        product_name=product,
                        detect_fn=detect_function,
                        knowledge_base=st.session_state.knowledge_base,
                        callback=product_callback if eval_params.get("show_detailed_progress", True) else None
                    )

                # Live metrics table for the product just evaluated.
                metrics_df = pd.DataFrame({
                    "指标": ["准确率", "精确率", "召回率", "F1分数", "AUC", "特异度", "MCC"],
                    "值": [
                        metrics.get("accuracy", float('nan')),
                        metrics.get("precision", float('nan')),
                        metrics.get("recall", float('nan')),
                        metrics.get("f1", float('nan')),
                        metrics.get("auc", float('nan')),
                        metrics.get("specificity", float('nan')),
                        metrics.get("mcc", float('nan'))
                    ]
                })

                metrics_placeholder.dataframe(metrics_df, use_container_width=True)

                # Accumulate metrics per product.
                all_metrics[product] = metrics

                # Persist interim results every auto_save_interval products
                # (and always after the last one).
                auto_save_interval = eval_params.get("auto_save_interval", 1)
                if (i+1) % auto_save_interval == 0 or i == len(selected_products) - 1:
                    # Recompute and store the running averages.
                    avg_metrics = evaluator._compute_average_metrics(all_metrics)
                    all_metrics["average"] = avg_metrics

                    # Write the interim summary.
                    summary_path = os.path.join(result_dir, "summary_interim.json")
                    with open(summary_path, 'w', encoding='utf-8') as f:
                        json.dump(all_metrics, f, ensure_ascii=False, indent=2)

                    # Regenerate interim charts.
                    evaluator._generate_summary_charts(all_metrics)

            # Total wall-clock time for the run.
            total_end_time = time.time()
            total_time = total_end_time - total_start_time

            # NOTE(review): this re-runs the entire evaluation a second
            # time (without the progress callback), roughly doubling
            # runtime. Presumably evaluate_all_products also writes the
            # final consolidated artifacts — confirm before removing.
            all_metrics = evaluator.evaluate_all_products(selected_products, detect_function, st.session_state.knowledge_base, None)

            # Mark the run as finished.
            progress_bar.progress(1.0)
            status_text.text("评估完成！")

            # Overall results.
            avg_metrics = all_metrics.get("average", {})

            st.success(f"评估完成！总共评估了 {len(selected_products)} 个产品，用时 {total_time:.2f} 秒")

            st.subheader("总体评估结果")

            metrics_df = pd.DataFrame({
                "指标": ["准确率", "精确率", "召回率", "F1分数", "AUC", "特异度", "阴性预测值", "假阳性率", "假阴性率", "MCC"],
                "值": [
                    avg_metrics.get("accuracy", float('nan')),
                    avg_metrics.get("precision", float('nan')),
                    avg_metrics.get("recall", float('nan')),
                    avg_metrics.get("f1", float('nan')),
                    avg_metrics.get("auc", float('nan')),
                    avg_metrics.get("specificity", float('nan')),
                    avg_metrics.get("npv", float('nan')),
                    avg_metrics.get("fpr", float('nan')),
                    avg_metrics.get("fnr", float('nan')),
                    avg_metrics.get("mcc", float('nan'))
                ]
            })

            st.dataframe(metrics_df, use_container_width=True)

            # Per-product results table.
            st.subheader("各产品评估结果")

            product_metrics = []
            for product in selected_products:
                metrics = all_metrics.get(product, {})
                product_metrics.append({
                    "产品": product,
                    "准确率": metrics.get("accuracy", float('nan')),
                    "精确率": metrics.get("precision", float('nan')),
                    "召回率": metrics.get("recall", float('nan')),
                    "F1分数": metrics.get("f1", float('nan')),
                    "AUC": metrics.get("auc", float('nan')),
                    "特异度": metrics.get("specificity", float('nan')),
                    "MCC": metrics.get("mcc", float('nan'))
                })

            product_df = pd.DataFrame(product_metrics)
            st.dataframe(product_df, use_container_width=True)

            # Where the detailed results were saved.
            st.info(f"详细结果已保存至 {result_dir} 目录")

            # FIX: Streamlit tab objects cannot be opened programmatically
            # (there is no tab.open() API) — direct the user instead of
            # raising an AttributeError on button click.
            st.info("评估图表可在'结果可视化'标签页查看")

    # ---- Tab 2: browse saved results ----------------------------------
    with tab2:
        st.header("查看评估结果")

        # Results directory must exist.
        if not os.path.exists(result_dir):
            st.warning("未找到评估结果目录")
            return

        # Load experiment logs.
        experiment_logs = get_experiment_logs(result_dir)

        if not experiment_logs:
            st.warning("未找到任何实验记录")
            return

        # Experiment history table.
        st.subheader("历史实验")

        experiment_list = []
        for log in experiment_logs:
            experiment_list.append({
                "实验名称": log.get("experiment_name", "未命名"),
                "时间": log.get("timestamp", ""),
                "CLIP模型": log.get("params", {}).get("clip_model", ""),
                "VLLM模型": log.get("params", {}).get("vllm_model", ""),
                "产品数": len(log.get("params", {}).get("products", []))
            })

        experiment_df = pd.DataFrame(experiment_list)
        st.dataframe(experiment_df, use_container_width=True)

        # Result browser.
        st.subheader("结果查看")

        # Pick the overall summary or a specific product.
        product_to_view = st.selectbox("选择产品", ["总体结果"] + products)

        if product_to_view == "总体结果":
            # Overall summary results.
            summary_results = load_evaluation_results(result_dir)

            if not summary_results:
                st.warning("未找到总体评估结果")
                return

            # Averaged metrics.
            avg_metrics = summary_results.get("average", {})

            st.markdown("**总体评估指标**")
            metrics_df = pd.DataFrame({
                "指标": ["准确率", "精确率", "召回率", "F1分数", "AUC", "特异度", "阴性预测值", "假阳性率", "假阴性率", "MCC"],
                "值": [
                    avg_metrics.get("accuracy", float('nan')),
                    avg_metrics.get("precision", float('nan')),
                    avg_metrics.get("recall", float('nan')),
                    avg_metrics.get("f1", float('nan')),
                    avg_metrics.get("auc", float('nan')),
                    avg_metrics.get("specificity", float('nan')),
                    avg_metrics.get("npv", float('nan')),
                    avg_metrics.get("fpr", float('nan')),
                    avg_metrics.get("fnr", float('nan')),
                    avg_metrics.get("mcc", float('nan'))
                ]
            })

            st.dataframe(metrics_df, use_container_width=True)

            # Product comparison table.
            st.markdown("**产品对比**")
            products_in_result = [p for p in summary_results.keys() if p != "average"]

            if products_in_result:
                product_comparison = []
                for product in products_in_result:
                    product_metrics = summary_results.get(product, {})
                    product_comparison.append({
                        "产品": product,
                        "准确率": product_metrics.get("accuracy", float('nan')),
                        "精确率": product_metrics.get("precision", float('nan')),
                        "召回率": product_metrics.get("recall", float('nan')),
                        "F1分数": product_metrics.get("f1", float('nan')),
                        "特异度": product_metrics.get("specificity", float('nan')),
                        "MCC": product_metrics.get("mcc", float('nan'))
                    })

                product_df = pd.DataFrame(product_comparison)
                st.dataframe(product_df, use_container_width=True)

                # Optional sorting by a chosen metric.
                sort_by = st.selectbox("按指标排序:", ["准确率", "精确率", "召回率", "F1分数", "特异度", "MCC"])
                if sort_by:
                    sorted_df = product_df.sort_values(by=sort_by, ascending=False)
                    st.dataframe(sorted_df, use_container_width=True)

            # Markdown evaluation report, if present.
            report_path = os.path.join(result_dir, "evaluation_report.md")
            if os.path.exists(report_path):
                with open(report_path, 'r', encoding='utf-8') as f:
                    report_content = f.read()

                with st.expander("查看详细评估报告", expanded=False):
                    st.markdown(report_content)

                # Offer the report for download.
                st.download_button(
                    label="下载评估报告 (Markdown)",
                    data=report_content,
                    file_name="defect_rag_evaluation_report.md",
                    mime="text/markdown"
                )
        else:
            # Per-product results.
            product_result_dir = os.path.join(result_dir, product_to_view)

            if not os.path.exists(product_result_dir):
                st.warning(f"未找到 {product_to_view} 的评估结果")
                return

            # Metrics JSON.
            metrics_path = os.path.join(product_result_dir, "metrics.json")
            if os.path.exists(metrics_path):
                with open(metrics_path, 'r', encoding='utf-8') as f:
                    metrics = json.load(f)

                st.markdown(f"**{product_to_view} 评估指标**")
                metrics_df = pd.DataFrame({
                    "指标": ["准确率", "精确率", "召回率", "F1分数", "AUC", "特异度", "阴性预测值", "MCC"],
                    "值": [
                        metrics.get("accuracy", float('nan')),
                        metrics.get("precision", float('nan')),
                        metrics.get("recall", float('nan')),
                        metrics.get("f1", float('nan')),
                        metrics.get("auc", float('nan')),
                        metrics.get("specificity", float('nan')),
                        metrics.get("npv", float('nan')),
                        metrics.get("mcc", float('nan'))
                    ]
                })

                st.dataframe(metrics_df, use_container_width=True)

                # Per-defect-type accuracy, if recorded.
                if "class_metrics" in metrics:
                    st.markdown("**各缺陷类型准确率**")

                    class_metrics = metrics.get("class_metrics", {})
                    classes = []

                    for defect_type, values in class_metrics.items():
                        classes.append({
                            "缺陷类型": defect_type,
                            "准确率": values.get("accuracy", 0.0)
                        })

                    class_df = pd.DataFrame(classes)
                    st.dataframe(class_df, use_container_width=True)

            # Confusion matrix image.
            confusion_matrix_path = os.path.join(product_result_dir, "confusion_matrix.png")
            if os.path.exists(confusion_matrix_path):
                st.markdown("**混淆矩阵**")
                st.image(confusion_matrix_path)

            # Per-class accuracy chart.
            class_accuracy_path = os.path.join(product_result_dir, "class_accuracy.png")
            if os.path.exists(class_accuracy_path):
                st.markdown("**各缺陷类型准确率**")
                st.image(class_accuracy_path)

            # Detailed per-image results CSV.
            results_path = os.path.join(product_result_dir, "all_results.csv")
            if os.path.exists(results_path):
                st.markdown("**详细检测结果**")

                df = pd.read_csv(results_path)

                # Filtering options.
                with st.expander("结果过滤选项", expanded=False):
                    # FIX: default-initialize so the filter code below is
                    # well-defined even when the CSV has no "true_label"
                    # column (previously a NameError).
                    selected_label = "全部"
                    result_type = "全部"

                    if "true_label" in df.columns:
                        labels = ["全部"] + df["true_label"].unique().tolist()
                        selected_label = st.selectbox("选择标签类型", labels)

                        # Filter by outcome.
                        result_type = st.radio("结果类型", ["全部", "正确", "错误"])

                    # Apply filters on a copy.
                    filtered_df = df.copy()

                    # Label filter.
                    if selected_label != "全部" and "true_label" in df.columns:
                        filtered_df = filtered_df[filtered_df["true_label"] == selected_label]

                    # Outcome filter (correct vs. incorrect predictions).
                    if result_type == "正确" and "true_has_defect" in df.columns and "pred_has_defect" in df.columns:
                        filtered_df = filtered_df[filtered_df["true_has_defect"] == filtered_df["pred_has_defect"]]
                    elif result_type == "错误" and "true_has_defect" in df.columns and "pred_has_defect" in df.columns:
                        filtered_df = filtered_df[filtered_df["true_has_defect"] != filtered_df["pred_has_defect"]]

                # Filtered table.
                st.dataframe(filtered_df, use_container_width=True)

                # Single-sample inspector.
                st.markdown("**查看具体样本**")

                true_labels = df["true_label"].unique()
                selected_label = st.selectbox("选择标签类型", true_labels, key="sample_label_selector")

                if selected_label:
                    # Samples with the chosen label.
                    filtered_df = df[df["true_label"] == selected_label]

                    # Pick one by index.
                    if not filtered_df.empty:
                        sample_index = st.number_input("选择样本索引", min_value=0, max_value=len(filtered_df)-1, value=0)

                        sample = filtered_df.iloc[sample_index]
                        image_path = sample["image_path"]

                        # Image on the left, verdict on the right.
                        col1, col2 = st.columns(2)

                        with col1:
                            # Sample image.
                            if os.path.exists(image_path):
                                img = Image.open(image_path)
                                st.image(img, caption=f"样本图片: {os.path.basename(image_path)}")

                        with col2:
                            # Recorded detection result.
                            st.markdown("**检测结果**")
                            st.markdown(f"真实标签: **{sample['true_label']}**")
                            st.markdown(f"预测: **{sample['pred_defect_type']}**")
                            st.markdown(f"置信度: **{sample['confidence']}**")
                            st.markdown(f"分析: {sample['analysis']}")

                # Full CSV download.
                st.download_button(
                    label="下载完整结果 (CSV)",
                    data=df.to_csv(index=False).encode('utf-8-sig'),
                    file_name=f"{product_to_view}_results.csv",
                    mime="text/csv"
                )

    # ---- Tab 3: visualization -----------------------------------------
    with tab3:
        st.header("结果可视化")

        # Results directory must exist.
        if not os.path.exists(result_dir):
            st.warning("未找到评估结果目录")
            return

        # Latest summary results.
        summary_results = load_evaluation_results(result_dir)

        if not summary_results:
            st.warning("未找到评估结果")
            return

        # Visualization type picker.
        st.subheader("可视化选项")

        visual_type = st.radio(
            "选择可视化类型",
            ["柱状图比较", "雷达图", "热力图", "单指标比较", "混淆矩阵"],
            horizontal=True
        )

        if visual_type == "柱状图比较":
            metrics_comparison_path = os.path.join(result_dir, "metrics_comparison.png")
            if os.path.exists(metrics_comparison_path):
                st.image(metrics_comparison_path, caption="产品评估指标对比")

                # Download option.
                with open(metrics_comparison_path, "rb") as file:
                    st.download_button(
                        label="下载柱状图 (PNG)",
                        data=file,
                        file_name="metrics_comparison.png",
                        mime="image/png"
                    )
            else:
                st.warning("未找到柱状图比较")

        elif visual_type == "雷达图":
            radar_chart_path = os.path.join(result_dir, "radar_chart.png")
            if os.path.exists(radar_chart_path):
                st.image(radar_chart_path, caption="评估指标雷达图")

                # Download option.
                with open(radar_chart_path, "rb") as file:
                    st.download_button(
                        label="下载雷达图 (PNG)",
                        data=file,
                        file_name="radar_chart.png",
                        mime="image/png"
                    )
            else:
                st.warning("未找到雷达图")

        elif visual_type == "热力图":
            heatmap_path = os.path.join(result_dir, "product_comparison_heatmap.png")
            if os.path.exists(heatmap_path):
                st.image(heatmap_path, caption="产品性能指标热力图")

                # Download option.
                with open(heatmap_path, "rb") as file:
                    st.download_button(
                        label="下载热力图 (PNG)",
                        data=file,
                        file_name="product_comparison_heatmap.png",
                        mime="image/png"
                    )
            else:
                st.warning("未找到热力图")

        elif visual_type == "单指标比较":
            # Metric picker (internal keys mapped to display names).
            metrics = ["accuracy", "precision", "recall", "f1", "specificity"]
            metric_names = {
                "accuracy": "准确率",
                "precision": "精确率",
                "recall": "召回率",
                "f1": "F1分数",
                "specificity": "特异度"
            }

            selected_metric = st.selectbox("选择指标", list(metric_names.keys()), format_func=lambda x: metric_names[x])

            metric_path = os.path.join(result_dir, f"{selected_metric}_comparison.png")
            if os.path.exists(metric_path):
                st.image(metric_path, caption=f"{metric_names[selected_metric]}指标对比")

                # Download option.
                with open(metric_path, "rb") as file:
                    st.download_button(
                        label=f"下载{metric_names[selected_metric]}图表 (PNG)",
                        data=file,
                        file_name=f"{selected_metric}_comparison.png",
                        mime="image/png"
                    )
            else:
                st.warning(f"未找到{metric_names[selected_metric]}指标比较图")

        elif visual_type == "混淆矩阵":
            # Product picker for confusion matrices.
            products_in_result = [p for p in summary_results.keys() if p != "average"]

            if not products_in_result:
                st.warning("未找到产品结果")
                return

            selected_product = st.selectbox("选择产品", products_in_result)

            if selected_product:
                confusion_matrix_path = os.path.join(result_dir, selected_product, "confusion_matrix.png")

                if os.path.exists(confusion_matrix_path):
                    st.image(confusion_matrix_path, caption=f"{selected_product}混淆矩阵")

                    # Download option.
                    with open(confusion_matrix_path, "rb") as file:
                        st.download_button(
                            label="下载混淆矩阵 (PNG)",
                            data=file,
                            file_name=f"{selected_product}_confusion_matrix.png",
                            mime="image/png"
                        )
                else:
                    st.warning(f"未找到{selected_product}的混淆矩阵")

        # Bundled chart download.
        st.subheader("图表集合下载")

        # Only offer the bundle if there are charts on disk.
        chart_files = glob.glob(os.path.join(result_dir, "*.png"))
        if len(chart_files) > 0:
            if st.button("下载所有图表"):
                # Zip every chart into an in-memory archive.
                zip_buffer = io.BytesIO()
                with zipfile.ZipFile(zip_buffer, "w", zipfile.ZIP_DEFLATED) as zip_file:
                    for chart_path in chart_files:
                        filename = os.path.basename(chart_path)
                        zip_file.write(chart_path, filename)

                # Offer the archive for download.
                st.download_button(
                    label="下载图表ZIP包",
                    data=zip_buffer.getvalue(),
                    file_name="defect_rag_charts.zip",
                    mime="application/zip"
                )

    st.markdown("---")

    st.markdown("""
    ### 使用说明
    
    1. **运行评估**:
       - 选择要评估的产品（可选择所有产品）
       - 设置实验名称
       - 点击"开始评估"按钮
       - 系统会自动构建知识库并评估所有测试样本
       - 评估完成后会显示总体指标和各产品指标
    
    2. **查看结果**:
       - 查看历史实验记录
       - 选择要查看的产品
       - 查看评估指标、混淆矩阵和详细结果
       - 可以浏览具体样本的检测结果
    
    3. **结果可视化**:
       - 查看各种可视化图表
       - 下载单个图表或图表集合
       - 分析产品间性能差异
    
    评估结果会保存在 ./evaluation_results 目录下，包括指标、图表和详细报告。
    """) 