import os
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from typing import List, Dict, Any, Union, Tuple, Callable
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix
from sklearn.metrics import matthews_corrcoef
import glob
from tqdm import tqdm
import shutil
from datetime import datetime
import matplotlib
from collections import Counter
import requests

matplotlib.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so Chinese axis/legend labels render correctly
matplotlib.rcParams['axes.unicode_minus'] = False  # render minus signs with ASCII '-' (CJK fonts often lack U+2212)

class EnhancedEvaluator:
    """增强型系统评估类，支持多种算法比较"""
    
    def __init__(self, data_dir: str = "./data", result_dir: str = "./evaluation_results"):
        """Initialize the evaluator.

        Args:
            data_dir: Directory that holds the per-product datasets.
            result_dir: Directory where evaluation artifacts are written;
                created here if it does not already exist.
        """
        self.data_dir = data_dir
        self.result_dir = result_dir
        os.makedirs(result_dir, exist_ok=True)

        # Accumulated evaluation results (populated by the evaluate_* methods).
        self.results = {}
        # Machine-readable algorithm keys mapped to the display names
        # (Chinese) used in charts and reports.
        self.algorithm_names = {
            "clip_voting": "CLIP投票算法",
            "vllm_only": "纯VLLM算法",
            "combined": "CLIP+RAG+VLLM算法",
        }
    
    def _compute_average_metrics(self, product_metrics: Dict) -> Dict:
        """Average each numeric metric over all products.

        Args:
            product_metrics: Mapping of product name -> metric dict. An
                entry keyed "average" is ignored so a previously computed
                average never gets folded back into a new one.

        Returns:
            Dict mapping each known metric name to the mean of the non-NaN
            per-product values, or NaN when no product reported it.
        """
        metric_keys = ("accuracy", "precision", "recall", "f1", "auc",
                       "specificity", "npv", "fpr", "fnr", "mcc")
        # Consider real products only; skip any pre-existing "average" entry.
        products = [name for name in product_metrics if name != "average"]

        averaged = {}
        for key in metric_keys:
            samples = [
                product_metrics.get(name, {}).get(key, float('nan'))
                for name in products
            ]
            samples = [s for s in samples if not np.isnan(s)]
            averaged[key] = float(np.mean(samples)) if samples else float('nan')
        return averaged
    
    def _clip_voting(self, retrieval_results: List[Dict]) -> Dict:
        """Predict the defect type by voting over CLIP retrieval hits.

        Two signals are combined: a similarity-weighted score per label and
        the raw label frequency. The weighted winner is used unless one
        label accounts for more than 60% of the hits, in which case the
        majority label wins with its frequency as the confidence.

        Args:
            retrieval_results: Knowledge-base hits, each with optional
                "label" and "similarity" entries.

        Returns:
            Dict with has_defect, defect_type, confidence and analysis.
        """
        if not retrieval_results:
            return {
                "has_defect": False,
                "defect_type": "good",
                "confidence": 0.0,
                "analysis": "无法获取检索结果",
            }

        hit_labels = [hit.get("label", "unknown") for hit in retrieval_results]

        # Majority vote: the single most frequent label and its share of hits.
        top_label, top_count = Counter(hit_labels).most_common(1)[0]
        top_share = top_count / len(hit_labels)

        # Similarity-weighted vote: accumulate each hit's similarity onto its
        # label, then normalize the winner by the total accumulated mass.
        score_by_label = {}
        for hit in retrieval_results:
            hit_label = hit.get("label", "unknown")
            score_by_label[hit_label] = (
                score_by_label.get(hit_label, 0) + hit.get("similarity", 0)
            )

        winner, winner_score = max(score_by_label.items(), key=lambda kv: kv[1])
        total_mass = sum(score_by_label.values())
        winner_confidence = winner_score / total_mass if total_mass > 0 else 0

        # Prefer the weighted winner, but defer to a strong (>60%) majority.
        chosen_label, chosen_confidence = winner, winner_confidence
        if top_share > 0.6:
            chosen_label, chosen_confidence = top_label, top_share

        return {
            "has_defect": chosen_label.lower() != "good",
            "defect_type": chosen_label,
            "confidence": chosen_confidence,
            "analysis": f"基于CLIP检索的{len(retrieval_results)}个结果进行投票，主要检测到的类型是{chosen_label}，置信度{chosen_confidence:.2f}",
        }
    
    def _vllm_only_detect(self, vllm_model, image_path: str, product_name: str) -> Dict:
        """Run defect detection with the VLLM alone (no RAG context).

        Builds a JSON-output prompt (with extra guidance for the NEU-DET
        steel dataset), posts it with the base64-encoded image to the
        model's /api/generate endpoint, and normalizes every failure mode
        into a uniform result dict so callers never see an exception.

        Args:
            vllm_model: Model wrapper exposing image_to_base64(),
                model_name and base_url (Ollama-style API assumed).
            image_path: Path of the image to analyze.
            product_name: Product name inserted into the prompt.

        Returns:
            {"success": bool, "response": dict-or-str, ["error": str]}.
            "response" is the parsed JSON prediction when possible,
            otherwise the raw model text or a placeholder prediction.
        """
        # Encode the image; bail out early when it cannot be read.
        image_b64 = vllm_model.image_to_base64(image_path)
        if not image_b64:
            return {
                "success": False,
                "error": "无法处理图片",
                "response": {
                    "has_defect": False,
                    "defect_type": "unknown",
                    "confidence": 0.0,
                    "analysis": "无法处理图片"
                }
            }
        
        # Extra guidance for the NEU-DET steel-surface dataset.
        special_instructions = ""
        if product_name.lower() == "neu-det":
            special_instructions = """
对于钢材表面缺陷检测，请特别注意以下几点：

1. 龟裂(crazing)通常表现为网状细小裂纹，在光照下会形成特殊的反光图案，与划痕(scratches)的线状特征有明显区别。
2. 夹杂物(inclusion)通常是点状或不规则形状的异物，与点蚀(pitted_surface)的凹坑特征不同，夹杂物可能呈现为凸起或嵌入物。
3. 斑块(patches)表现为颜色变化区域，通常没有明显的深度变化，而点蚀和夹杂物通常伴随表面形态的变化。
4. 点蚀表面(pitted_surface)的特征是表面有凹坑或孔洞，通常在光照下会有明显的阴影效果。
5. 划痕(scratches)是线状损伤，通常有明显的方向性，与龟裂的网状特征有本质区别。"""
        
        # Prompt WITHOUT retrieval results — this is the pure-VLLM baseline.
        prompt = f"""你是一个产品表面缺陷检测专家，请仔细分析图片中的{product_name}产品，并检测是否存在表面缺陷。

请特别注意:
1. 仔细区分正常产品(good)和有缺陷产品的区别
2. 只有当产品表面完全无缺陷时，才应判断为"good"
3. 如有任何可疑特征，应详细分析是否为缺陷

{special_instructions}

请分析图片并输出标准JSON格式结果，包括:
1. has_defect: 布尔值，表示是否存在缺陷
2. defect_type: 如存在缺陷，提供缺陷类型；如无缺陷，则为"good"
3. confidence: 置信度，0-1的浮点数
4. analysis: 简短分析说明（不超过100字）

只需要输出JSON格式结果，不要有其他回复。"""

        # Build the API request payload.
        payload = {
            "model": vllm_model.model_name,
            "prompt": prompt,
            "stream": False,
            "images": [image_b64],
            "format": "json"
        }
        
        try:
            # FIX: a timeout keeps a stuck/unreachable server from hanging
            # the whole evaluation loop forever; a Timeout is caught by the
            # except below and degrades to the same error result as any
            # other request failure.
            response = requests.post(f"{vllm_model.base_url}/api/generate", json=payload, timeout=300)
            if response.status_code == 200:
                result = response.json()
                try:
                    # Preferred path: the model answered with valid JSON.
                    return {
                        "success": True,
                        "response": json.loads(result["response"])
                    }
                except json.JSONDecodeError:
                    # Fall back to the raw text when JSON parsing fails;
                    # callers string-scan for a JSON object later.
                    return {
                        "success": True,
                        "response": result["response"]
                    }
            else:
                return {
                    "success": False, 
                    "error": f"API调用失败: HTTP {response.status_code}",
                    "response": {
                        "has_defect": False,
                        "defect_type": "unknown",
                        "confidence": 0.0,
                        "analysis": f"API调用失败: HTTP {response.status_code}"
                    }
                }
        except Exception as e:
            # Network errors, timeouts, a missing "response" key, etc. all
            # degrade to a non-defect placeholder prediction.
            return {
                "success": False,
                "error": f"调用模型时发生错误: {str(e)}",
                "response": {
                    "has_defect": False,
                    "defect_type": "unknown",
                    "confidence": 0.0,
                    "analysis": f"调用模型时发生错误: {str(e)}"
                }
            }
    
    def evaluate_product_with_all_algorithms(
        self, 
        product_name: str, 
        vllm_model, 
        knowledge_base, 
        combined_detect_fn: Callable,
        callback=None
    ) -> Dict[str, Dict]:
        """Evaluate one product's test split with all three algorithms.

        Walks <data_dir>/<product>/test/<label>/*.{png,jpg,jpeg}; each
        subdirectory name is the ground-truth label ("good" = defect-free).
        Every image is scored by CLIP voting, by the VLLM alone, and by the
        combined detector; binary and per-class metrics are then computed
        per algorithm and saved to
        <result_dir>/<product>/algorithm_comparison.json.

        Args:
            product_name: Product (dataset) name.
            vllm_model: VLLM model object passed to _vllm_only_detect.
            knowledge_base: Object whose .search(img, top_k, product_name)
                returns retrieval hits used by CLIP voting and the
                combined method.
            combined_detect_fn: Callable(img_path, product_name,
                retrieval_results) implementing the CLIP+RAG+VLLM pipeline;
                expected to return a dict with a "response" entry.
            callback: Optional hook invoked after each image with
                (img_path, retrieval_results, all_results).

        Returns:
            Mapping of algorithm key -> metrics dict, or an empty dict
            when the test directory does not exist.
        """
        # Test-split directory: <data_dir>/<product>/test
        test_dir = os.path.join(self.data_dir, product_name, "test")
        
        if not os.path.exists(test_dir):
            print(f"错误: 测试集目录不存在 {test_dir}")
            return {}
        
        # Per-product output directory
        product_result_dir = os.path.join(self.result_dir, product_name)
        os.makedirs(product_result_dir, exist_ok=True)
        
        # Each subdirectory of the test split holds one ground-truth class
        subdirs = [d for d in os.listdir(test_dir) if os.path.isdir(os.path.join(test_dir, d))]
        
        # All defect types for this product (including "good")
        defect_types = []
        for subdir in subdirs:
            defect_types.append(subdir)
        
        # One result container per algorithm
        algorithms = ["clip_voting", "vllm_only", "combined"]
        all_results = {alg: [] for alg in algorithms}
        y_true = {alg: [] for alg in algorithms}    # ground-truth has-defect flags
        y_pred = {alg: [] for alg in algorithms}    # predicted has-defect flags
        y_scores = {alg: [] for alg in algorithms}  # P(defect)-style scores for AUC
        
        # Per-class correct/total counters for each algorithm
        class_metrics = {
            alg: {defect_type: {'correct': 0, 'total': 0} for defect_type in defect_types} 
            for alg in algorithms
        }
        
        print(f"评估产品: {product_name}")
        
        # Evaluate every class subdirectory
        for subdir in subdirs:
            subdir_path = os.path.join(test_dir, subdir)
            image_files = []
            
            # Collect all supported image files.
            # NOTE(review): only lowercase extensions are matched; files named
            # *.PNG / *.JPG would be skipped — confirm dataset file naming.
            for ext in ['png', 'jpg', 'jpeg']:
                image_files.extend(glob.glob(os.path.join(subdir_path, f"*.{ext}")))
            
            if not image_files:
                print(f"警告: {subdir_path} 中没有找到图片")
                continue
            
            print(f"处理 {subdir} 样本 ({len(image_files)} 张图片)...")
            
            # Per-class output directory (created here; nothing writes into
            # it within this method)
            class_result_dir = os.path.join(product_result_dir, subdir)
            os.makedirs(class_result_dir, exist_ok=True)
            
            # Score every image with the three algorithms
            for img_path in tqdm(image_files):
                # Retrieve the top-5 nearest knowledge-base entries
                retrieval_results = knowledge_base.search(img_path, top_k=5, product_name=product_name)
                
                # 1. CLIP voting over the retrieval hits
                clip_result = self._clip_voting(retrieval_results)
                
                # 2. VLLM alone (no retrieval context)
                vllm_result = self._vllm_only_detect(vllm_model, img_path, product_name)
                
                # 3. Combined pipeline (CLIP+RAG+VLLM)
                combined_result = combined_detect_fn(img_path, product_name, retrieval_results)
                
                # Record each algorithm's raw prediction
                all_results["clip_voting"].append({
                    "path": img_path,
                    "true_label": subdir,
                    "prediction": clip_result
                })
                
                all_results["vllm_only"].append({
                    "path": img_path,
                    "true_label": subdir,
                    "prediction": vllm_result.get("response", {})
                })
                
                all_results["combined"].append({
                    "path": img_path,
                    "true_label": subdir,
                    "prediction": combined_result.get("response", {})
                })
                
                # Ground truth: the subdirectory name; anything but "good"
                # counts as a defect
                true_label = subdir
                true_has_defect = true_label.lower() != "good"
                
                # Update the per-algorithm statistics
                for alg in algorithms:
                    # Latest prediction appended above for this algorithm
                    result_container = all_results[alg][-1]
                    pred_result = result_container["prediction"]
                    
                    # Predictions may arrive as raw text; try to extract a
                    # JSON object from between the outermost braces
                    if isinstance(pred_result, str):
                        try:
                            # Slice out the candidate JSON substring
                            start = pred_result.find("{")
                            end = pred_result.rfind("}") + 1
                            if start >= 0 and end > start:
                                json_str = pred_result[start:end]
                                pred_result = json.loads(json_str)
                        except:
                            pred_result = {"has_defect": None, "defect_type": None, "confidence": None}
                    
                    # Pull out the key prediction fields
                    has_defect = pred_result.get("has_defect", None)
                    defect_type = pred_result.get("defect_type", "unknown")
                    confidence = pred_result.get("confidence", 0.0)
                    
                    # If has_defect is missing, infer it from defect_type.
                    # NOTE(review): when defect_type is None as well, has_defect
                    # stays None and is appended to y_pred, and
                    # defect_type.lower() below would raise AttributeError —
                    # confirm upstream always supplies at least one of the
                    # two fields.
                    if has_defect is None and defect_type is not None:
                        has_defect = defect_type.lower() != "good" and defect_type.lower() != "无"
                    
                    # Binary-classification bookkeeping; the score is treated
                    # as the probability of "defect" for AUC
                    y_true[alg].append(true_has_defect)
                    y_pred[alg].append(has_defect)
                    y_scores[alg].append(confidence if has_defect else 1 - confidence)
                    
                    # Per-class accuracy: a defective sample must also get the
                    # right defect type to count as correct
                    class_metrics[alg][true_label]["total"] += 1
                    if (has_defect == true_has_defect) and (true_has_defect is False or defect_type.lower() == true_label.lower()):
                        class_metrics[alg][true_label]["correct"] += 1
                
                # Report progress to the caller, if requested
                if callback:
                    callback(img_path, retrieval_results, all_results)
        
        # Compute the evaluation metrics for each algorithm
        algorithm_metrics = {}
        
        for alg in algorithms:
            metrics = {}
            
            # Binary metrics need both positive and negative samples
            if len(set(y_true[alg])) > 1:
                metrics["accuracy"] = accuracy_score(y_true[alg], y_pred[alg])
                metrics["precision"] = precision_score(y_true[alg], y_pred[alg], zero_division=0)
                metrics["recall"] = recall_score(y_true[alg], y_pred[alg], zero_division=0)
                metrics["f1"] = f1_score(y_true[alg], y_pred[alg], zero_division=0)
                
                # AUC, when scores are available
                if len(y_scores[alg]) > 0 and len(set(y_true[alg])) > 1:
                    try:
                        metrics["auc"] = roc_auc_score(y_true[alg], y_scores[alg])
                    except:
                        metrics["auc"] = float('nan')
                else:
                    metrics["auc"] = float('nan')
                
                # Confusion matrix (binary layout: tn, fp, fn, tp)
                tn, fp, fn, tp = confusion_matrix(y_true[alg], y_pred[alg]).ravel()
                
                # Derived rates, guarding against zero denominators
                specificity = tn / (tn + fp) if (tn + fp) > 0 else 0
                npv = tn / (tn + fn) if (tn + fn) > 0 else 0
                fpr = fp / (fp + tn) if (fp + tn) > 0 else 0
                fnr = fn / (fn + tp) if (fn + tp) > 0 else 0
                
                metrics["specificity"] = specificity
                metrics["npv"] = npv
                metrics["fpr"] = fpr
                metrics["fnr"] = fnr
                
                # Matthews correlation coefficient
                metrics["mcc"] = matthews_corrcoef(y_true[alg], y_pred[alg])
                
                # Store the confusion matrix as JSON-serializable ints
                metrics["confusion_matrix"] = {
                    "tn": int(tn),
                    "fp": int(fp),
                    "fn": int(fn),
                    "tp": int(tp)
                }
                
                # Per-class accuracies
                metrics["class_metrics"] = {}
                for class_name, class_data in class_metrics[alg].items():
                    accuracy = class_data["correct"] / class_data["total"] if class_data["total"] > 0 else 0
                    metrics["class_metrics"][class_name] = {
                        "accuracy": accuracy,
                        "correct": class_data["correct"],
                        "total": class_data["total"]
                    }
            else:
                # Degenerate split (single class): all metrics undefined
                for metric in ["accuracy", "precision", "recall", "f1", "auc", "specificity", "npv", "fpr", "fnr", "mcc"]:
                    metrics[metric] = float('nan')
            
            algorithm_metrics[alg] = metrics
        
        # Persist this product's algorithm comparison
        results_path = os.path.join(product_result_dir, "algorithm_comparison.json")
        with open(results_path, 'w', encoding='utf-8') as f:
            json.dump(algorithm_metrics, f, ensure_ascii=False, indent=2)
        
        return algorithm_metrics
    
    def evaluate_all_products(
        self, 
        products: List[str], 
        vllm_model,
        knowledge_base, 
        combined_detect_fn: Callable,
        callback=None
    ) -> Dict[str, Dict]:
        """Evaluate every product with all three algorithms and aggregate.

        For each product the knowledge base is (re)loaded, the per-product
        evaluation is run, and the per-algorithm metrics are then averaged
        into an "average" entry. A JSON summary, comparison charts and a
        markdown report are written under result_dir.

        Args:
            products: Product names to evaluate.
            vllm_model: VLLM model object.
            knowledge_base: Knowledge-base object used for retrieval.
            combined_detect_fn: Detection function of the combined algorithm.
            callback: Optional per-image progress callback.

        Returns:
            Mapping of product name -> per-algorithm metrics, plus the
            cross-product "average" entry.
        """
        all_metrics: Dict[str, Dict] = {}

        for product in products:
            print(f"评估产品: {product}")

            # Skip the product entirely when its knowledge base cannot load.
            try:
                if product in knowledge_base.get_product_list():
                    knowledge_base.load_or_create(product)
            except Exception as e:
                print(f"警告: 加载知识库失败 {str(e)}")
                continue

            # Run all three algorithms on this product's test split.
            all_metrics[product] = self.evaluate_product_with_all_algorithms(
                product_name=product,
                vllm_model=vllm_model,
                knowledge_base=knowledge_base,
                combined_detect_fn=combined_detect_fn,
                callback=callback,
            )

        # Average each algorithm's metrics across all evaluated products
        # (the RHS is built before "average" is inserted, so it never
        # includes itself).
        all_metrics["average"] = {
            alg: self._compute_average_metrics(
                {name: metrics.get(alg, {}) for name, metrics in all_metrics.items()}
            )
            for alg in ("clip_voting", "vllm_only", "combined")
        }

        # Persist the overall summary.
        summary_path = os.path.join(self.result_dir, "algorithm_comparison_summary.json")
        with open(summary_path, 'w', encoding='utf-8') as f:
            json.dump(all_metrics, f, ensure_ascii=False, indent=2)

        # Render the comparison charts and the markdown report.
        self._generate_algorithm_comparison_charts(all_metrics)
        self._generate_algorithm_comparison_report(all_metrics)

        return all_metrics
    
    def _generate_algorithm_comparison_charts(self, all_metrics: Dict) -> None:
        """Render grouped bar charts comparing the algorithms' performance.

        Writes one chart of the cross-product average metrics and, for each
        headline metric, one chart comparing the algorithms across every
        product. All charts are saved as PNGs under <result_dir>/charts.

        Args:
            all_metrics: Per-product metrics plus the "average" entry; no
                charts are produced when the "average" entry is missing.
        """
        # Output directory for all chart images.
        charts_dir = os.path.join(self.result_dir, "charts")
        os.makedirs(charts_dir, exist_ok=True)

        avg_metrics = all_metrics.get("average", {})
        if not avg_metrics:
            return

        # Headline metrics to visualize and their (Chinese) display names.
        metrics_to_plot = ["accuracy", "precision", "recall", "f1", "specificity", "mcc"]
        metrics_names = ["准确率", "精确率", "召回率", "F1分数", "特异度", "MCC"]

        algorithms = list(avg_metrics.keys())
        alg_labels = [self.algorithm_names.get(alg, alg) for alg in algorithms]

        width = 0.25  # bar width
        # FIX: center the group of bars on each tick for any number of
        # algorithms (the old hard-coded `i - 1` offset was only correct
        # for exactly 3 algorithms; for 3 this expression is identical).
        centering = (len(algorithms) - 1) / 2

        # 1. Grouped bars of the cross-product average for each metric.
        plt.figure(figsize=(12, 8))
        x = np.arange(len(metrics_names))
        for i, alg in enumerate(algorithms):
            alg_metrics = avg_metrics[alg]
            values = [alg_metrics.get(metric, 0) for metric in metrics_to_plot]
            plt.bar(x + width * (i - centering), values, width, label=alg_labels[i])

        plt.xlabel('评估指标')
        plt.ylabel('得分')
        plt.title('算法性能对比')
        plt.xticks(x, metrics_names)
        plt.legend()
        plt.grid(axis='y', linestyle='--', alpha=0.7)
        plt.tight_layout()
        plt.savefig(os.path.join(charts_dir, "algorithm_comparison_metrics.png"), dpi=300)
        plt.close()

        # 2. Per-product comparison for each headline metric.
        # (FIX: the old code also built an unused `product_data` dict here;
        # that dead computation has been removed.)
        products = [p for p in all_metrics.keys() if p != "average"]
        if not products:
            return

        for metric_idx, metric in enumerate(metrics_to_plot):
            plt.figure(figsize=(14, 8))
            x = np.arange(len(products))
            for i, alg in enumerate(algorithms):
                values = [all_metrics.get(product, {}).get(alg, {}).get(metric, 0) for product in products]
                plt.bar(x + width * (i - centering), values, width, label=alg_labels[i])

            plt.xlabel('产品')
            plt.ylabel(metrics_names[metric_idx])
            plt.title(f'各产品的{metrics_names[metric_idx]}对比')
            plt.xticks(x, products, rotation=45)
            plt.legend()
            plt.grid(axis='y', linestyle='--', alpha=0.7)
            plt.tight_layout()
            plt.savefig(os.path.join(charts_dir, f"product_comparison_{metric}.png"), dpi=300)
            plt.close()
    
    def _generate_algorithm_comparison_report(self, all_metrics: Dict) -> None:
        """Write a markdown report comparing the three algorithms.

        The report contains the cross-product average metrics, one section
        per product (including per-defect-type accuracies when available),
        and a conclusion naming the algorithm with the best average F1.
        Saved to <result_dir>/algorithm_comparison_report.md. Nothing is
        written when the "average" entry is missing.

        Args:
            all_metrics: Per-product metrics plus the "average" entry, as
                produced by evaluate_all_products.
        """
        avg_metrics = all_metrics.get("average", {})
        if not avg_metrics:
            return

        def display_name(alg: str) -> str:
            # FIX: fall back to the raw key so an unexpected algorithm key
            # can never crash report generation with a KeyError (consistent
            # with _generate_algorithm_comparison_charts).
            return self.algorithm_names.get(alg, alg)

        def fmt(value) -> str:
            # Metric table cell: 4 decimals, or "N/A" for missing, NaN or
            # non-numeric values (np.isnan raises TypeError on non-numbers).
            try:
                return "N/A" if np.isnan(value) else f"{value:.4f}"
            except TypeError:
                return "N/A"

        report_content = "# 缺陷检测算法性能对比报告\n\n"
        report_content += f"生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"

        # Overall summary section.
        report_content += "## 整体性能总结\n\n"
        report_content += "本报告对比了三种缺陷检测算法的性能：\n\n"
        report_content += f"1. **{self.algorithm_names['clip_voting']}**: 仅使用CLIP RAG检索结果进行投票\n"
        report_content += f"2. **{self.algorithm_names['vllm_only']}**: 仅使用Qwen 2.5 VL大模型直接识别\n"
        report_content += f"3. **{self.algorithm_names['combined']}**: 结合CLIP+RAG+VLLM的综合方法\n\n"

        # Key-metric comparison table over the averages.
        report_content += "### 关键指标对比\n\n"
        report_content += "| 指标 | " + " | ".join([display_name(alg) for alg in avg_metrics.keys()]) + " |\n"
        report_content += "| ---- | " + " | ".join(["----" for _ in avg_metrics.keys()]) + " |\n"

        metrics_to_show = {
            "accuracy": "准确率",
            "precision": "精确率",
            "recall": "召回率",
            "f1": "F1分数",
            "auc": "AUC",
            "specificity": "特异度",
            "mcc": "MCC"
        }

        for metric, metric_name in metrics_to_show.items():
            values = [fmt(avg_metrics[alg].get(metric, float('nan'))) for alg in avg_metrics.keys()]
            report_content += f"| {metric_name} | " + " | ".join(values) + " |\n"

        # Per-product analysis sections.
        report_content += "\n## 各产品性能分析\n\n"
        products = [p for p in all_metrics.keys() if p != "average"]

        for product in products:
            report_content += f"### {product} 产品\n\n"
            product_metrics = all_metrics.get(product, {})

            # Metric table for this product.
            report_content += "| 指标 | " + " | ".join([display_name(alg) for alg in product_metrics.keys()]) + " |\n"
            report_content += "| ---- | " + " | ".join(["----" for _ in product_metrics.keys()]) + " |\n"
            for metric, metric_name in metrics_to_show.items():
                values = [fmt(product_metrics[alg].get(metric, float('nan'))) for alg in product_metrics.keys()]
                report_content += f"| {metric_name} | " + " | ".join(values) + " |\n"

            # Per-defect-type accuracy table, when any algorithm reports one.
            has_class_metrics = any("class_metrics" in product_metrics[alg] for alg in product_metrics.keys())
            if has_class_metrics:
                report_content += "\n#### 各缺陷类型准确率\n\n"

                # Union of class names seen across all algorithms.
                class_names = set()
                for alg in product_metrics.keys():
                    class_names.update(product_metrics[alg].get("class_metrics", {}).keys())

                report_content += "| 缺陷类型 | " + " | ".join([display_name(alg) for alg in product_metrics.keys()]) + " |\n"
                report_content += "| -------- | " + " | ".join(["----" for _ in product_metrics.keys()]) + " |\n"
                for class_name in sorted(class_names):
                    values = []
                    for alg in product_metrics.keys():
                        per_class = product_metrics[alg].get("class_metrics", {}).get(class_name, {})
                        values.append(fmt(per_class.get("accuracy", float('nan'))))
                    report_content += f"| {class_name} | " + " | ".join(values) + " |\n"

        # Conclusions and recommendations.
        report_content += "\n## 结论与建议\n\n"

        # Pick the algorithm with the best average F1.
        best_algorithm = None
        best_f1 = -1
        for alg in avg_metrics.keys():
            alg_f1 = avg_metrics[alg].get("f1", 0)
            if alg_f1 > best_f1:
                best_f1 = alg_f1
                best_algorithm = alg

        if best_algorithm:
            report_content += f"根据评估结果，整体表现最好的算法是**{display_name(best_algorithm)}**，其综合F1分数为{best_f1:.4f}。\n\n"

        # Qualitative strengths of each algorithm.
        report_content += "### 各算法优势分析\n\n"
        report_content += f"1. **{self.algorithm_names['clip_voting']}**: "
        report_content += "这种方法依赖于相似图片检索和投票机制，在处理有充分参考样本的情况下表现较好，且计算速度快，但对新类型缺陷的泛化能力有限。\n\n"

        report_content += f"2. **{self.algorithm_names['vllm_only']}**: "
        report_content += "这种方法利用视觉语言模型的强大泛化能力，可以识别训练数据中未见过的缺陷类型，但可能缺乏对特定产品缺陷的专业知识。\n\n"

        report_content += f"3. **{self.algorithm_names['combined']}**: "
        report_content += "这种方法结合了检索增强和大模型推理的优势，通过知识库为大模型提供参考，提升了专业性和泛化能力，但计算成本较高。\n\n"

        # Persist the report.
        report_path = os.path.join(self.result_dir, "algorithm_comparison_report.md")
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report_content)

        print(f"算法比较报告已保存至: {report_path}")