#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import logging
import re
import torch
import jieba
import nltk
from typing import Dict, Any, List, Set, Tuple

# Configure logging first so that the NLTK path setup below can emit log records
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Configure the NLTK data path, preferring the nltk_data directory under the patient folder
def set_nltk_data_path():
    """Prepend the patient-folder nltk_data directory to NLTK's search path.

    Logs an error (without raising) if the directory does not exist, and is
    idempotent: the path is only inserted when not already present.
    """
    patient_nltk_path = '/mnt/ssd/jsj/patient/nltk_data'
    logger.info(f"尝试添加NLTK数据路径: {patient_nltk_path}")
    # Guard clause: nothing to do when the directory is absent.
    if not os.path.exists(patient_nltk_path):
        logger.error(f"路径不存在: {patient_nltk_path}")
        return
    logger.info(f"路径存在，检查是否在数据路径列表中")
    if patient_nltk_path in nltk.data.path:
        logger.info(f"路径已经在NLTK数据路径列表中: {patient_nltk_path}")
    else:
        # Insert at the front so this directory wins over system-wide data.
        nltk.data.path.insert(0, patient_nltk_path)
        logger.info(f"已添加路径到NLTK数据路径列表: {patient_nltk_path}")

# Initialize the NLTK data path at import time. Best-effort: a failure is
# logged but never fatal, since the NLTK-based metrics degrade gracefully
# later on.
try:
    set_nltk_data_path()
except Exception as e:
    logger.warning(f"设置NLTK数据路径时出错: {str(e)}")

class Evaluator:
    """
    Text-similarity evaluator.

    Computes a suite of similarity metrics between a reference text and a
    hypothesis text: SentenceTransformer cosine similarity, BERTScore,
    Chinese Sentence-BERT similarity, BLEU-1..4, ROUGE-1/2/L and METEOR.
    Models are preloaded once in __init__ and cached on the instance; every
    calculate_* method degrades gracefully on failure (fallback metric or a
    zero score) rather than raising.
    """

    def __init__(self, config):
        """
        Initialize the evaluator and preload all models.

        Args:
            config: configuration object; only its ``DEVICE`` attribute is
                read. May be None, in which case the device is auto-detected.
        """
        self.config = config

        # Point jieba's cache at a directory we can write to, so it does not
        # try to create its cache file under a possibly read-only /tmp.
        jieba_cache_dir = os.environ.get("JIEBA_CACHE_DIR", "/mnt/ssd/jsj/jieba_cache")
        os.environ["JIEBA_CACHE_DIR"] = jieba_cache_dir

        # Make sure the cache directory exists, then warm up jieba.
        try:
            os.makedirs(jieba_cache_dir, exist_ok=True)
            logger.info(f"Jieba缓存目录设置为: {jieba_cache_dir}")
            jieba.initialize()
        except Exception as e:
            logger.warning(f"无法创建Jieba缓存目录或初始化jieba，可能会影响中文分词性能: {str(e)}")

        # Model caches: None means "not loaded yet". The calculate_* methods
        # lazily load any model that preloading could not provide.
        self.sentence_transformer_model = None
        self.chinese_sentence_transformer_model = None
        self.bertscore_model_path = None
        self.bertscore_device = None

        # Preload models up front so later metric calls are fast.
        self._preload_models()

    def _preload_models(self):
        """Resolve the BERT model path and device, then preload both SentenceTransformer models."""
        try:
            # Force offline mode so transformers/huggingface_hub only look at
            # local files.
            os.environ['TRANSFORMERS_OFFLINE'] = '1'
            os.environ['HF_HUB_OFFLINE'] = '1'
            logger.info("已设置离线模式，强制使用本地模型")

            # Local Chinese BERT checkpoint used for BERTScore.
            self.bertscore_model_path = '/mnt/ssd/jsj/models/models/tiansz/bert-base-chinese'
            logger.info(f"使用本地BERT模型: {self.bertscore_model_path}")

            # If the local checkpoint is incomplete, fall back to the online
            # model name and lift the offline restriction.
            if not self._validate_model_path(self.bertscore_model_path):
                logger.error("本地BERT模型路径无效，尝试回退到在线模型")
                os.environ.pop('TRANSFORMERS_OFFLINE', None)
                os.environ.pop('HF_HUB_OFFLINE', None)
                self.bertscore_model_path = 'bert-base-chinese'
                logger.info(f"回退到在线BERT模型: {self.bertscore_model_path}")

            # Pick the device: prefer config.DEVICE, otherwise auto-detect.
            if self.config is None or not hasattr(self.config, 'DEVICE'):
                self.bertscore_device = 'cuda' if torch.cuda.is_available() else 'cpu'
                logger.info(f"未指定设备，自动检测为: {self.bertscore_device}")
            else:
                self.bertscore_device = self.config.DEVICE
                logger.info(f"使用配置中的设备: {self.bertscore_device}")

            # Preload the sentence-embedding models on the resolved device.
            try:
                from sentence_transformers import SentenceTransformer
                device = self.bertscore_device

                # General-purpose SentenceTransformer (used by
                # calculate_cosine_similarity).
                local_model_path = "/mnt/ssd/jsj/models/models/sentence-transformers/all-MiniLM-L6-v2"
                logger.info(f"预加载SentenceTransformer模型: {local_model_path}")
                self.sentence_transformer_model = SentenceTransformer(local_model_path, device=device)
                logger.info(f"SentenceTransformer模型预加载完成，使用设备: {device}")

                # Chinese Sentence-BERT model (used by
                # calculate_chinese_sentence_bert_similarity).
                chinese_st_model_path = "/mnt/ssd/jsj/models/models/Jerry0/text2vec-large-chinese"
                logger.info(f"预加载中文Sentence-BERT模型: {chinese_st_model_path}")
                self.chinese_sentence_transformer_model = SentenceTransformer(chinese_st_model_path, device=device)
                logger.info(f"中文Sentence-BERT模型预加载完成，使用设备: {device}")

            except Exception as e:
                logger.warning(f"模型预加载失败: {str(e)}")
                # Both model attributes were initialized to None in __init__
                # before this method runs, so a model that failed to load
                # simply stays None and is loaded lazily by the calculate_*
                # methods. (The previous hasattr checks here were dead code.)
        except Exception as e:
            logger.error(f"模型预加载过程中发生错误: {str(e)}")

    def _validate_model_path(self, path):
        """Check that *path* looks like a complete local HF BERT checkpoint.

        Requires config.json, vocab.txt and at least one weight file
        (pytorch_model.bin or model.safetensors). Tokenizer files are
        recommended but only produce a warning when absent.

        Args:
            path: filesystem path of the checkpoint directory.

        Returns:
            True if the checkpoint is usable, False otherwise.
        """
        if not os.path.exists(path):
            logger.error(f"模型路径不存在: {path}")
            return False

        # Mandatory metadata files.
        required_files = ['config.json', 'vocab.txt']
        # Either weight format is acceptable (safetensors supersedes .bin).
        weight_files = ['pytorch_model.bin', 'model.safetensors']

        missing = [name for name in required_files
                   if not os.path.exists(os.path.join(path, name))]

        has_weight_file = any(os.path.exists(os.path.join(path, name))
                              for name in weight_files)
        if not has_weight_file:
            missing.append(f"(至少需要一个权重文件: {', '.join(weight_files)})")

        if missing:
            logger.error(f"模型路径不完整，缺少文件: {', '.join(missing)}")
            return False

        # Tokenizer files are optional; warn so slow fallbacks are explained.
        tokenizer_files = ['tokenizer.json', 'tokenizer_config.json']
        if not any(os.path.exists(os.path.join(path, name)) for name in tokenizer_files):
            logger.warning(f"缺少tokenizer文件，可能影响分词性能: {', '.join(tokenizer_files)}")

        logger.info(f"模型路径验证通过: {path}")
        return True

    def calculate_cosine_similarity(self, reference: str, hypothesis: str) -> float:
        """
        Cosine similarity between sentence embeddings of the two texts.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            Cosine similarity rounded to 4 decimals; 0.0 on any failure.
        """
        try:
            from sentence_transformers import SentenceTransformer
            from sklearn.metrics.pairwise import cosine_similarity

            # Force offline mode (local models only).
            os.environ['TRANSFORMERS_OFFLINE'] = '1'
            os.environ['HF_HUB_OFFLINE'] = '1'

            # Lazily load the model if preloading failed.
            if self.sentence_transformer_model is None:
                local_model_path = "/mnt/ssd/jsj/models/models/sentence-transformers/all-MiniLM-L6-v2"
                logger.info(f"加载SentenceTransformer模型: {local_model_path}")
                # _preload_models always sets bertscore_device, so it is safe
                # to use directly here; keeps this model on the same device
                # as BERTScore.
                device = self.bertscore_device
                self.sentence_transformer_model = SentenceTransformer(local_model_path, device=device)
                logger.info(f"SentenceTransformer模型加载完成，使用设备: {device}")

            ref_embedding = self.sentence_transformer_model.encode([reference])
            hyp_embedding = self.sentence_transformer_model.encode([hypothesis])

            # float() converts the numpy scalar to a plain Python float so the
            # result serializes cleanly (consistent with calculate_bleu).
            similarity = cosine_similarity(ref_embedding, hyp_embedding)[0][0]
            return round(float(similarity), 4)

        except ImportError as e:
            logger.error(f"无法导入所需的库: {str(e)}")
            return 0.0
        except Exception as e:
            logger.error(f"计算余弦相似度时发生错误: {str(e)}")
            return 0.0

    def calculate_chinese_sentence_bert_similarity(self, reference: str, hypothesis: str) -> float:
        """
        Chinese Sentence-BERT similarity (local Jerry0/text2vec-large-chinese model).

        Falls back to calculate_cosine_similarity whenever the Chinese model
        cannot be loaded or the computation fails.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            Similarity rounded to 4 decimals.
        """
        try:
            from sentence_transformers import SentenceTransformer
            from sklearn.metrics.pairwise import cosine_similarity

            # Force offline mode (local models only).
            os.environ['TRANSFORMERS_OFFLINE'] = '1'
            os.environ['HF_HUB_OFFLINE'] = '1'

            # Lazily load the Chinese model if preloading failed.
            if self.chinese_sentence_transformer_model is None:
                local_model_path = "/mnt/ssd/jsj/models/models/Jerry0/text2vec-large-chinese"
                logger.info(f"加载中文Sentence-BERT模型: {local_model_path}")
                # Defensive device fallback in case _preload_models bailed out
                # before setting bertscore_device.
                device = self.bertscore_device if hasattr(self, 'bertscore_device') else ('cuda' if torch.cuda.is_available() else 'cpu')

                try:
                    self.chinese_sentence_transformer_model = SentenceTransformer(local_model_path, device=device)
                    logger.info(f"中文Sentence-BERT模型加载完成，使用设备: {device}")
                except Exception as e:
                    logger.error(f"加载中文Sentence-BERT模型失败: {str(e)}")
                    # Fall back to the general-purpose cosine similarity.
                    return self.calculate_cosine_similarity(reference, hypothesis)

            ref_embedding = self.chinese_sentence_transformer_model.encode([reference])
            hyp_embedding = self.chinese_sentence_transformer_model.encode([hypothesis])

            # Plain float for clean serialization (see calculate_cosine_similarity).
            similarity = cosine_similarity(ref_embedding, hyp_embedding)[0][0]
            return round(float(similarity), 4)

        except ImportError as e:
            logger.error(f"无法导入所需的库: {str(e)}")
            return self.calculate_cosine_similarity(reference, hypothesis)
        except Exception as e:
            logger.error(f"计算中文Sentence-BERT相似度时发生错误: {str(e)}")
            return self.calculate_cosine_similarity(reference, hypothesis)

    @staticmethod
    def _truncate_with_tokenizer(text, tokenizer, max_len=512):
        """Truncate *text* to at most max_len tokens, reserving 2 slots for [CLS]/[SEP].

        Needed because the installed bert-score version does not accept a
        max_length argument, so inputs must be shortened before scoring.
        """
        # Encode without special tokens; they are re-added by the model.
        encoded = tokenizer.encode(text, add_special_tokens=False)
        max_available = max_len - 2
        if len(encoded) > max_available:
            encoded = encoded[:max_available]
        return tokenizer.decode(encoded, skip_special_tokens=True)

    def calculate_bertscore(self, reference: str, hypothesis: str) -> float:
        """
        BERTScore F1 between the two texts (compatible with older bert-score).

        Falls back to _simple_similarity_score on import errors, OOM, tensor
        size mismatches, or any other failure; meta-tensor errors trigger one
        retry on CPU before falling back.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            BERTScore F1 rounded to 4 decimals.
        """
        try:
            import time
            from bert_score import score

            start_time = time.time()

            with torch.no_grad():
                # Manually truncate with the model's own tokenizer (the
                # installed bert_score does not support max_length).
                from transformers import AutoTokenizer
                tokenizer = AutoTokenizer.from_pretrained(self.bertscore_model_path)

                truncated_hypothesis = self._truncate_with_tokenizer(hypothesis, tokenizer)
                truncated_reference = self._truncate_with_tokenizer(reference, tokenizer)

                P, R, F1 = score(
                    [truncated_hypothesis],
                    [truncated_reference],
                    model_type=self.bertscore_model_path,
                    num_layers=8,
                    lang="zh",
                    device=self.bertscore_device,
                    verbose=False
                )

            # NOTE(review): this is a post-hoc duration check, not a real
            # timeout — the score above has already been computed by now; the
            # fallback only replaces an implausibly slow result.
            if time.time() - start_time > 10:
                logger.warning("BERTScore计算超时，切换到简化计算方法")
                return self._simple_similarity_score(reference, hypothesis)

            score_value = round(float(F1.item()), 4)
            logger.debug(f"BERTScore计算结果: {score_value}")
            return score_value

        except ImportError as e:
            logger.error(f"无法导入bert_score库: {str(e)}")
            return self._simple_similarity_score(reference, hypothesis)
        except RuntimeError as e:
            error_msg = str(e)
            # Out of (GPU) memory: degrade to the cheap fallback metric.
            if "CUDA out of memory" in error_msg or "memory" in error_msg.lower():
                logger.warning(f"计算BERTScore时内存不足: {error_msg}，切换到简化计算方法")
                return self._simple_similarity_score(reference, hypothesis)
            # Meta-tensor errors usually mean the model never landed on the
            # requested GPU; retry once on CPU.
            if "meta tensor" in error_msg or "no data" in error_msg.lower():
                logger.warning(f"计算BERTScore时出现meta tensor错误: {error_msg}，这可能是因为GPU设备冲突。尝试在CPU上重新计算...")
                try:
                    from transformers import AutoTokenizer
                    tokenizer = AutoTokenizer.from_pretrained(self.bertscore_model_path)

                    # Use the same token-level truncation as the main path.
                    # (The original CPU retry used a different truncation that
                    # did not reserve room for [CLS]/[SEP], and passed an
                    # unsupported max_length argument to score().)
                    truncated_hypothesis = self._truncate_with_tokenizer(hypothesis, tokenizer)
                    truncated_reference = self._truncate_with_tokenizer(reference, tokenizer)

                    P, R, F1 = score(
                        [truncated_hypothesis],
                        [truncated_reference],
                        model_type=self.bertscore_model_path,
                        num_layers=8,
                        lang="zh",
                        device="cpu",
                        verbose=False
                    )
                    return round(float(F1.item()), 4)
                except Exception:
                    logger.warning("CPU上计算也失败，切换到简化计算方法")
                    return self._simple_similarity_score(reference, hypothesis)
            # Tensor shape mismatches are typically caused by over-long input.
            if "size mismatch" in error_msg or "must match the existing size" in error_msg:
                logger.warning(f"计算BERTScore时出现张量维度不匹配错误: {error_msg}，这可能是因为输入文本过长。切换到简化计算方法...")
                return self._simple_similarity_score(reference, hypothesis)
            logger.error(f"计算BERTScore时发生运行时错误: {error_msg}")
            return self._simple_similarity_score(reference, hypothesis)
        except Exception as e:
            logger.error(f"计算BERTScore时发生错误: {str(e)}")
            return self._simple_similarity_score(reference, hypothesis)

    def _simple_similarity_score(self, reference: str, hypothesis: str) -> float:
        """
        Cheap character-level Jaccard similarity, used as the BERTScore fallback.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            |chars(ref) ∩ chars(hyp)| / |chars(ref) ∪ chars(hyp)|, rounded to
            4 decimals; 0.0 for empty input or any failure.
        """
        try:
            # Empty input yields zero similarity by definition.
            if not reference or not hypothesis:
                return 0.0

            ref_chars = set(reference)
            hyp_chars = set(hypothesis)
            intersection = len(ref_chars.intersection(hyp_chars))
            union = len(ref_chars.union(hyp_chars))

            if union == 0:
                return 0.0

            return round(intersection / union, 4)
        except Exception:
            return 0.0

    def calculate_bleu(self, reference: str, hypothesis: str) -> Dict[str, float]:
        """
        BLEU-1 through BLEU-4 scores over jieba word tokens.

        Uses SmoothingFunction().method4, which behaves well on short texts.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            Dict with keys 'bleu-1'..'bleu-4' (all 0.0 on failure).
        """
        try:
            from nltk.translate.bleu_score import sentence_bleu, SmoothingFunction

            # Tokenize with jieba when available; fall back to characters.
            try:
                import jieba
                reference_tokens = list(jieba.cut(reference))
                hypothesis_tokens = list(jieba.cut(hypothesis))
                logger.debug("使用jieba进行中文分词")
            except ImportError:
                logger.warning("未安装jieba，使用字分词")
                reference_tokens = list(reference)
                hypothesis_tokens = list(hypothesis)

            # Smoothing suited to short sentences.
            smoothie = SmoothingFunction().method4

            # n-gram weight vectors for BLEU-1..4.
            weight_sets = {
                'bleu-1': (1.0, 0.0, 0.0, 0.0),
                'bleu-2': (0.5, 0.5, 0.0, 0.0),
                'bleu-3': (0.333, 0.333, 0.334, 0.0),
                'bleu-4': (0.25, 0.25, 0.25, 0.25),
            }
            # float() guarantees plain Python floats in the result.
            return {
                name: float(round(sentence_bleu(
                    [reference_tokens],
                    hypothesis_tokens,
                    weights=weights,
                    smoothing_function=smoothie
                ), 4))
                for name, weights in weight_sets.items()
            }
        except ImportError as e:
            logger.error(f"无法导入所需的库: {str(e)}")
            return {'bleu-1': 0.0, 'bleu-2': 0.0, 'bleu-3': 0.0, 'bleu-4': 0.0}
        except Exception as e:
            logger.error(f"计算BLEU分数时发生错误: {str(e)}")
            return {'bleu-1': 0.0, 'bleu-2': 0.0, 'bleu-3': 0.0, 'bleu-4': 0.0}

    def calculate_rouge(self, reference: str, hypothesis: str) -> Dict[str, float]:
        """
        ROUGE-1/2/L F1 scores via rouge_score with a character-based Chinese tokenizer.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            Dict with keys 'rouge-1', 'rouge-2', 'rouge-l' (all 0.0 on failure).
        """
        try:
            # Project-local character-level tokenizer for Chinese.
            from custom_tokenizer import ChineseTokenizer
            tokenizer = ChineseTokenizer(char_based=True)

            from rouge_score import rouge_scorer
            scorer = rouge_scorer.RougeScorer(
                ['rouge1', 'rouge2', 'rougeL'],
                use_stemmer=False,
                tokenizer=tokenizer  # custom tokenizer instead of the default English one
            )

            scores = scorer.score(reference, hypothesis)

            return {
                'rouge-1': round(scores['rouge1'].fmeasure, 4),
                'rouge-2': round(scores['rouge2'].fmeasure, 4),
                'rouge-l': round(scores['rougeL'].fmeasure, 4),
            }
        except ImportError as e:
            logger.error(f"无法导入rouge_score库或自定义分词器: {str(e)}")
            return {'rouge-1': 0.0, 'rouge-2': 0.0, 'rouge-l': 0.0}
        except Exception as e:
            # The original only caught ImportError, letting scoring errors
            # propagate; return zeros instead, consistent with every sibling
            # metric method.
            logger.error(f"计算ROUGE分数时发生错误: {str(e)}")
            return {'rouge-1': 0.0, 'rouge-2': 0.0, 'rouge-l': 0.0}

    def calculate_meteor(self, reference: str, hypothesis: str) -> float:
        """
        METEOR score over jieba word tokens (standard NLTK implementation).

        Args:
            reference: reference text.
            hypothesis: text under evaluation.

        Returns:
            METEOR score rounded to 4 decimals; 0.0 on any failure.
        """
        try:
            import nltk
            from nltk.translate import meteor_score

            # Make sure the patient-folder NLTK data is found first on the
            # search path (wordnet etc. live there).
            patient_nltk_path = '/mnt/ssd/jsj/patient/nltk_data'
            if patient_nltk_path not in nltk.data.path:
                nltk.data.path.insert(0, patient_nltk_path)
                logger.info(f"已添加NLTK数据路径: {patient_nltk_path}")

            try:
                # Tokenize with jieba when available; fall back to characters.
                try:
                    import jieba
                    reference_tokens = list(jieba.cut(reference))
                    hypothesis_tokens = list(jieba.cut(hypothesis))
                    logger.debug("使用jieba进行中文分词")
                except ImportError:
                    logger.warning("未安装jieba，使用字分词")
                    reference_tokens = list(reference)
                    hypothesis_tokens = list(hypothesis)

                meteor = meteor_score.single_meteor_score(reference_tokens, hypothesis_tokens)
                return round(meteor, 4)
            except Exception as meteor_error:
                # Any failure inside METEOR itself (e.g. missing wordnet
                # data) degrades to 0.0 rather than aborting the evaluation.
                logger.warning(f"METEOR评分计算失败: {str(meteor_error)}")
                return 0.0
        except ImportError as e:
            logger.error(f"无法导入所需的库: {str(e)}")
            return 0.0
        except Exception as e:
            logger.error(f"计算METEOR分数时发生错误: {str(e)}")
            return 0.0

    def calculate_all_metrics(
        self, 
        reference: str, 
        hypothesis: str, 
        config: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Compute every supported similarity metric for one text pair.

        Args:
            reference: reference text.
            hypothesis: text under evaluation.
            config: evaluation configuration. NOTE(review): currently unused
                inside this method; kept for interface compatibility.

        Returns:
            Dict with the input texts, 'cosine_similarity', 'bertscore',
            'chinese_sentence_bert_similarity', 'bleu' (sub-dict),
            'rouge-1'/'rouge-2'/'rouge-l', 'meteor', and a (currently empty)
            'text_metrics' dict kept for backward compatibility.
        """
        results = {
            'reference': reference,
            'hypothesis': hypothesis,
            'text_metrics': {}
        }

        # Core embedding-based similarity metrics.
        results['cosine_similarity'] = self.calculate_cosine_similarity(reference, hypothesis)
        results['bertscore'] = self.calculate_bertscore(reference, hypothesis)

        # Chinese-specific embedding similarity.
        results['chinese_sentence_bert_similarity'] = self.calculate_chinese_sentence_bert_similarity(reference, hypothesis)

        # N-gram overlap metrics.
        results['bleu'] = self.calculate_bleu(reference, hypothesis)

        # ROUGE returns several sub-metrics; merge them in at the top level.
        rouge_scores = self.calculate_rouge(reference, hypothesis)
        results.update(rouge_scores)

        results['meteor'] = self.calculate_meteor(reference, hypothesis)

        return results

# Lazy singleton so the legacy function-style API below does not reload the
# (expensive) models on every call.
_evaluator_instance = None

def get_evaluator_instance(config=None):
    """Return the global Evaluator, creating it lazily on first use.

    Args:
        config: optional configuration forwarded to Evaluator on first
            creation; ignored on later calls. Defaults to None (Evaluator
            auto-detects its device in that case) so the zero-argument calls
            in the legacy wrappers below no longer raise TypeError.
    """
    global _evaluator_instance
    if _evaluator_instance is None:
        logger.info("创建全局Evaluator实例")
        _evaluator_instance = Evaluator(config)
    return _evaluator_instance

def calculate_cosine_similarity(reference: str, hypothesis: str) -> float:
    """Module-level wrapper around Evaluator.calculate_cosine_similarity."""
    # Pass config=None explicitly: get_evaluator_instance declares a required
    # positional config parameter, so the original zero-argument call raised
    # TypeError before any metric could be computed.
    return get_evaluator_instance(None).calculate_cosine_similarity(reference, hypothesis)

def calculate_bertscore(reference: str, hypothesis: str) -> float:
    """Module-level wrapper around Evaluator.calculate_bertscore."""
    # Pass config=None explicitly: get_evaluator_instance declares a required
    # positional config parameter, so the original zero-argument call raised
    # TypeError before any metric could be computed.
    return get_evaluator_instance(None).calculate_bertscore(reference, hypothesis)

def calculate_all_metrics(reference: str, hypothesis: str, config: Dict[str, Any]) -> Dict[str, Any]:
    """Module-level wrapper: compute every metric via the shared Evaluator singleton."""
    evaluator = get_evaluator_instance(config)
    return evaluator.calculate_all_metrics(reference, hypothesis, config)