import torch
import numpy as np
import models
import WordMetrics
import WordMatching as wm
import epitran
import ModelInterfaces as mi
import AIModels
import RuleBasedModels
from string import punctuation
import time
import math

# Module-level cache of fully initialised Trainer instances, keyed by
# (language, device). Avoids reloading the heavy ASR model on every request.
_trainer_cache = {}

def getTrainer(language, device="cpu"):
    """
    Return a (cached) pronunciation trainer for the given language/device.

    Trainer instances wrap a large ASR model, so they are memoised in the
    module-level ``_trainer_cache``; repeated requests for the same
    (language, device) pair reuse the existing instance instead of
    reloading the model.

    Args:
        language (str): language code to evaluate (e.g. 'en', 'zh').
        device (str): compute device (e.g. 'cpu', 'cuda').

    Returns:
        PronunciationTrainer: a ready-to-use trainer instance.
    """
    # Normalise the device into a concrete string so the cache key is stable.
    if isinstance(device, str):
        actual_device = device
    else:
        actual_device = "cuda" if torch.cuda.is_available() else "cpu"

    cache_key = (language, actual_device)
    cached = _trainer_cache.get(cache_key)
    if cached is not None:
        print(f"[PronunciationTrainer] ✅ Returning cached trainer for lang='{language}' on device='{actual_device}'")
        return cached

    print(f"[PronunciationTrainer] Creating new trainer for lang='{language}' on device='{actual_device}'...")

    trainer = PronunciationTrainer(
        asr_model=models.getASRModel(language, use_whisper=True, device=actual_device),
        word_matcher=wm.WordMatcher(language),
        language=language,
    )

    # Memoise the freshly built instance for subsequent calls.
    _trainer_cache[cache_key] = trainer
    print(f"[PronunciationTrainer] ✨ New trainer created and cached.")

    return trainer


class PronunciationTrainer:
    """
    Core pronunciation-assessment class.

    Combines an ASR model, a word matcher and a rule-based IPA converter to
    score a recording against a reference text along three dimensions:
    completeness, accuracy and fluency.
    """
    current_transcript: str
    current_ipa: str

    current_recorded_audio: torch.Tensor
    current_recorded_transcript: str
    current_recorded_word_locations: list
    current_recorded_intonations: torch.Tensor
    # NOTE(review): mutable class-level attribute, shared across instances if
    # ever mutated in place; methods below only rebind a same-named local, so
    # this is effectively unused state kept for backward compatibility.
    current_words_pronunciation_accuracy = []
    # Reference points used to bucket a per-word accuracy into a category
    # index (0 = best); see getPronunciationCategoryFromAccuracy.
    categories_thresholds = np.array([80, 60, 59])

    sampling_rate = 16000

    def __init__(self, asr_model, word_matcher, language):
        """
        Args:
            asr_model: ASR backend exposing processAudio / getTranscript /
                getWordLocations / getDetectedLanguage.
            word_matcher: matcher exposing get_match_result(estimated, real).
            language (str): language code; 'en' and 'de' have IPA converters.

        Raises:
            NotImplementedError: if no IPA converter exists for `language`.
        """
        self.asr_model = asr_model
        self.word_matcher = word_matcher
        self.language = language
        # IPA conversion is rule-based and language specific.
        if language == 'en':
            self.phonem_converter = RuleBasedModels.EngPhonemConverter()
        elif language == 'de':
            self.phonem_converter = RuleBasedModels.EpitranPhonemConverter(epitran.Epitran('deu-Latn'))
        else:
            raise NotImplementedError(f"Language '{language}' does not have a supported IPA converter.")
        print(f"[PronunciationTrainer] Initialized for language: {language}")

    def getTranscriptAndWordsLocations(self, audio_length_in_samples: int):
        """
        Fetch the transcript and per-word sample ranges from the ASR model.

        Each word range is widened by a 50 ms fade on both sides and clamped
        to [0, audio_length_in_samples - 1]. Malformed entries (missing
        start/end timestamps) are skipped with a log message.

        Returns:
            tuple: (transcript str, list of (start_sample, end_sample) tuples).
        """
        audio_transcript = self.asr_model.getTranscript()
        raw_word_locations = self.asr_model.getWordLocations()

        fade_duration_in_samples = 0.05 * self.sampling_rate
        processed_locations_tuples = []
        if raw_word_locations:
            for word_info in raw_word_locations:
                if isinstance(word_info, dict) and \
                   word_info.get('start_ts') is not None and \
                   word_info.get('end_ts') is not None:
                    start_ts = int(np.maximum(0, word_info['start_ts'] - fade_duration_in_samples))
                    end_ts = int(np.minimum(audio_length_in_samples - 1, word_info['end_ts'] + fade_duration_in_samples))
                    processed_locations_tuples.append((start_ts, end_ts))
                else:
                    print(f"跳过格式错误的单词位置信息: {word_info}")
        return audio_transcript, processed_locations_tuples

    def getWordsRelativeIntonation(self, Audio: torch.Tensor, word_locations_tuples: list):
        """
        Compute per-word RMS energy, normalised by the mean over all words.

        Assumes Audio is shaped (channels, samples) and reads channel 0 —
        TODO confirm against the caller. Returns an (n_words, 1) tensor, or
        an empty tensor when no word locations are given.
        """
        if not word_locations_tuples:
            return torch.empty(0, 1)

        intonations = torch.zeros((len(word_locations_tuples), 1))
        # Widen each word window by 300 ms on both sides before measuring RMS.
        intonation_fade_samples = 0.3 * self.sampling_rate
        for word_idx, loc_tuple in enumerate(word_locations_tuples):
            if not (isinstance(loc_tuple, tuple) and len(loc_tuple) == 2):
                print(f"跳过音调分析中格式错误的位置信息: {loc_tuple}")
                intonations[word_idx] = 0  # default for malformed entries
                continue
            intonation_start = int(np.maximum(0, loc_tuple[0] - intonation_fade_samples))
            intonation_end = int(np.minimum(Audio.shape[1] - 1, loc_tuple[1] + intonation_fade_samples))
            if intonation_end > intonation_start:
                intonations[word_idx] = torch.sqrt(torch.mean(Audio[0][intonation_start:intonation_end]**2))
            else:
                intonations[word_idx] = 0

        # Normalise to relative intonation; skip division when near-silent.
        mean_intonation = torch.mean(intonations)
        if mean_intonation > 1e-6:
            intonations = intonations / mean_intonation
        return intonations

    def processAudioForGivenText(self, recordedAudio: torch.Tensor = None, real_text=None):
        """
        Score a recording against the reference text.

        Runs ASR, matches transcribed words to reference words, computes
        per-word IPA accuracy, completeness, fluency (WPM, extra words,
        pauses) and an overall weighted score. Applies special rules for
        language mismatch and single-word references.

        Returns:
            dict: scores, transcripts, word alignments and timing metadata.
        """
        # Move the recording to the best available device.
        # NOTE(review): assumes the ASR model accepts tensors on this device —
        # confirm against models.getASRModel.
        device = "cuda" if torch.cuda.is_available() else "cpu"
        if not isinstance(recordedAudio, torch.Tensor):
            recordedAudio = torch.tensor(recordedAudio, dtype=torch.float32)
        recordedAudio = recordedAudio.to(device)

        start_time_nn = time.time()
        recording_transcript, recording_ipa, word_locations_processed_tuples = self.getAudioTranscript(recordedAudio)
        print(f'神经网络转录音频耗时: {time.time()-start_time_nn:.4f}s')

        # Compare the configured language against the ASR-detected one.
        detected_lang = self.asr_model.getDetectedLanguage()
        print(f"[Language Detection] Expected: '{self.language}', Detected: '{detected_lang}'")

        if self.language != detected_lang:
            print(f"[Language Mismatch] ⚠️  检测到语言不匹配！")
            print(f"  期望语言: {self.language}")
            print(f"  检测语言: {detected_lang}")
            if self.language == 'en' and detected_lang == 'zh':
                print("[建议] 您似乎在说中文，但系统设置为英文评估模式。")
                print("       建议将API请求中的'lang'参数改为'zh'以获得更准确的评估。")
                # Score zeroing happens below, AFTER the scores are computed.
                # (Earlier assignments here were dead code: they were always
                # overwritten by the unconditional score computation.)

        # Informational language rules; none of these short-circuit scoring.
        if detected_lang == 'zh' and self.language == 'zh':
            print("[Language Rule] 检测到中文音频，系统配置为中文模式，正常处理。")
        elif detected_lang == 'zh' and self.language != 'zh':
            print("[Language Rule] 检测到中文音频但系统非中文模式。建议修改语言参数。")
        elif detected_lang != 'zh':
            print(f"[Language Rule] 检测到{detected_lang}语言音频，继续正常评估流程。")

        # Align reference words with transcribed words via the word matcher.
        real_and_transcribed_words, real_and_transcribed_words_ipa, mapped_words_indices, word_statuses, num_extra_words = self.matchSampleAndRecordedWords(
            real_text, recording_transcript)

        start_time_str, end_time_str = self.getWordLocationsFromRecordInSeconds(
            word_locations_processed_tuples, mapped_words_indices)

        pronunciation_accuracy, current_words_pronunciation_accuracy = self.getPronunciationAccuracy(
            real_and_transcribed_words_ipa)

        pronunciation_categories = self.getWordsPronunciationCategory(
            current_words_pronunciation_accuracy)

        wpm = self._estimate_wpm(recording_transcript)

        pause_analysis = self.analyze_pauses(self.asr_model.getWordLocations())

        # The three score dimensions.
        words_real = real_text.split() if real_text else []
        completeness_score = self.compute_completeness_score(words_real, word_statuses)
        accuracy_score = pronunciation_accuracy
        fluency_score = self.compute_fluency_score(wpm, num_extra_words, pause_analysis['unexpected_pauses_count'])

        # Language mismatch: zero completeness and accuracy (fluency still
        # reflects pace). Fix: accuracy_score is now actually used downstream;
        # previously it was assigned here but ignored by the overall score.
        if self.language != detected_lang:
            if self.language == 'en' and detected_lang == 'zh':
                print("[语言不匹配处理] 由于语言不匹配，完整度和准确度设为0分")
                completeness_score = 0.0
                accuracy_score = 0.0

        # Single-word references: decide on a severe mismatch using both the
        # text match status and the pronunciation accuracy, so homophones with
        # good pronunciation are not over-penalised.
        if len(words_real) == 1 and word_statuses:
            status = word_statuses[0]['status']
            clean_real_word = self.removePunctuation(words_real[0]).lower()
            clean_transcribed_word = self.removePunctuation(word_statuses[0]['transcribed_word']).lower()

            word_pronunciation_accuracy = current_words_pronunciation_accuracy[0] if current_words_pronunciation_accuracy else 0

            is_severe_mismatch = False

            if status == 'omitted':
                # Nothing was recognised at all — severe.
                is_severe_mismatch = True
                print(f"[Single Word Rule] Word completely omitted. Status: {status}. Scores forced to 0.")
            elif word_pronunciation_accuracy < 50:
                # Very low phoneme accuracy — genuinely the wrong word.
                is_severe_mismatch = True
                print(f"[Single Word Rule] Poor pronunciation accuracy ({word_pronunciation_accuracy:.1f}%). Real: '{clean_real_word}', Transcribed: '{clean_transcribed_word}'. Scores forced to 0.")
            elif clean_real_word != clean_transcribed_word:
                # Text differs but pronunciation is good — likely a homophone.
                print(f"[Single Word Rule] Text mismatch but good pronunciation ({word_pronunciation_accuracy:.1f}%). Real: '{clean_real_word}', Transcribed: '{clean_transcribed_word}'. Allowing normal scoring.")

            if is_severe_mismatch:
                completeness_score = 0.0
                accuracy_score = 0.0

        # Single-word simplification: completeness and fluency mirror the
        # (possibly zeroed) accuracy score.
        if len(words_real) == 1:
            print(f"[Single Word Simplification] 单词测评模式：完整度和流利度都以准确度为准 (accuracy: {accuracy_score})")
            completeness_score = accuracy_score
            fluency_score = accuracy_score

        overall_score = self.compute_overall_score(completeness_score, accuracy_score, fluency_score)

        result = {
            'pronunciation_accuracy': pronunciation_accuracy,  # raw value, kept for backward compatibility
            'completeness': completeness_score,
            'accuracy': accuracy_score,  # fix: reflects mismatch/severe-word zeroing
            'fluency': fluency_score,
            'overall_score': overall_score,
            'word_accuracies': current_words_pronunciation_accuracy,
            'recording_transcript': recording_transcript,
            'recording_ipa': recording_ipa,
            'real_and_transcribed_words': real_and_transcribed_words,
            'real_and_transcribed_words_ipa': real_and_transcribed_words_ipa,
            'pronunciation_categories': pronunciation_categories,
            'word_statuses': word_statuses,
            'start_time': start_time_str,
            'end_time': end_time_str,
            'num_extra_words': num_extra_words,
            'unexpected_pauses_count': pause_analysis['unexpected_pauses_count'],
            'wpm': wpm
        }
        return result

    def _estimate_wpm(self, recording_transcript: str) -> int:
        """
        Estimate words-per-minute from the ASR word timestamps.

        Returns 0 when no transcript or no usable timestamps exist. Fix: the
        recognised-word count is bound before the duration branch — the old
        code raised NameError when the measured duration was <= 0.
        """
        wpm = 0
        if recording_transcript and self.asr_model.getWordLocations():
            recognized_words = recording_transcript.split()
            if recognized_words:
                valid_word_locations = [
                    loc for loc in self.asr_model.getWordLocations()
                    if isinstance(loc, dict) and loc.get('start_ts') is not None and loc.get('end_ts') is not None
                ]
                if valid_word_locations:
                    first_word_start_asr_ticks = valid_word_locations[0]['start_ts']
                    last_word_end_asr_ticks = valid_word_locations[-1]['end_ts']

                    duration_ticks = last_word_end_asr_ticks - first_word_start_asr_ticks
                    duration_seconds = duration_ticks / self.sampling_rate

                    num_recognized_words = len(recognized_words)
                    if duration_seconds > 0:
                        wpm = int(round((num_recognized_words / duration_seconds) * 60))
                    elif num_recognized_words > 0:
                        # Zero/negative measured duration: fall back to a
                        # nominal one-second-per-utterance estimate.
                        wpm = num_recognized_words * 60
        return wpm

    def getAudioTranscript(self, recordedAudio: torch.Tensor = None):
        """
        Run ASR on the (preprocessed) recording.

        Returns:
            tuple: (transcript, IPA of transcript, list of word sample ranges).

        Raises:
            ValueError: if recordedAudio is None.
        """
        if recordedAudio is None:
            raise ValueError("recordedAudio must be provided to getAudioTranscript")

        current_recorded_audio = self.preprocessAudio(recordedAudio)
        self.asr_model.processAudio(current_recorded_audio, language=self.language)

        audio_transcript, word_locations_processed_tuples = self.getTranscriptAndWordsLocations(
            current_recorded_audio.shape[1]
        )
        current_recorded_ipa = self.phonem_converter.convertToPhonem(audio_transcript)
        return audio_transcript, current_recorded_ipa, word_locations_processed_tuples

    def getWordLocationsFromRecordInSeconds(self, word_locations_tuples: list, mapped_words_indices: list) -> tuple:
        """
        Convert mapped word sample ranges into space-separated second strings.

        Indices outside [0, len(word_locations_tuples)) are ignored; malformed
        tuples are skipped with a log message.

        Returns:
            tuple: ("s1 s2 ...", "e1 e2 ...") with times formatted to 2 dp.
        """
        start_time_list = []
        end_time_list = []
        valid_indices = [idx for idx in mapped_words_indices if idx >= 0 and idx < len(word_locations_tuples)]
        for mapped_idx in valid_indices:
            loc_tuple = word_locations_tuples[mapped_idx]
            if isinstance(loc_tuple, tuple) and len(loc_tuple) == 2:
                start_time_list.append(float(loc_tuple[0]) / self.sampling_rate)
                end_time_list.append(float(loc_tuple[1]) / self.sampling_rate)
            else:
                print(f"跳过时间转换中格式错误的位置元组: {loc_tuple}")
        return ' '.join([f"{t:.2f}" for t in start_time_list]), ' '.join([f"{t:.2f}" for t in end_time_list])

    def matchSampleAndRecordedWords(self, real_text, recorded_transcript):
        """
        Align transcribed words to reference words and derive statuses.

        Words the matcher maps to '-' are marked 'omitted'; everything else is
        'matched'. IPA is produced per word pair for downstream scoring.

        Returns:
            tuple: (word pairs, IPA pairs, mapped indices, statuses,
                    count of transcribed words not aligned to any reference word).
        """
        words_estimated = recorded_transcript.split()
        if not words_estimated: words_estimated = []
        if real_text is None: real_text = ""
        words_real = real_text.split()
        if not words_real: words_real = []

        mapped_words, mapped_words_indices = self.word_matcher.get_match_result(words_estimated, words_real)

        word_statuses, processed_real_and_transcribed_words, processed_real_and_transcribed_words_ipa = [], [], []

        for word_idx in range(len(words_real)):
            real_word_text = words_real[word_idx]
            transcribed_counterpart = mapped_words[word_idx] if word_idx < len(mapped_words) else '-'
            status = 'omitted' if transcribed_counterpart == '-' else 'matched'
            word_statuses.append({'real_word': real_word_text, 'status': status, 'transcribed_word': transcribed_counterpart})
            processed_real_and_transcribed_words.append((real_word_text, transcribed_counterpart))

            ipa_real = self.phonem_converter.convertToPhonem(real_word_text)
            # Omitted words get empty IPA so they score as full mismatches.
            ipa_transcribed = self.phonem_converter.convertToPhonem(transcribed_counterpart if status == 'matched' else "")
            processed_real_and_transcribed_words_ipa.append((ipa_real, ipa_transcribed))

        num_estimated_words_aligned = sum(1 for mw in mapped_words if mw != '-')
        num_extra_words = max(0, len(words_estimated) - num_estimated_words_aligned)
        return processed_real_and_transcribed_words, processed_real_and_transcribed_words_ipa, mapped_words_indices, word_statuses, num_extra_words

    def getPronunciationAccuracy(self, real_and_transcribed_words_ipa) -> tuple:
        """
        Score IPA word pairs with a similarity-weighted edit distance.

        Phonetically close substitutions (voiced/unvoiced pairs, close
        vowels) cost less than a full edit.

        Returns:
            tuple: (overall accuracy rounded to an integer percentage,
                    list of per-word accuracy percentages).
        """
        total_mismatches = 0.
        number_of_phonemes = 0.
        current_words_pronunciation_accuracy = []

        # Phoneme similarity map, grounded in articulatory features:
        # lower cost = more similar.
        phoneme_similarity = {
            # plosive pairs (voicing contrast)
            ('p', 'b'): 0.3, ('b', 'p'): 0.3,
            ('t', 'd'): 0.3, ('d', 't'): 0.3,
            ('k', 'g'): 0.3, ('g', 'k'): 0.3,
            # fricative pairs
            ('f', 'v'): 0.3, ('v', 'f'): 0.3,
            ('θ', 'ð'): 0.3, ('ð', 'θ'): 0.3,
            ('s', 'z'): 0.3, ('z', 's'): 0.3,
            # close vowel pairs
            ('ɪ', 'i'): 0.2, ('i', 'ɪ'): 0.2,
            ('ɛ', 'e'): 0.2, ('e', 'ɛ'): 0.2,
            ('ʌ', 'ə'): 0.2, ('ə', 'ʌ'): 0.2,
            ('ɔ', 'o'): 0.2, ('o', 'ɔ'): 0.2,
        }

        for pair in real_and_transcribed_words_ipa:
            real_ipa = self.removePunctuation(pair[0]).lower()
            transcribed_ipa = self.removePunctuation(pair[1]).lower()

            if not real_ipa:
                # Empty reference: perfect only if nothing was transcribed.
                word_accuracy = 100.0 if not transcribed_ipa else 0.0
                current_words_pronunciation_accuracy.append(word_accuracy)
                continue

            number_of_word_mismatches = self._weighted_phoneme_distance(
                real_ipa, transcribed_ipa, phoneme_similarity)
            total_mismatches += number_of_word_mismatches
            number_of_phonemes_in_word = len(real_ipa)
            number_of_phonemes += number_of_phonemes_in_word

            if number_of_phonemes_in_word > 0:
                word_accuracy = max(0, float(
                    number_of_phonemes_in_word - number_of_word_mismatches) / number_of_phonemes_in_word * 100)
            else:
                word_accuracy = 0.0 if number_of_word_mismatches > 0 else 100.0
            current_words_pronunciation_accuracy.append(word_accuracy)

        if number_of_phonemes > 0:
            percentage_of_correct_pronunciations = max(0, (number_of_phonemes-total_mismatches)/number_of_phonemes*100)
        else:
            # No scorable phonemes: average any per-word scores, or default
            # to 100 when there were no word pairs at all.
            if current_words_pronunciation_accuracy:
                percentage_of_correct_pronunciations = np.mean(current_words_pronunciation_accuracy)
            else:
                percentage_of_correct_pronunciations = 100.0

        return np.round(percentage_of_correct_pronunciations), current_words_pronunciation_accuracy

    def _weighted_phoneme_distance(self, s1: str, s2: str, similarity_map: dict) -> float:
        """
        Similarity-weighted Levenshtein distance (Wagner–Fischer DP).

        Substituting a pair present in similarity_map costs that mapped value
        instead of 1; insertions and deletions always cost 1.
        """
        m, n = len(s1), len(s2)
        dp = np.zeros((m + 1, n + 1))

        # Base cases: transforming to/from the empty string.
        for i in range(m + 1):
            dp[i][0] = i
        for j in range(n + 1):
            dp[0][j] = j

        for i in range(1, m + 1):
            for j in range(1, n + 1):
                if s1[i-1] == s2[j-1]:
                    dp[i][j] = dp[i-1][j-1]  # exact match, no cost
                else:
                    # Substitution cost, discounted for similar phonemes.
                    substitution_cost = similarity_map.get((s1[i-1], s2[j-1]), 1.0)

                    dp[i][j] = min(
                        dp[i-1][j] + 1,                   # deletion
                        dp[i][j-1] + 1,                   # insertion
                        dp[i-1][j-1] + substitution_cost  # weighted substitution
                    )

        return dp[m][n]

    def removePunctuation(self, word: str) -> str:
        """Strip all ASCII punctuation characters from `word`."""
        return ''.join([char for char in word if char not in punctuation])

    def getWordsPronunciationCategory(self, accuracies) -> list:
        """Map each per-word accuracy to its category index."""
        categories = []
        for accuracy in accuracies:
            categories.append(
                self.getPronunciationCategoryFromAccuracy(accuracy))
        return categories

    def getPronunciationCategoryFromAccuracy(self, accuracy) -> int:
        """Return the index of the threshold closest to `accuracy` (0 = best)."""
        return np.argmin(abs(self.categories_thresholds-accuracy))

    def preprocessAudio(self, audio: torch.Tensor) -> torch.Tensor:
        """Remove DC offset and peak-normalise; skip division for near-silence."""
        audio = audio-torch.mean(audio)
        audio_max = torch.max(torch.abs(audio))
        if audio_max > 1e-6:
            audio = audio/audio_max
        return audio

    def analyze_pauses(self, asr_word_locations, long_pause_threshold_seconds=0.7):
        """
        Count inter-word silences longer than the threshold.

        Args:
            asr_word_locations: list of dicts with 'start_ts'/'end_ts' sample
                timestamps; malformed entries are ignored.
            long_pause_threshold_seconds (float): silence length counted as
                an unexpected pause.

        Returns:
            dict: {'unexpected_pauses_count': int}
        """
        unexpected_pauses_count = 0
        if not asr_word_locations or len(asr_word_locations) < 2:
            return {'unexpected_pauses_count': 0}

        valid_locations = sorted(
            [loc for loc in asr_word_locations if isinstance(loc, dict) and 'start_ts' in loc and 'end_ts' in loc and loc['start_ts'] is not None and loc['end_ts'] is not None],
            key=lambda x: x['start_ts']
        )

        if len(valid_locations) < 2:
            return {'unexpected_pauses_count': 0}

        for i in range(len(valid_locations) - 1):
            current_word_end_ts = valid_locations[i]['end_ts']
            next_word_start_ts = valid_locations[i+1]['start_ts']

            if current_word_end_ts is None or next_word_start_ts is None:
                continue

            silence_duration_ticks = next_word_start_ts - current_word_end_ts
            silence_duration_seconds = silence_duration_ticks / self.sampling_rate

            if silence_duration_seconds > long_pause_threshold_seconds:
                unexpected_pauses_count += 1

        return {'unexpected_pauses_count': unexpected_pauses_count}

    def compute_completeness_score(self, words_real, word_statuses):
        """
        Weighted completeness: spoken-word weight / total weight * 100.

        Function words (articles, prepositions, auxiliaries) weigh 0.5,
        content words weigh 1.0. An empty reference scores 100.
        """
        if not words_real:
            return 100.0

        # Function words contribute less to intelligibility.
        function_words = {
            'the', 'a', 'an', 'and', 'or', 'but', 'in', 'on', 'at', 'to', 'of', 'for',
            'with', 'by', 'from', 'up', 'about', 'into', 'through', 'during', 'before',
            'after', 'above', 'below', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
            'have', 'has', 'had', 'do', 'does', 'did', 'will', 'would', 'could', 'should'
        }

        total_weight = 0
        spoken_weight = 0

        for i, word in enumerate(words_real):
            weight = 0.5 if word.lower() in function_words else 1.0
            total_weight += weight

            # The word counts as spoken unless its status is 'omitted'.
            if i < len(word_statuses) and word_statuses[i].get('status') != 'omitted':
                spoken_weight += weight

        if total_weight == 0:
            return 100.0

        completeness = (spoken_weight / total_weight) * 100
        return min(100.0, max(0.0, completeness))

    def compute_fluency_score(self, wpm, num_extra_words, unexpected_pauses_count):
        """
        Lenient fluency score in [80, 100].

        Combines a generous WPM band score with small, capped penalties for
        extra words (max 15) and long pauses (max 12), plus a bonus for
        clean performances; the floor is 80.
        """
        # 1. Pace score — deliberately generous bands.
        if wpm <= 0:
            wpm_score = 85  # no measurable pace still earns a base score
        elif wpm >= 60 and wpm <= 200:
            wpm_score = 100  # comfortable speaking range: full marks
        elif wpm >= 30 and wpm < 60:
            wpm_score = 85 + (wpm - 30) * 0.5  # linear ramp 85 -> 100
        elif wpm > 200 and wpm <= 300:
            wpm_score = 100 - (wpm - 200) * 0.1  # mild deduction, 100 -> 90
        elif wpm > 300:
            wpm_score = max(80, 90 - (wpm - 300) * 0.05)  # gentle cap below 90
        else:
            wpm_score = max(70, 85 - (30 - wpm) * 0.5)  # very slow speech

        # 2. Extra-word penalty — small and capped at 15 points.
        if num_extra_words == 0:
            extra_word_penalty = 0
        elif num_extra_words <= 3:
            extra_word_penalty = num_extra_words * 1
        elif num_extra_words <= 5:
            extra_word_penalty = 3 + (num_extra_words - 3) * 2
        else:
            extra_word_penalty = 7 + (num_extra_words - 5) * 1.5
        extra_word_penalty = min(15, extra_word_penalty)

        # 3. Pause penalty — small and capped at 12 points.
        if unexpected_pauses_count == 0:
            pause_penalty = 0
        elif unexpected_pauses_count <= 2:
            pause_penalty = unexpected_pauses_count * 1
        elif unexpected_pauses_count <= 4:
            pause_penalty = 2 + (unexpected_pauses_count - 2) * 2
        else:
            pause_penalty = 6 + (unexpected_pauses_count - 4) * 1

        pause_penalty = min(12, pause_penalty)

        # 4. Combine.
        fluency = wpm_score - extra_word_penalty - pause_penalty

        # 5. Bonus for (near-)perfect delivery.
        if num_extra_words == 0 and unexpected_pauses_count == 0:
            fluency += 5
        elif num_extra_words <= 1 and unexpected_pauses_count <= 1:
            fluency += 3

        # 6. Enforce the floor and ceiling.
        fluency = max(80, fluency)

        return min(100.0, fluency)

    def compute_overall_score(self, completeness, accuracy, fluency):
        """
        Weighted overall score in [0, 100].

        Weights adapt to the score distribution (a very low dimension or a
        wide spread changes the blend), and a small consistency bonus/penalty
        is applied when all three dimensions agree.
        """
        scores = np.array([completeness, accuracy, fluency])

        min_score = np.min(scores)
        score_std = np.std(scores)

        if min_score < 40:  # one dimension is extremely low
            # Conservative blend: lean on accuracy, damp the outlier.
            weights = [0.35, 0.45, 0.20]
        elif score_std > 25:  # widely spread scores
            # Near-equal blend so no single dimension dominates.
            weights = [0.33, 0.34, 0.33]
        else:
            # Standard blend: accuracy weighted highest.
            weights = [0.30, 0.45, 0.25]

        weighted_score = np.sum(scores * weights)

        # Consistency adjustment: uniformly high scores get a small bonus,
        # uniformly low scores a small penalty.
        if np.all(scores >= 80):
            consistency_bonus = min(5, (np.mean(scores) - 80) * 0.2)
            weighted_score += consistency_bonus
        elif np.all(scores <= 50):
            consistency_penalty = min(5, (50 - np.mean(scores)) * 0.1)
            weighted_score -= consistency_penalty

        return max(0.0, min(100.0, weighted_score))