import torch
import numpy as np
import time
import math
import re
from typing import List
import model_wrappers, text_processors, WordMatching as wm
from model_interfaces import IASRModel, ITextToPhonemModel

# --- Factory function ---
def get_gushi_trainer(device: str = None):
    """Build a ready-to-use GushiTrainer.

    :param device: torch device string (e.g. "cpu", "cuda"); when None,
        CUDA is selected automatically if available, otherwise CPU.
    :return: a GushiTrainer wired with the ASR model and pinyin converter.
    """
    if device is None:
        device = "cuda" if torch.cuda.is_available() else "cpu"
    selected_device = torch.device(device)
    print(f"[GushiTrainer] 使用设备: {selected_device}")
    # Assemble the trainer from the project-provided model wrappers.
    return GushiTrainer(
        model_wrappers.get_asr_model(device=selected_device),
        text_processors.get_phonem_converter(),
        selected_device,
    )

# --- Core evaluation class ---
class GushiTrainer:
    """Score Chinese classical-poem recitations transcribed by an ASR model.

    Two tasks are supported: 'reading' (pronunciation accuracy, completeness
    and fluency scoring) and 'memorization' (omission / insertion /
    substitution check). All per-character comparison is done on the cleaned
    Chinese-character sequences of the reference and the transcript.
    """
    # Expected sample rate (Hz) of audio tensors passed to evaluate();
    # also used to convert sample-index timestamps to seconds.
    sampling_rate = 16000

    def __init__(self, asr_model: IASRModel, pinyin_converter: ITextToPhonemModel, device: torch.device):
        # asr_model: produces transcript text plus per-word timing locations.
        # pinyin_converter: maps a Chinese character to its pinyin (e.g. 'hao3').
        self.device = device
        self.asr_model = asr_model
        self.pinyin_converter = pinyin_converter

    def evaluate(self, audio_tensor: torch.Tensor, reference_text: str, task: str = 'reading'):
        """
        Main evaluation entry point.
        :param audio_tensor: preprocessed audio tensor
        :param reference_text: reference poem text
        :param task: task type, 'reading' (reading assessment) or 'memorization' (recitation check)
        :return: dict with the evaluation result; on any internal error a
                 zero-scored result dict carrying an 'error' key is returned
                 instead of raising.
        """
        try:
            # 1. Transcribe the audio with the ASR model (stateful: results
            #    are fetched via getters after processAudio).
            self.asr_model.processAudio(audio_tensor, self.sampling_rate)
            transcript = self.asr_model.getTranscript()
            word_locations = self.asr_model.getWordLocations()

            print(f"[DEBUG] 原始转录: '{transcript}'")
            print(f"[DEBUG] 参考文本: '{reference_text}'")

            # Clean both reference and recognized text, keeping Chinese characters only.
            ref_chars = self._get_chinese_chars(reference_text)
            transcript_chars = self._get_chinese_chars(transcript)

            print(f"[DEBUG] 参考字符: {ref_chars}")
            print(f"[DEBUG] 转录字符: {transcript_chars}")

            # 2. Text alignment.
            # Align the transcript characters against the reference characters.
            # mapped_indices is unused in this method.
            mapped_chars, mapped_indices = wm.get_best_mapped_words(transcript_chars, ref_chars)

            print(f"[DEBUG] 对齐结果: {mapped_chars}")

            # 3. Dispatch to the task-specific scorer.
            if task == 'reading':
                return self.reading_assessment(ref_chars, transcript_chars, mapped_chars, word_locations)
            elif task == 'memorization':
                return self.memorization_check(ref_chars, transcript_chars, mapped_chars)
            else:
                raise ValueError(f"未知的任务类型: {task}")

        except Exception as e:
            print(f"[ERROR] 评测过程发生错误: {e}")
            import traceback
            traceback.print_exc()
            # Return a default zero-scored result instead of propagating.
            return {
                'task': task,
                'error': str(e),
                'overall_score': 0,
                'accuracy': 0,
                'completeness': 0,
                'fluency': 0,
                'details': {
                    'transcript': '',
                    'word_statuses': [],
                }
            }

    def reading_assessment(self, ref_chars, transcript_chars, mapped_chars, word_locations):
        """Assess reading quality: accuracy, completeness, fluency and an overall score."""
        # 1. Accuracy — computed first because word_accuracies feeds the
        #    four-level per-character grading below.
        accuracy, word_accuracies = self._compute_pronunciation_accuracy(ref_chars, mapped_chars)

        # 2. Per-character statuses, graded on the four-level scale.
        word_statuses = self._get_word_statuses(ref_chars, mapped_chars, word_accuracies)

        # 3. Completeness.
        completeness = self._compute_completeness_score(word_statuses)

        # 4. Fluency.
        wpm, pauses = self._analyze_fluency(word_locations)
        fluency = self._compute_fluency_score(wpm, transcript_chars, ref_chars, pauses)

        # 5. Weighted overall score.
        overall_score = self._compute_overall_score(completeness, accuracy, fluency)

        # 6. Distribution of the four-level grades.
        grade_stats = self._compute_grade_statistics(word_statuses)

        return {
            'task': '朗读评测',
            'overall_score': overall_score,
            'accuracy': accuracy,
            'completeness': completeness,
            'fluency': fluency,
            'details': {
                'wpm': wpm,
                'unexpected_pauses': pauses,
                'transcript': "".join(transcript_chars),
                'word_statuses': word_statuses,
                'word_accuracies': word_accuracies,
                'grade_statistics': grade_stats  # four-level grade statistics
            }
        }

    def memorization_check(self, ref_chars, transcript_chars, mapped_chars):
        """Check memorization accuracy: count omissions, insertions and substitutions."""
        # Per-character accuracy scores are also computed here, solely to
        # drive the four-level grading; the overall accuracy is discarded.
        _, word_accuracies = self._compute_pronunciation_accuracy(ref_chars, mapped_chars)
        word_statuses = self._get_word_statuses(ref_chars, mapped_chars, word_accuracies)

        omissions = sum(1 for s in word_statuses if s['status'] == 'omitted')
        substitutions = sum(1 for s in word_statuses if s['status'] == 'substituted')

        # Insertions = total recognized chars - chars matched to the reference
        # (each non-omitted reference char consumes one transcript char).
        insertions = len(transcript_chars) - (len(ref_chars) - omissions)
        insertions = max(0, insertions)

        total_chars = len(ref_chars)
        correct_chars = total_chars - omissions - substitutions
        memorization_accuracy = (correct_chars / total_chars) * 100 if total_chars > 0 else 0

        # Distribution of the four-level grades.
        grade_stats = self._compute_grade_statistics(word_statuses)

        return {
            'task': '背诵检查',
            'memorization_accuracy': memorization_accuracy,
            'details': {
                'total_chars': total_chars,
                'correct_chars': correct_chars,
                'omissions': omissions,      # missed characters
                'insertions': insertions,    # extra characters
                'substitutions': substitutions, # wrong characters
                'transcript': "".join(transcript_chars),
                'word_statuses': word_statuses,
                'grade_statistics': grade_stats  # four-level grade statistics
            }
        }

    # --- Private helpers ---
    def _get_chinese_chars(self, text: str) -> List[str]:
        """Extract all Chinese (CJK unified, U+4E00–U+9FA5) characters as a list."""
        if not text:
            return []
        return re.findall(r'[\u4e00-\u9fa5]', text)

    def _get_word_statuses(self, ref_chars, mapped_chars, word_accuracies=None):
        """
        Build the per-character status list, including the four-level grade.
        :param ref_chars: reference character list
        :param mapped_chars: aligned character list (same length as ref_chars)
        :param word_accuracies: per-character accuracy scores, or None
        :return: list of dicts with ref_char, transcript_char, status, grade, score
        """
        statuses = []
        for i, ref_char in enumerate(ref_chars):
            mapped_char = mapped_chars[i]

            # Base status classification.
            if mapped_char == wm.WORD_NOT_FOUND_TOKEN:
                status = "omitted"  # missed character
                grade = "Error/Missing"
                score = 0
            elif mapped_char != ref_char:
                status = "substituted"  # wrong/replaced character
                grade = "Error/Missing"
                score = 0
            else:
                status = "matched"  # exact character match
                # Grade by the per-character accuracy score.
                if word_accuracies and i < len(word_accuracies):
                    score = word_accuracies[i]
                    if score >= 90:
                        grade = "Excellent"
                    elif score >= 75:
                        grade = "Good"
                    elif score >= 60:
                        grade = "Average"
                    else:
                        grade = "Error/Missing"
                else:
                    score = 100  # no scores supplied: exact match counts as 100
                    grade = "Excellent"

            statuses.append({
                'ref_char': ref_char,
                'transcript_char': mapped_char if mapped_char != wm.WORD_NOT_FOUND_TOKEN else "-",
                'status': status,
                'grade': grade,  # four-level grade
                'score': round(score, 1)  # concrete score
            })
        return statuses

    def _compute_pronunciation_accuracy(self, ref_chars, mapped_chars):
        """Compute pronunciation accuracy from pinyin initial/final/tone distances.

        Returns (overall_accuracy 0-100, per-character accuracy list).
        """
        # Similarity weights for commonly confused initial/final pairs:
        # a confused pair costs the listed fraction instead of a full 1.0.
        phoneme_similarity = {
            ('z', 'zh'): 0.3, ('c', 'ch'): 0.3, ('s', 'sh'): 0.3,
            ('zh', 'z'): 0.3, ('ch', 'c'): 0.3, ('sh', 's'): 0.3,
            ('n', 'l'): 0.4, ('l', 'n'): 0.4,
            ('f', 'h'): 0.4, ('h', 'f'): 0.4,
            ('an', 'ang'): 0.3, ('ang', 'an'): 0.3,
            ('en', 'eng'): 0.3, ('eng', 'en'): 0.3,
            ('in', 'ing'): 0.3, ('ing', 'in'): 0.3,
        }

        word_accuracies = []
        total_distance = 0
        total_length = 0

        for i, ref_char in enumerate(ref_chars):
            mapped_char = mapped_chars[i]

            # Omitted characters score 0 individually but are excluded from
            # the overall denominator (the `continue` skips the totals).
            if mapped_char == wm.WORD_NOT_FOUND_TOKEN:
                word_accuracies.append(0)
                continue

            try:
                p_ref = self.pinyin_converter.convertToPhonem(ref_char)
                p_map = self.pinyin_converter.convertToPhonem(mapped_char)

                if not p_ref or not p_map:
                    word_accuracies.append(0)
                    continue
            except Exception as e:
                print(f"[ERROR] 拼音转换失败: {e}, ref_char='{ref_char}', mapped_char='{mapped_char}'")
                word_accuracies.append(0)
                continue

            # Split each pinyin into initial, final and tone.
            r_s, r_y, r_d = self._split_pinyin(p_ref)
            m_s, m_y, m_d = self._split_pinyin(p_map)

            # Weighted distance: similar pairs cost their table weight,
            # any other mismatch costs 1, an exact match costs 0.
            dist = 0
            dist += phoneme_similarity.get((r_s, m_s), 1 if r_s != m_s else 0)
            dist += phoneme_similarity.get((r_y, m_y), 1 if r_y != m_y else 0)
            dist += 0.5 if r_d != m_d else 0 # a tone error costs 0.5

            max_len = 2.5 # initial(1) + final(1) + tone(0.5)
            word_acc = max(0, (max_len - dist) / max_len) * 100
            word_accuracies.append(word_acc)

            total_distance += dist
            total_length += max_len

        overall_accuracy = max(0, (total_length - total_distance) / total_length) * 100 if total_length > 0 else 0
        return overall_accuracy, word_accuracies

    def _split_pinyin(self, pinyin_str: str) -> "tuple[str, str, str]":
        """Split 'hao3' into ('h', 'ao', '3'): (initial, final, tone digit)."""
        if not pinyin_str:
            return "", "", ""

        shengmu_list = ['b','p','m','f','d','t','n','l','g','k','h','j','q','x','r','z','c','s','zh','ch','sh','y','w']
        shengmu = ""
        yunmu_tone = ""

        # Try the two-character initials ('zh', 'ch', 'sh') before the
        # single-character ones so 'zhong' is not split as 'z' + 'hong'.
        if len(pinyin_str) > 1 and pinyin_str[:2] in shengmu_list:
            shengmu = pinyin_str[:2]
            yunmu_tone = pinyin_str[2:]
        elif len(pinyin_str) > 0 and pinyin_str[0] in shengmu_list:
            shengmu = pinyin_str[0]
            yunmu_tone = pinyin_str[1:]
        else:
            yunmu_tone = pinyin_str

        # A trailing digit, if present, is the tone marker.
        tone = ""
        yunmu = yunmu_tone
        if yunmu_tone and yunmu_tone[-1].isdigit():
            tone = yunmu_tone[-1]
            yunmu = yunmu_tone[:-1]

        return shengmu, yunmu, tone

    def _compute_grade_statistics(self, word_statuses):
        """
        Count the distribution of the four-level grades.
        :param word_statuses: per-character status list
        :return: dict with total count, per-grade counts/percentages and a summary string
        """
        stats = {
            "Excellent": 0,
            "Good": 0,
            "Average": 0,
            "Error/Missing": 0
        }

        total_chars = len(word_statuses)

        for status in word_statuses:
            grade = status.get('grade', 'Error/Missing')
            if grade in stats:
                stats[grade] += 1

        # Convert counts to percentages.
        percentages = {}
        for grade, count in stats.items():
            percentages[grade] = {
                "count": count,
                "percentage": round((count / total_chars) * 100, 1) if total_chars > 0 else 0
            }

        return {
            "total_chars": total_chars,
            "grade_distribution": percentages,
            "summary": f"Excellent: {stats['Excellent']}字, Good: {stats['Good']}字, Average: {stats['Average']}字, Error/Missing: {stats['Error/Missing']}字"
        }

    def _compute_completeness_score(self, word_statuses):
        """Completeness score. Every character of a poem matters, so this is
        simply (total - omitted) / total as a percentage."""
        total_chars = len(word_statuses)
        omitted = sum(1 for s in word_statuses if s['status'] == 'omitted')
        completeness = ((total_chars - omitted) / total_chars) * 100 if total_chars > 0 else 0
        return completeness

    def _analyze_fluency(self, word_locations):
        """Analyze fluency; return (WPM, number of improper pauses)."""
        if not word_locations:
            # No word timing info: fall back to sensible defaults.
            print("[DEBUG] 没有词位置信息，使用默认WPM")
            return 150, 0  # default 150 chars/minute, no pauses

        # Compute WPM ("words" per minute; here characters per minute).
        num_words = len(word_locations)

        try:
            # start_ts / end_ts are sample indices, hence the division by
            # sampling_rate to obtain seconds.
            first_word_start = word_locations[0]['start_ts']
            last_word_end = word_locations[-1]['end_ts']
            duration_sec = (last_word_end - first_word_start) / self.sampling_rate

            print(f"[DEBUG] 音频时长: {duration_sec:.2f}秒, 字数: {num_words}")

            if duration_sec <= 0:
                # Abnormal duration: use a fallback estimate.
                print("[DEBUG] 时长异常，使用估算WPM")
                estimated_wpm = 150  # reasonable default
            else:
                estimated_wpm = (num_words / duration_sec) * 60

            # Clamp WPM to a plausible range (50–400 chars/minute).
            wpm = max(50, min(400, estimated_wpm))

        except (KeyError, IndexError, TypeError) as e:
            print(f"[DEBUG] WPM计算出错: {e}，使用默认值")
            wpm = 150

        # Detect improper pauses between consecutive characters.
        pauses = 0
        try:
            for i in range(len(word_locations) - 1):
                pause_duration = (word_locations[i+1]['start_ts'] - word_locations[i]['end_ts']) / self.sampling_rate
                # Longer pauses are allowed between verse lines; within a line
                # a pause should not exceed 1.2 s (relaxed threshold).
                if pause_duration > 1.2:
                    pauses += 1
        except (KeyError, IndexError, TypeError):
            pauses = 0

        print(f"[DEBUG] 计算得到 WPM: {wpm}, 停顿: {pauses}")
        return round(wpm), pauses

    def _compute_fluency_score(self, wpm, transcript_chars, ref_chars, pauses):
        """Compute the fluency score (deliberately lenient grading)."""
        print(f"[DEBUG] 流利度评分输入: WPM={wpm}, 停顿={pauses}, 转录字数={len(transcript_chars)}, 参考字数={len(ref_chars)}")

        # Lenient ideal speaking-rate range (chars/minute).
        WPM_LOW = 80    # lowered floor (was 120)
        WPM_HIGH = 300  # raised ceiling (was 250)

        # Rate score — lenient grading.
        if wpm < WPM_LOW:
            # Even very slow reading gets a base score.
            wpm_score = max(60, (wpm / WPM_LOW) * 90)  # floor 60, cap 90
        elif wpm > WPM_HIGH:
            # Reduced penalty for reading too fast.
            wpm_score = max(70, 100 - (wpm - WPM_HIGH) / 20)  # floor 70
        else:
            # Full marks inside the ideal range.
            wpm_score = 100

        # Reduced pause penalty.
        pause_penalty = pauses * 5  # reduced (was 10)

        # Reduced penalty for inserted/omitted characters.
        extra_word_penalty = abs(len(transcript_chars) - len(ref_chars)) * 2  # reduced (was 5)

        # Final score, with a guaranteed floor of 60.
        fluency_score = max(60, wpm_score - pause_penalty - extra_word_penalty)

        print(f"[DEBUG] 流利度评分详情: 语速分={wpm_score}, 停顿惩罚={pause_penalty}, 增减字惩罚={extra_word_penalty}, 最终分={fluency_score}")
        return fluency_score

    def _compute_overall_score(self, completeness, accuracy, fluency):
        """Compute the overall score. Weights can be tuned per business needs."""
        # Weights: accuracy > completeness > fluency.
        weights = {'accuracy': 0.5, 'completeness': 0.3, 'fluency': 0.2}

        score = (accuracy * weights['accuracy'] +
                 completeness * weights['completeness'] +
                 fluency * weights['fluency'])
        return score