import sys # Keep for now, might be needed by dependencies
import os
import torch
import numpy as np
from torchaudio.transforms import Resample
import audioread
import argparse
import time

# Removed sys.path modification as all files will be in the same directory

import pronunciationTrainer # This will be a local import
# import WordMatching as wm  # Not directly used in this simplified demo output part

# Copied from lambdaSpeechToScore.py (or original demo_eval.py)
def buf_to_float(x, n_bytes=2, dtype=np.float32):
    """Convert a buffer of signed little-endian PCM samples to floats in [-1, 1).

    `x` is the raw byte string, `n_bytes` the width of each sample in bytes,
    and `dtype` the floating-point type of the returned array.
    """
    # Full-scale magnitude of a signed integer of this width (e.g. 32768 for 16-bit).
    peak = float(1 << ((8 * n_bytes) - 1))
    # NumPy format string for a little-endian signed integer, e.g. "<i2".
    int_fmt = "<i{:d}".format(n_bytes)
    samples = np.frombuffer(x, int_fmt).astype(dtype)
    # Dividing by a power of two is exact in binary floating point,
    # so this matches multiplying by the reciprocal scale.
    return samples / peak

def audioread_load(path, offset=0.0, duration=None, dtype=np.float32):
    """Decode an audio file via audioread and return (samples, sample_rate).

    Parameters
    ----------
    path : str
        Path to the audio file; any format audioread's backends (e.g. ffmpeg)
        can decode.
    offset : float
        Start position in seconds.
    duration : float or None
        Length to read in seconds; None reads to end of file.
    dtype : numpy dtype
        Floating-point type of the returned samples.

    Returns
    -------
    (np.ndarray, int)
        Mono sample array (first channel only for multi-channel input) and
        the file's native sample rate. Returns (empty array, 0) if no audio
        could be read.

    Raises
    ------
    audioread.NoBackendError
        If no decoding backend is available.
    Exception
        Any other decoding error is logged and re-raised.
    """
    y = []
    # Pre-initialize so these are always bound; the original code probed
    # `locals()` for `sr_native`, but that branch was unreachable because
    # every failure path re-raises before reaching it.
    sr_native = 0
    n_channels = 1
    try:
        with audioread.audio_open(path) as input_file:
            sr_native = input_file.samplerate
            n_channels = input_file.channels
            # Start/end positions counted in interleaved samples
            # (frames * channels), matching how buffers arrive.
            s_start = int(np.round(sr_native * offset)) * n_channels
            if duration is None:
                s_end = np.inf  # read until EOF
            else:
                s_end = s_start + (int(np.round(sr_native * duration)) * n_channels)
            n = 0
            for frame in input_file:
                frame = buf_to_float(frame, dtype=dtype)
                n_prev = n
                n = n + len(frame)
                if n < s_start:
                    # Whole buffer precedes the requested offset.
                    continue
                if s_end < n_prev:
                    # Already past the requested end.
                    break
                if s_end < n:
                    # Trim the tail of the last in-range buffer
                    # (s_end is a finite int whenever this triggers).
                    frame = frame[: int(s_end - n_prev)]
                if n_prev <= s_start <= n:
                    # Trim the head of the first in-range buffer.
                    frame = frame[(s_start - n_prev):]
                y.append(frame)
    except audioread.NoBackendError:
        print(f"Error opening audio file {path}: No backend available. Ensure ffmpeg is installed and in PATH.")
        raise
    except Exception as e:
        print(f"An unexpected error occurred while reading {path}: {e}")
        raise

    if not y:
        print(f"Warning: Audio file {path} seems to be empty or could not be read.")
        return np.empty(0, dtype=dtype), 0

    y_concat = np.concatenate(y)
    if n_channels > 1:
        # De-interleave to (channels, samples) and keep the first channel
        # so the caller always receives mono audio.
        y_concat = y_concat.reshape((-1, n_channels)).T
        y_concat = y_concat[0]

    return y_concat, sr_native


def evaluate_pronunciation(audio_file_path, reference_text, language="en"):
    """Run pronunciation scoring on one audio file and print a report.

    Loads the audio, resamples it to 16 kHz mono, feeds it to the trainer
    returned by ``pronunciationTrainer.getTrainer`` and prints the overall
    and per-word scores.

    Parameters
    ----------
    audio_file_path : str
        Path to the recording to score.
    reference_text : str
        Text the speaker was supposed to read.
    language : str
        Language code passed to the trainer (e.g. "en", "de", "zh").

    Returns
    -------
    dict or None
        The trainer's result dict on success, None on any handled failure.
    """

    def _fmt_pct(value):
        # Format numeric scores as one decimal place; pass the 'N/A'
        # fallback through unchanged. The previous code applied ':.1f'
        # directly to result.get(..., 'N/A'), which raised ValueError
        # whenever the key was missing.
        return f"{value:.1f}" if isinstance(value, (int, float)) else str(value)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(f"将使用设备: {device}\n")

    print(f"评估文件: {audio_file_path}")
    print(f"参考文本: \"{reference_text}\"")
    print(f"语言: {language}\n")

    try:
        signal_np, fs_orig = audioread_load(audio_file_path)
        if fs_orig == 0 and signal_np.size == 0:
             print("错误: 无法加载音频或音频为空。")
             return None

        # Promote a 0-d scalar to a 1-element array so torch conversion works.
        if signal_np.ndim == 0:
            signal_np = np.array([signal_np.item()])

        signal_torch = torch.from_numpy(signal_np.astype(np.float32))

        # The trainer expects 16 kHz input; resample if the file differs.
        if fs_orig != 16000:
            print(f"原始采样率: {fs_orig} Hz, 重采样至 16000 Hz...")
            resampler = Resample(orig_freq=fs_orig, new_freq=16000)
            signal_torch = resampler(signal_torch)

        # Normalize the tensor to shape (1, samples): batch of one mono clip.
        if signal_torch.ndim == 0:
             signal_torch = signal_torch.unsqueeze(0)
        if signal_torch.ndim == 1:
            audio_tensor = signal_torch.unsqueeze(0)
        elif signal_torch.ndim == 2 and signal_torch.shape[0] == 1:
            audio_tensor = signal_torch
        elif signal_torch.ndim == 2 and signal_torch.shape[0] > 1:
             print(f"警告: 音频似乎是多通道 ({signal_torch.shape[0]} 通道), 将使用第一个通道。")
             audio_tensor = signal_torch[0, :].unsqueeze(0)
        else:
            print(f"错误: 音频张量形状处理后不符合预期: {signal_torch.shape}")
            return None

        print(f"处理后的音频张量形状: {audio_tensor.shape}")

        print("正在加载评估模型...")
        start_load_time = time.time()
        trainer = pronunciationTrainer.getTrainer(language, device=device)
        end_load_time = time.time()
        print(f"模型加载完毕。耗时: {end_load_time - start_load_time:.2f} 秒")

        print("正在进行发音评估...")
        start_eval_time = time.time()
        result = trainer.processAudioForGivenText(audio_tensor, reference_text)
        end_eval_time = time.time()
        processing_duration = end_eval_time - start_eval_time
        print(f"评估完成。耗时: {processing_duration:.2f} 秒\n")

        print(f"--- \"{reference_text}\" 的发音评估结果 ---")
        print(f"ASR 系统识别的文本: \"{result.get('recording_transcript', 'N/A')}\"")
        print(f"整体发音准确度: {result.get('pronunciation_accuracy', 'N/A')}%" )
        print(f"语速 (WPM): {result.get('wpm', 'N/A')}")
        print(f"多余词数: {result.get('num_extra_words', 'N/A')}")
        print(f"意外停顿次数: {result.get('unexpected_pauses_count', 'N/A')}")

        # Three component scores plus the overall score. Formatted via
        # _fmt_pct so a missing key prints 'N/A' instead of crashing.
        print(f"\n=== 综合评估分数 ===")
        print(f"完整度 (Completeness): {_fmt_pct(result.get('completeness', 'N/A'))}%")
        print(f"准确度 (Accuracy): {_fmt_pct(result.get('accuracy', 'N/A'))}%")
        print(f"流利度 (Fluency): {_fmt_pct(result.get('fluency', 'N/A'))}%")
        print(f"总分数 (Overall Score): {_fmt_pct(result.get('overall_score', 'N/A'))}%")

        word_accuracies = result.get('word_accuracies', [])
        pronunciation_categories = result.get('pronunciation_categories', [])
        word_statuses = result.get('word_statuses', [])

        category_map = {0: "优秀 (Good)", 1: "良好 (Okay)", 2: "需改进 (Needs Improvement)"}

        if word_statuses:
            print("\n详细单词评估:")
            omitted_count = 0
            for i, status_info in enumerate(word_statuses):
                real_word = status_info['real_word']
                transcribed_word = status_info['transcribed_word']
                status = status_info['status']

                word_info = f"  参考词: '{real_word}'"
                if status == 'omitted':
                    word_info += " -> (遗漏)"
                    omitted_count += 1
                else: # matched
                    word_info += f" -> 识别词: '{transcribed_word}'"

                # Parallel lists may be shorter than word_statuses; guard each.
                if i < len(word_accuracies):
                    word_info += f" | 单词准确度: {word_accuracies[i]:.2f}%"
                if i < len(pronunciation_categories):
                    word_info += f" | 类别: {category_map.get(pronunciation_categories[i], 'N/A')}"
                print(word_info)

            if omitted_count > 0:
                print(f"\n总计遗漏单词数: {omitted_count}")

        return result

    except FileNotFoundError:
        print(f"错误: 音频文件 '{audio_file_path}' 未找到。")
    except audioread.NoBackendError:
        print(f"错误: 读取音频文件 '{audio_file_path}' 失败。请确保 ffmpeg 已正确安装并添加到系统 PATH。")
    except Exception as e:
        print(f"评估过程中发生未知错误: {e}")
        import traceback
        traceback.print_exc()


if __name__ == "__main__":
    # Command-line front end for the pronunciation-scoring demo.
    cli = argparse.ArgumentParser(description="发音评估演示脚本")
    cli.add_argument("audio_file", type=str, help="音频文件路径 (例如: recording.wav)")
    cli.add_argument("reference_text", type=str, help="参考文本 (例如: 'hello world')")
    cli.add_argument(
        "--lang",
        type=str,
        default="en",
        choices=["en", "de", "zh"],
        help="语言代码 ('en', 'de', 'zh')",
    )

    opts = cli.parse_args()
    evaluate_pronunciation(opts.audio_file, opts.reference_text, opts.lang)