import subprocess
import re
import uuid
import subprocess
from fastapi import FastAPI, HTTPException, BackgroundTasks, UploadFile, File
from fastapi.responses import FileResponse, JSONResponse
from pydantic import BaseModel
import os
import sys
import tempfile
import soundfile as sf
import numpy as np
import datetime
import time
import logging
import whisper
from typing import List, Dict, Any, Optional

# Add the project root to the Python path (inserted at the front so the
# local copy of the package takes precedence over any installed version).
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

# Configure the eSpeak NG location (Windows install path).
espeak_path = r"C:\Program Files\eSpeak NG"  # raw string avoids backslash-escape issues
espeak_exe = os.path.join(espeak_path, "espeak-ng.exe")

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Verify that the eSpeak NG executable exists at the configured path;
# synthesis falls back to `espeak` on PATH if it does not.
if not os.path.isfile(espeak_exe):
    print(f"警告: 未在 {espeak_exe} 找到eSpeak NG可执行文件")
    logger.warning(f"未在 {espeak_exe} 找到eSpeak NG可执行文件")

# Export the executable location for phonemizer, and extend PATH so child
# processes can also locate eSpeak NG.
os.environ["PHONEMIZER_ESPEAK_PATH"] = espeak_exe
if espeak_path not in os.environ["PATH"]:
    os.environ["PATH"] += ";" + espeak_path
    print(f"已将eSpeak路径添加到系统PATH: {espeak_path}")
    logger.info(f"已将eSpeak路径添加到系统PATH: {espeak_path}")

# Use the China-accessible Hugging Face mirror for any model downloads.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Initialize the FastAPI application.
app = FastAPI(title="Kitten TTS API (eSpeak)", description="Kitten TTS语音合成API服务 (使用eSpeak作为后端)", version="1.0")

# TTS request model
class TTSRequest(BaseModel):
    """Request body for POST /api/tts."""
    text: str  # text to synthesize
    voice: str = "default"  # friendly voice name or raw eSpeak voice ID
    speed: float = 1.0  # speaking-rate multiplier; must be > 0
    language: str = "en-us"  # language code (logged by the endpoint; not passed to eSpeak in this file)
    stress_pattern: Optional[str] = None  # stress pattern string, e.g. "'hello" stresses the first syllable
    syllable_variation: Optional[float] = None  # syllable-variation amount (0.0-1.0)
    use_default_pitch_variation: bool = False  # whether to apply the default pitch-variation rules

# Phonetic-transcription request model
class PhoneticsRequest(BaseModel):
    """Request body for POST /api/phonetics."""
    text: str  # text to convert into phonemes

# Spelling-audio request model
class SpellingAudioRequest(BaseModel):
    """Request body for POST /api/spelling-audio."""
    word: str  # word to spell out letter by letter
    voice: str = "default"  # friendly voice name or raw eSpeak voice ID
    speed: float = 1.0  # speaking-rate multiplier; must be > 0

# Advanced word-spelling request model
class AdvancedSpellingRequest(BaseModel):
    """Request body for POST /api/advanced-spelling-audio."""
    word: str  # word to spell out
    mode: str = "letter_by_letter"  # letter_by_letter, letter_name, phonemic, combination, crfsuite
    with_pronunciation: bool = False  # (declared option; not read by the endpoint in this file)
    letter_separator: str = " "  # (declared option; not read by the endpoint in this file)
    with_word_pronunciation: bool = False  # (declared option; not read by the endpoint in this file)
    voice: str = "default"  # friendly voice name or raw eSpeak voice ID
    speed: float = 1.0  # speaking-rate multiplier; must be > 0
    emphasis: Optional[int] = None  # emphasis strength (0-5); None uses eSpeak's default
    stress_pattern: Optional[str] = None  # stress pattern string, e.g. "'hello" stresses the first syllable
    letter_gap: Optional[int] = 100  # pause between letters in milliseconds

class PhonicsSpeaker:
    """Thin wrapper around the eSpeak NG command-line tool.

    Provides plain text-to-speech, letter-by-letter word spelling with
    pitch/stress control, and a naive table-based text-to-phoneme lookup.
    Audio is written as WAV files into ``output_audio`` next to this module.
    """

    # Map friendly voice names to eSpeak voice IDs that exist on the target
    # system and do not require mbrola support (a missing mbrola.dll would
    # otherwise make synthesis fail).
    VOICE_MAPPING = {
        'female': 'en-gb',      # female-sounding voice (British English)
        'male': 'en-us',        # male-sounding voice (American English)
        'newyork': 'en-us-nyc'  # New York City English
    }

    def __init__(self):
        self.rate = 150         # speaking rate
        self.pitch = 50         # base pitch
        self.volume = 100       # amplitude (0-200)
        self.word_gap = 10      # gap between words (units of 10 ms)
        self.emphasis = 2       # sentence emphasis strength (0-5)
        self.voice = "default"  # default voice
        self.audio_output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "output_audio")
        os.makedirs(self.audio_output_dir, exist_ok=True)

    def _espeak_command(self) -> str:
        """Return the eSpeak executable to invoke.

        Prefers the absolute path configured at module import time
        (``espeak_exe``); falls back to ``espeak`` on PATH.
        """
        if 'espeak_exe' in globals() and os.path.isfile(espeak_exe):
            return espeak_exe
        return 'espeak'

    def _voice_args(self, voice: Optional[str]) -> List[str]:
        """Build the ``['-v', voice_id]`` arguments for the requested voice.

        An explicit per-call *voice* overrides the instance default; the
        literal name "default" means "let eSpeak choose". Friendly names are
        translated through VOICE_MAPPING; anything else is passed straight
        to eSpeak as a voice ID. Returns [] when no voice was requested.
        """
        voice_to_use = None
        if voice and voice != "default":
            voice_to_use = voice
        elif self.voice and self.voice != "default":
            voice_to_use = self.voice
        if not voice_to_use:
            return []
        return ['-v', self.VOICE_MAPPING.get(voice_to_use, voice_to_use)]

    def _resolve_output_path(self, output_file: Optional[str], prefix: str) -> str:
        """Resolve *output_file* to a concrete path, creating parent dirs.

        ``None`` generates a unique name under the default output directory;
        a relative name is placed under the default output directory; an
        absolute path is used as-is.
        """
        if output_file is None:
            output_file = self._generate_unique_filename(prefix)
        if os.path.isabs(output_file):
            output_path = output_file
        else:
            output_path = os.path.join(self.audio_output_dir, output_file)
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        return output_path

    def text_to_phonemes(self, text: str) -> List[str]:
        """Convert *text* to a flat list of phoneme tokens.

        Known whole words are looked up directly; unknown words fall back to
        letter-name phonemes per character. Non-letter characters are kept
        as-is.
        """
        # Simple lookup table: letter names plus a few common words.
        phoneme_map = {
            # Common pronunciations of single letters
            'a': 'eɪ', 'b': 'b iː', 'c': 's iː', 'd': 'd iː', 'e': 'iː',
            'f': 'ɛ f', 'g': 'dʒ iː', 'h': 'eɪ tʃ', 'i': 'aɪ', 'j': 'dʒ eɪ',
            'k': 'k eɪ', 'l': 'ɛ l', 'm': 'ɛ m', 'n': 'ɛ n', 'o': 'oʊ',
            'p': 'p iː', 'q': 'k juː', 'r': 'ɑːr', 's': 'ɛ s', 't': 't iː',
            'u': 'juː', 'v': 'v iː', 'w': 'dʌbəl juː', 'x': 'ɛ ks', 'y': 'waɪ', 'z': 'z iː',
            # Pronunciations of common words
            'cat': 'k æ t', 'dog': 'd ɒ g', 'fish': 'f ɪ ʃ',
            'the': 'ð ə', 'this': 'ð ɪ s', 'that': 'ð æ t',
            'hello': 'h ə ˈloʊ', 'world': 'w ɜːr l d'
        }

        phonemes: List[str] = []
        for word in text.lower().split():
            if word in phoneme_map:
                phonemes.extend(phoneme_map[word].split())
            else:
                # Spell the unknown word out letter by letter.
                for char in word:
                    if char.isalpha() and char.lower() in phoneme_map:
                        phonemes.extend(phoneme_map[char.lower()].split())
                    else:
                        phonemes.append(char)  # keep non-letter characters
        return phonemes

    def spell_word(self, word: str, output_file: Optional[str] = None,
                   letter_gap: Optional[int] = 100, emphasis: Optional[int] = None,
                   stress_pattern: Optional[str] = None, voice: Optional[str] = None) -> str:
        """Spell *word* letter by letter and synthesize it into a WAV file,
        with support for stress and letter-gap control.

        Args:
            word: Word to spell out.
            output_file: Output file name; auto-generated when None.
            letter_gap: Pause between letters in milliseconds (None -> 100).
            emphasis: Emphasis strength (0-5); None uses eSpeak's default.
            stress_pattern: Stress pattern string, e.g. "'hello" stresses the
                first syllable.
            voice: Voice name/ID overriding the instance default.

        Returns:
            Path of the generated audio file.

        Raises:
            Exception: Wraps any synthesis failure with a descriptive message.
        """
        try:
            if letter_gap is None:
                # The API model allows an explicit null; fall back to the
                # documented 100 ms default instead of crashing on // below.
                letter_gap = 100

            # Keep only letters and digits.
            filtered_chars = [char for char in word if char.isalnum()]

            # Build the spelled text, applying either the requested stress
            # pattern or the default pitch-variation rules.
            if stress_pattern:
                spelled_text = self._apply_stress_pattern(filtered_chars, stress_pattern)
            else:
                spelled_text = self._apply_default_pitch_variation(filtered_chars)

            output_path = self._resolve_output_path(output_file, f"spelled_{word[:10]}")

            # Assemble the eSpeak command line.
            command_args = [
                self._espeak_command(),
                '-s', str(self.rate),
                '-p', str(self.pitch),   # base pitch
                '-a', str(self.volume),
                '-g', str(letter_gap // 10)  # eSpeak's -g unit is 10 ms
            ]
            command_args += self._voice_args(voice)
            if emphasis is not None:
                # -k sets emphasis; clamp to eSpeak's supported 0-5 range.
                command_args.extend(['-k', str(max(0, min(5, emphasis)))])
            command_args.extend(['-w', output_path, spelled_text])

            subprocess.run(command_args, check=True)

            print(f"已生成单词拼读音频文件: {output_path}")
            print(f"拼读参数 - 字母间隔: {letter_gap}ms, 重音强度: {emphasis if emphasis is not None else '默认'}, 音调变化: {'已启用' if stress_pattern or len(filtered_chars) > 1 else '默认'}")
            logger.info(f"已生成单词拼读音频文件: {output_path}，参数: 字母间隔={letter_gap}ms, 重音强度={emphasis if emphasis is not None else '默认'}, 音调变化={'已启用' if stress_pattern or len(filtered_chars) > 1 else '默认'}")
            return output_path

        except Exception as e:
            error_msg = f"生成单词拼读音频时发生错误: {str(e)}"
            print(error_msg)
            logger.error(error_msg)
            raise Exception(error_msg)

    def _apply_stress_pattern(self, chars: list, stress_pattern: str) -> str:
        """Apply a stress pattern to a list of characters.

        Uses eSpeak's inline pitch markers, e.g. [x20] raises the pitch and
        [x0] restores it. Supported patterns: a leading ' stresses the first
        character, a leading , stresses the second; anything else falls back
        to the default pitch-variation rules.

        Args:
            chars: List of single characters to spell.
            stress_pattern: Stress pattern string.

        Returns:
            Space-joined spelled text with pitch markers.
        """
        if not chars:
            return ''

        result = []
        if stress_pattern.startswith("'"):
            # Stress on the first syllable: raise pitch on the first letter.
            for i, char in enumerate(chars):
                result.append(f'[x20]{char}[x0]' if i == 0 else char)
        elif stress_pattern.startswith(","):
            # Stress on the second syllable: raise pitch on the second letter.
            for i, char in enumerate(chars):
                if i == 1 and len(chars) > 1:
                    result.append(f'[x20]{char}[x0]')
                else:
                    result.append(char)
        else:
            # Unknown pattern: fall back to the default pitch variation.
            result = self._apply_default_pitch_variation(chars, use_ssml=True).split()

        return ' '.join(result)

    def _apply_default_pitch_variation(self, chars: list, use_ssml: bool = False) -> str:
        """Apply the default pitch-variation rules to a list of characters.

        Longer words get pitch changes at different positions to sound less
        monotone.

        Args:
            chars: List of single characters to spell.
            use_ssml: When True (internal fallback path), emit inline [xNN]
                pitch markers; otherwise emit eSpeak's ^ (rise) / _ (fall)
                intonation marks.

        Returns:
            Spelled text with pitch-variation marks.
        """
        if not chars:
            return ''

        if use_ssml:
            # Internal call path: inline pitch markers.
            result = []
            for i, char in enumerate(chars):
                if len(chars) > 2:
                    if i == 0 or i == len(chars) - 1:
                        # First and last letters slightly raised.
                        result.append(f'[x10]{char}[x0]')
                    elif i == len(chars) // 2:
                        # Middle letter raised the most.
                        result.append(f'[x20]{char}[x0]')
                    else:
                        result.append(char)
                else:
                    # Short words: simple alternating variation.
                    result.append(f'[x{10 if i % 2 == 0 else 0}]{char}[x0]')
            return ' '.join(result)

        # External call path: eSpeak intonation marks.
        result = []
        for i, char in enumerate(chars):
            if i > 0:
                result.append(' ')
            result.append(char)
            # For longer words, add pitch changes at suitable positions.
            if len(chars) > 3 and (i == 1 or i == len(chars) - 2):
                result.append('^')  # ^ marks rising pitch
            elif len(chars) > 2 and i == len(chars) - 1:
                result.append('_')  # _ marks falling pitch
        return ''.join(result)

    def _generate_unique_filename(self, prefix: str = "audio") -> str:
        """Generate a unique WAV file name from a timestamp and random id."""
        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        unique_id = uuid.uuid4().hex[:6]
        return f"{prefix}_{timestamp}_{unique_id}.wav"

    def speak_text(self, text: str, output_file: Optional[str] = None, voice: Optional[str] = None) -> str:
        """Synthesize *text* into a WAV file with eSpeak.

        Args:
            text: Text to synthesize.
            output_file: Output file name; auto-generated when None.
            voice: Voice name/ID overriding the instance default.

        Returns:
            Path of the generated audio file.

        Raises:
            FileNotFoundError: If no eSpeak executable could be found.
            Exception: Wraps eSpeak failures and other errors.
        """
        try:
            espeak_command = self._espeak_command()
            if espeak_command == 'espeak':
                print("使用系统PATH中的eSpeak")
            else:
                print(f"使用配置的eSpeak路径: {espeak_command}")

            output_path = self._resolve_output_path(output_file, "text")

            # Assemble the eSpeak command line.
            command_args = [
                espeak_command,
                '-s', str(self.rate),
                '-p', str(self.pitch),
                '-w', output_path
            ]
            command_args += self._voice_args(voice)
            command_args.append(text)

            subprocess.run(command_args, check=True)

            print(f"已生成音频文件: {output_path}")
            logger.info(f"已生成音频文件: {output_path}")
            return output_path

        except FileNotFoundError:
            # Enriched error message including the configured path, if any.
            config_path_info = f" (配置路径: {espeak_exe})" if 'espeak_exe' in globals() else ""
            error_msg = f"未找到eSpeak{config_path_info}，请先安装eSpeak NG并确保路径正确"
            print(error_msg)
            logger.error(error_msg)
            raise FileNotFoundError(error_msg)
        except subprocess.CalledProcessError as e:
            error_msg = f"eSpeak执行失败: {str(e)}"
            print(error_msg)
            logger.error(error_msg)
            raise Exception(error_msg)
        except Exception as e:
            # Catch-all for any other failure.
            error_msg = f"生成音频文件时发生错误: {str(e)}"
            print(error_msg)
            logger.error(error_msg)
            raise Exception(error_msg)

    def speak_phonetically(self, text: str, combine_phonemes: bool = True) -> Dict[str, Any]:
        """Spell *text* phoneme by phoneme, producing one WAV per phoneme
        plus one WAV for the full text.

        Args:
            text: Text to spell out.
            combine_phonemes: Accepted for API compatibility; the per-phoneme
                files are currently not merged into a single file.

        Returns:
            Dict with the source text, its phonemes, the per-phoneme audio
            file paths, and the full-text audio file path.
        """
        phonemes = self.text_to_phonemes(text)
        print(f"文本: {text}")
        print(f"音素: {' '.join(phonemes)}")

        result = {
            "text": text,
            "phonemes": phonemes,
            "individual_phoneme_files": [],
            "full_text_file": ""
        }

        # Generate one audio file per phoneme; a failure on one phoneme is
        # reported but does not abort the remaining ones.
        for i, phoneme in enumerate(phonemes):
            print(f"处理音素: {phoneme}")
            try:
                phoneme_file = self.speak_text(phoneme, output_file=self._generate_unique_filename(f"phoneme_{i}_{phoneme}"))
                result["individual_phoneme_files"].append(phoneme_file)
            except Exception as e:
                print(f"处理音素 '{phoneme}' 时出错: {str(e)}")

        # Also generate audio for the full text.
        try:
            result["full_text_file"] = self.speak_text(text, output_file=self._generate_unique_filename(f"full_{text[:10]}"))
        except Exception as e:
            print(f"处理完整文本时出错: {str(e)}")

        return result

# Module-level PhonicsSpeaker instance shared by all endpoints (its rate
# attribute is temporarily adjusted per request by the endpoints below).
speaker = PhonicsSpeaker()

# API endpoint implementations
@app.get("/")
async def root():
    """
    API root: return service information and the available endpoints.
    """
    return {
        "message": "Kitten TTS API Service",
        "version": "1.0",
        # Fixed: /api/advanced-spelling-audio was missing from this list
        # even though the endpoint is defined in this service.
        "endpoints": [
            "/api/tts",
            "/api/voices",
            "/api/languages",
            "/api/phonetics",
            "/api/spelling-audio",
            "/api/advanced-spelling-audio",
        ],
    }

@app.get("/api/languages")
async def get_languages():
    """
    Return the list of available languages.

    eSpeak supports many languages; this endpoint exposes a curated subset
    of commonly used ones.
    """
    try:
        common_languages = [
            "en-us", "en-gb", "zh", "ja", "ko",
            "fr", "de", "es", "it", "pt",
        ]
        return {"languages": common_languages}
    except Exception as e:
        logger.error(f"获取语言列表时出错: {e}")
        raise HTTPException(status_code=500, detail=f"获取语言列表时出错: {str(e)}")

@app.get("/api/voices")
async def get_voices(language: str = "en-us"):
    """
    Return the voices available for *language*.

    US English gets the voice IDs actually installed on this system; other
    languages get a generic voice list.
    """
    try:
        installed_en_us = ["en-us", "en-gb", "en-us-nyc"]
        generic = ["default", "male", "female"]
        voices = installed_en_us if language == "en-us" else generic
        return {"voices": voices, "language": language}
    except Exception as e:
        logger.error(f"获取语音列表时出错: {e}")
        raise HTTPException(status_code=500, detail=f"获取语音列表时出错: {str(e)}")

@app.post("/api/tts")
async def tts_endpoint(request: TTSRequest):
    """
    Text-to-speech API endpoint.

    Converts text to speech with eSpeak. When stress-pattern or default
    pitch-variation controls are requested, short alphanumeric texts are
    routed through spell_word (full stress control) while longer texts get
    simple inline pitch markers.
    """
    try:
        # Validate input parameters.
        if not request.text:
            raise HTTPException(status_code=400, detail="Text parameter is required")
        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")
        # Validate the syllable-variation range.
        if request.syllable_variation is not None and not (0.0 <= request.syllable_variation <= 1.0):
            raise HTTPException(
                status_code=400,
                detail="Syllable variation must be between 0.0 and 1.0"
            )

        logger.info(f"Processing TTS request: {request.text[:30]}..., voice: {request.voice}, speed: {request.speed}, language: {request.language}")

        # Log any pitch-control parameters that were supplied.
        pitch_info = []
        if request.stress_pattern:
            pitch_info.append(f"重音模式: {request.stress_pattern}")
        if request.syllable_variation is not None:
            pitch_info.append(f"音节变化: {request.syllable_variation}")
        if request.use_default_pitch_variation:
            pitch_info.append("使用默认音调变化")
        if pitch_info:
            logger.info(f"音调控制: {', '.join(pitch_info)}")

        # Scale the shared speaker's rate for this request only. The
        # try/finally guarantees the rate is restored even when synthesis
        # raises (the previous version leaked the scaled rate on error,
        # corrupting subsequent requests).
        original_rate = speaker.rate
        speaker.rate = int(original_rate * request.speed)
        try:
            if request.stress_pattern or request.use_default_pitch_variation:
                words = request.text.split()
                if len(words) <= 5 and all(word.isalnum() for word in words):
                    # Short text / single word: spell_word gives stress control.
                    output_file = speaker.spell_word(
                        request.text,
                        stress_pattern=request.stress_pattern,
                        voice=request.voice
                    )
                else:
                    # Long text: inject simple inline pitch markers instead.
                    processed_text = request.text
                    if request.stress_pattern:
                        if request.stress_pattern.startswith("'"):
                            # Raise pitch over the whole text.
                            processed_text = f'[x15]{processed_text}[x0]'
                        elif request.stress_pattern.startswith(","):
                            # Raise pitch on the second word only.
                            words = processed_text.split()
                            if len(words) > 1:
                                words[1] = f'[x15]{words[1]}[x0]'
                                processed_text = ' '.join(words)

                    # Apply the default pitch variation, if enabled: mark
                    # falling pitch at sentence boundaries.
                    if request.use_default_pitch_variation:
                        if '.' in processed_text or '!' in processed_text or '?' in processed_text:
                            processed_text += '_'
                        else:
                            processed_text = processed_text.replace('.', '._')
                            processed_text = processed_text.replace('!', '!_')
                            processed_text = processed_text.replace('?', '?_')

                    output_file = speaker.speak_text(processed_text, voice=request.voice)
            else:
                # No stress/pitch controls: plain TTS.
                output_file = speaker.speak_text(request.text, voice=request.voice)
        finally:
            # Always restore the original rate.
            speaker.rate = original_rate

        logger.info(f"TTS audio generated successfully")

        # Return the audio file to the client.
        return FileResponse(
            path=output_file,
            media_type="audio/wav",
            filename=f"tts_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"TTS audio generation error: {e}")
        raise HTTPException(status_code=500, detail=f"TTS audio generation error: {str(e)}")

@app.post("/api/phonetics")
async def phonetics_endpoint(request: PhoneticsRequest):
    """
    Text-to-phoneme API endpoint: convert the input text into its phoneme
    representation.
    """
    try:
        # Reject empty input up front.
        if not request.text:
            raise HTTPException(status_code=400, detail="Text parameter is required")

        logger.info(f"Processing phonetic conversion for text: {request.text[:30]}...")

        # Delegate to the shared PhonicsSpeaker's lookup-table converter.
        phoneme_tokens = speaker.text_to_phonemes(request.text)

        logger.info("Phonetic conversion completed successfully")

        return {
            "original_text": request.text,
            "phonetic_representation": phoneme_tokens,
            "tokens": phoneme_tokens,
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Phonetic conversion error: {e}")
        raise HTTPException(status_code=500, detail=f"Phonetic conversion error: {str(e)}")

@app.post("/api/spelling-audio")
async def spelling_audio_endpoint(request: SpellingAudioRequest):
    """
    Word-spelling audio API endpoint: generate letter-by-letter spelling
    audio for a word.
    """
    try:
        # Validate input parameters.
        if not request.word:
            raise HTTPException(status_code=400, detail="Word parameter is required")
        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")

        logger.info(f"Processing spelling audio for word: {request.word}, voice: {request.voice}, speed: {request.speed}")

        # Scale the shared speaker's rate for this request; the try/finally
        # restores it even when synthesis fails (the previous version leaked
        # the scaled rate on error, corrupting subsequent requests).
        original_rate = speaker.rate
        speaker.rate = int(original_rate * request.speed)
        try:
            output_file = speaker.spell_word(request.word, voice=request.voice)
        finally:
            speaker.rate = original_rate

        logger.info(f"Spelling audio generated successfully for word: {request.word}")

        # Return the audio file to the client.
        return FileResponse(
            path=output_file,
            media_type="audio/wav",
            filename=f"spelling_{request.word}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Spelling audio generation error: {e}")
        raise HTTPException(status_code=500, detail=f"Spelling audio generation error: {str(e)}")

@app.post("/api/advanced-spelling-audio")
async def advanced_spelling_audio_endpoint(request: AdvancedSpellingRequest):
    """
    Advanced word-spelling audio API endpoint.

    Adds letter-gap, emphasis, and stress-pattern controls on top of the
    basic spelling synthesis.
    """
    try:
        # Validate input parameters.
        if not request.word:
            raise HTTPException(status_code=400, detail="Word parameter is required")
        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")
        # Validate the emphasis range.
        if request.emphasis is not None and not (0 <= request.emphasis <= 5):
            raise HTTPException(
                status_code=400,
                detail="Emphasis must be between 0 and 5"
            )

        logger.info(f"Processing advanced spelling audio for word: {request.word}, mode: {request.mode}")

        # Scale the shared speaker's rate for this request; the try/finally
        # restores it even when synthesis fails (the previous version leaked
        # the scaled rate on error, corrupting subsequent requests).
        original_rate = speaker.rate
        speaker.rate = int(original_rate * request.speed)
        try:
            output_file = speaker.spell_word(
                request.word,
                letter_gap=request.letter_gap,
                emphasis=request.emphasis,
                stress_pattern=request.stress_pattern,
                voice=request.voice
            )
        finally:
            speaker.rate = original_rate

        # Log any pitch-control parameters that were supplied.
        pitch_info = []
        if request.stress_pattern:
            pitch_info.append(f"重音模式: {request.stress_pattern}")
        if request.emphasis is not None:
            pitch_info.append(f"重音强度: {request.emphasis}")
        if request.letter_gap != 100:
            pitch_info.append(f"字母间隔: {request.letter_gap}ms")
        if pitch_info:
            logger.info(f"音调控制: {', '.join(pitch_info)}")

        logger.info(f"Advanced spelling audio generated successfully for word: {request.word}")

        # Return the audio file to the client.
        return FileResponse(
            path=output_file,
            media_type="audio/wav",
            filename=f"advanced_spelling_{request.word}_{request.mode}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Advanced spelling audio generation error: {e}")
        raise HTTPException(status_code=500, detail=f"Advanced spelling audio generation error: {str(e)}")

# CORS support: attach permissive CORS headers to every response.
@app.middleware("http")
async def add_cors_headers(request, call_next):
    """Add permissive CORS headers to every HTTP response."""
    response = await call_next(request)
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
    return response

# Application entry point: serve the API with uvicorn when run directly.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)