from fastapi import FastAPI, HTTPException, BackgroundTasks, UploadFile, File
from fastapi.responses import FileResponse, JSONResponse
from pydantic import BaseModel
import os
import sys
import tempfile
import soundfile as sf
import numpy as np
import datetime
import time
import logging
import whisper
from typing import List, Dict, Any, Optional

# Add the project root to the Python path (prepend so the local copy wins over installed versions)
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

# Configure eSpeak NG environment variables.
# Fix: use a raw string — "\P" and "\e" are invalid escape sequences that only
# happened to keep their backslashes; Python 3.12+ emits a SyntaxWarning for them.
espeak_path = r"C:\Program Files\eSpeak NG"
os.environ["PHONEMIZER_ESPEAK_PATH"] = os.path.join(espeak_path, "espeak-ng.exe")
if espeak_path not in os.environ["PATH"]:
    os.environ["PATH"] += ";" + espeak_path

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# 音调控制辅助函数
def apply_stress_pattern(chars: list, stress_pattern: str, separator: str = " ") -> str:
    """应用指定的重音模式到字符列表
    
    Args:
        chars: 字符列表
        stress_pattern: 重音模式字符串
        separator: 字符之间的分隔符
        
    Returns:
        添加了音调标记的拼读文本
    """
    if not chars:
        return ''
        
    # 构建带音调标记的文本
    result = []
    if stress_pattern.startswith("'"):
        # 第一个音节重读
        for i, char in enumerate(chars):
            if i == 0:
                # 第一个字母提高音调
                result.append(f'[x20]{char}[x0]')
            else:
                result.append(char)
    elif stress_pattern.startswith(","):
        # 第二个音节重读
        for i, char in enumerate(chars):
            if i == 1 and len(chars) > 1:
                # 第二个字母提高音调
                result.append(f'[x20]{char}[x0]')
            else:
                result.append(char)
    else:
        # 如果模式不匹配，回退到默认的音调变化
        result = chars
        
    return separator.join(result)
    

def apply_default_pitch_variation(chars: list, separator: str = " ") -> str:
    """Apply the default pitch-variation rules to a list of characters.

    Longer words get distinct pitch levels at the start, middle and end;
    words of one or two characters simply alternate pitch.

    Args:
        chars: characters to mark up
        separator: string placed between characters in the output

    Returns:
        Spelling text with pitch markup applied.
    """
    if not chars:
        return ''

    total = len(chars)
    marked = []
    if total > 2:
        middle = total // 2
        for idx, char in enumerate(chars):
            if idx in (0, total - 1):
                # Slightly raised pitch on the first and last letters
                marked.append(f'[x10]{char}[x0]')
            elif idx == middle:
                # Highest pitch on the middle letter
                marked.append(f'[x20]{char}[x0]')
            else:
                marked.append(char)
    else:
        # Short words: alternate between raised and base pitch
        for idx, char in enumerate(chars):
            level = 10 if idx % 2 == 0 else 0
            marked.append(f'[x{level}]{char}[x0]')

    return separator.join(marked)

# Make sure the domestic Hugging Face mirror is used for model downloads
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# Imports required by the spelling-audio feature
import nltk
from nltk.tokenize import word_tokenize

# Make sure the required NLTK resources are installed
logging.info("正在检查并安装NLTK资源...")

# NLTK resources that must be present
required_resources = ['punkt_tab', 'averaged_perceptron_tagger_eng']

# Per-user NLTK data directory (NLTK's default search location)
nltk_data_dir = os.path.join(os.path.expanduser("~"), "nltk_data")

for resource in required_resources:
    # punkt_tab lives under tokenizers/, the POS tagger under taggers/
    resource_path = f'tokenizers/{resource}' if resource == 'punkt_tab' else f'taggers/{resource}'
    try:
        # Probe for the resource; a LookupError means it must be downloaded
        nltk.data.find(resource_path)
        logging.info(f"NLTK {resource}资源已存在")
    except LookupError:
        logging.info(f"正在安装NLTK {resource}资源...")
        # Ensure the target directory exists, then download into it.
        # Fix: the original created nltk_data_dir but never passed it to
        # nltk.download(), leaving the directory creation ineffective.
        os.makedirs(nltk_data_dir, exist_ok=True)
        nltk.download(resource, download_dir=nltk_data_dir)
        logging.info(f"NLTK {resource}资源安装完成")

logging.info("所有NLTK资源检查和安装完成")

# 导入TTS处理器和拼读功能
from kittentts import KittenTTS, text_to_phonetics, phonetics_to_spelling_text, word_to_spelling

# Initialize the FastAPI application
app = FastAPI(title="Kitten TTS API", description="Kitten TTS语音合成API服务", version="1.0")

# Health-check endpoint used to verify the service is up
@app.get("/api/test")
async def test_endpoint():
    """Return a static payload confirming the API service is running."""
    payload = {"status": "success", "message": "API服务运行正常", "version": "1.0"}
    return payload

# Request model for the text-to-speech endpoint
class TTSRequest(BaseModel):
    text: str  # text to synthesize
    voice: str = "expr-voice-5-m"  # voice preset identifier
    speed: float = 1.0  # playback speed multiplier (must be > 0)
    language: str = "en-us"  # language code of the model to use

# Request model for the text-to-phonetics endpoint
class PhoneticsRequest(BaseModel):
    text: str  # text to convert to a phonetic representation

# Request model for the word-spelling audio endpoint
class SpellingAudioRequest(BaseModel):
    word: str  # word to spell out
    voice: str = "expr-voice-5-m"  # voice preset identifier
    speed: float = 1.0  # playback speed multiplier (must be > 0)

# Request model for the advanced word-spelling endpoint
class AdvancedSpellingRequest(BaseModel):
    word: str  # word to spell out
    mode: str = "letter_by_letter"  # letter_by_letter, letter_name, phonemic, combination, crfsuite
    with_pronunciation: bool = False  # include per-letter pronunciation
    letter_separator: str = " "  # separator inserted between letters
    with_word_pronunciation: bool = False  # prefix the whole-word pronunciation
    voice: str = "expr-voice-5-m"  # voice preset identifier
    speed: float = 1.0  # playback speed multiplier (must be > 0)
    # Stress and pitch control parameters
    emphasis: Optional[int] = None  # stress strength (0-5); None uses the default
    stress_pattern: Optional[str] = None  # stress pattern string, e.g. "'hello" stresses the first syllable
    letter_gap: Optional[float] = None  # pause between letters in seconds; None uses the default

# TTS processor: owns the per-language KittenTTS synthesis models and the
# Whisper speech-recognition model used by the API endpoints.
class TTSProcessor:
    # Fallback voice list, returned when the requested language has no
    # loaded model or the voice lookup fails.  Deduplicated here — the
    # original repeated this list verbatim in two places.
    _FALLBACK_VOICES = [
        'expr-voice-2-m', 'expr-voice-2-f', 
        'expr-voice-3-m', 'expr-voice-3-f', 
        'expr-voice-4-m', 'expr-voice-4-f', 
        'expr-voice-5-m', 'expr-voice-5-f'
    ]

    def __init__(self):
        """Load the KittenTTS models and the Whisper speech-recognition model.

        English is mandatory; Chinese is loaded best-effort and skipped with
        a warning when eSpeak NG lacks the Chinese dictionary files.

        Raises:
            Exception: re-raised when the English model or Whisper fails to load.
        """
        try:
            # Synthesis models keyed by language code
            self.models = {}
            # The English model must load; failure aborts startup
            self.models["en-us"] = KittenTTS()
            logger.info("英文KittenTTS模型加载成功")
            
            # Try to load the optional Chinese model
            try:
                self.models["zh"] = KittenTTS(language="zh")
                logger.info("中文KittenTTS模型加载成功")
            except Exception as zh_error:
                logger.warning(f"中文KittenTTS模型加载失败: {zh_error}")
                logger.warning("注意：中文语音合成需要正确安装eSpeak NG并包含中文词典文件")
                logger.warning("如需中文支持，请确保eSpeak NG安装正确，并包含中文音素表和词典")
                # Drop any partially-registered Chinese model
                self.models.pop("zh", None)
            
            logger.info(f"KittenTTS模型加载完成，可用语言: {list(self.models.keys())}")
            
            # Whisper model size options: tiny, base, small, medium, large
            self.whisper_model = whisper.load_model("base")
            logger.info("Whisper语音识别模型加载成功")
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            raise
    
    def generate_audio(self, text: str, voice: str = 'expr-voice-5-m', speed: float = 1.0, language: str = 'en-us') -> np.ndarray:
        """Synthesize text and return the raw audio samples.

        Args:
            text: text to synthesize
            voice: voice preset identifier
            speed: playback speed multiplier
            language: language code; falls back to 'en-us' if unavailable

        Returns:
            numpy array of audio samples.

        Raises:
            Exception: re-raised from the underlying model on failure.
        """
        try:
            # Fall back to the default language when the requested one
            # has no loaded model
            if language not in self.models:
                logger.warning(f"Language '{language}' not available, using default: en-us")
                language = "en-us"
                
            logger.info(f"Generating audio for text: {text[:30]}... with voice {voice}, speed {speed}, language {language}")
            audio = self.models[language].generate(text, voice=voice, speed=speed)
            logger.info(f"Audio generated successfully, shape: {audio.shape}")
            return audio
        except Exception as e:
            logger.error(f"Failed to generate audio: {e}")
            raise
    
    def get_available_voices(self, language: str = 'en-us') -> List[str]:
        """Return the voices available for the given language.

        Falls back to the built-in default voice list when the language has
        no loaded model or the lookup raises.
        """
        try:
            if language in self.models:
                return self.models[language].available_voices
            # No model for this language: return a copy of the fallback list
            # (a copy, so callers cannot mutate the shared class constant)
            return list(self._FALLBACK_VOICES)
        except Exception as e:
            logger.error(f"Failed to get available voices: {e}")
            return list(self._FALLBACK_VOICES)
    
    def get_available_languages(self) -> List[str]:
        """Return the language codes that have a successfully loaded model."""
        return list(self.models.keys())
    
    def recognize_speech(self, audio_path: str, language: str = 'en') -> str:
        """Transcribe the audio file at audio_path using OpenAI Whisper.

        Args:
            audio_path: path to the audio file
            language: language code hint; a falsy value lets Whisper auto-detect

        Returns:
            The recognized text.

        Raises:
            HTTPException: 500 wrapping any recognition failure.
        """
        try:
            logger.info(f"开始识别音频文件: {audio_path}，语言: {language}")
            
            # Passing a language hint improves accuracy; without one,
            # Whisper auto-detects the language
            if language:
                result = self.whisper_model.transcribe(audio_path, language=language)
            else:
                result = self.whisper_model.transcribe(audio_path)
            
            text = result["text"]
            detected_language = result["language"]
            
            logger.info(f"语音识别成功，识别结果: {text[:30]}...，检测到的语言: {detected_language}")
            return text
        except Exception as e:
            logger.error(f"语音识别过程中发生错误: {e}")
            raise HTTPException(status_code=500, detail=f"语音识别错误: {str(e)}")

# Instantiate the module-level TTS processor (loads all models at import time)
processor = TTSProcessor()

# API endpoints
@app.get("/")
async def root():
    """Describe the service and list its primary endpoints."""
    endpoint_paths = ["/api/tts", "/api/voices", "/api/languages", "/api/phonetics", "/api/spelling-audio"]
    return {
        "message": "Kitten TTS API Service",
        "version": "1.0",
        "endpoints": endpoint_paths
    }

@app.get("/api/languages")
async def get_languages():
    """Return the list of languages the processor can synthesize."""
    try:
        return {"languages": processor.get_available_languages()}
    except Exception as e:
        logger.error(f"Error getting languages: {e}")
        raise HTTPException(status_code=500, detail=f"Error getting languages: {str(e)}")

@app.get("/api/voices")
async def get_voices(language: str = "en-us"):
    """Return the available voices for the given language."""
    try:
        voice_list = processor.get_available_voices(language)
        return {"voices": voice_list, "language": language}
    except Exception as e:
        logger.error(f"Error getting voices: {e}")
        raise HTTPException(status_code=500, detail=f"Error getting voices: {str(e)}")

@app.post("/api/tts")
async def tts_endpoint(request: TTSRequest, background_tasks: BackgroundTasks):
    """Text-to-speech endpoint.

    Validates the request, synthesizes audio and streams back a WAV file.
    The temporary file is removed after the response has been sent.

    Args:
        request: text, voice, speed and language for synthesis
        background_tasks: FastAPI-injected; used to clean up the temp file

    Raises:
        HTTPException: 400 on invalid parameters, 500 on synthesis failure.
    """
    try:
        # Validate input parameters
        if not request.text:
            raise HTTPException(status_code=400, detail="Text parameter is required")
        
        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")
        
        # Validate the language parameter
        available_languages = processor.get_available_languages()
        if request.language not in available_languages:
            raise HTTPException(
                status_code=400,
                detail={
                    'error': f'Language "{request.language}" not available',
                    'available_languages': available_languages
                }
            )
        
        # Validate the voice parameter
        available_voices = processor.get_available_voices(request.language)
        if request.voice not in available_voices:
            raise HTTPException(
                status_code=400,
                detail={
                    'error': f'Voice "{request.voice}" not available',
                    'available_voices': available_voices
                }
            )
        
        # Synthesize the audio and time the operation
        start_time = time.time()
        audio = processor.generate_audio(
            request.text,
            voice=request.voice,
            speed=request.speed,
            language=request.language
        )
        process_time = time.time() - start_time
        
        # Persist the audio to a temporary WAV file (24 kHz sample rate)
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            sf.write(temp_file.name, audio, 24000)
            temp_filename = temp_file.name
        
        logger.info(f"TTS request processed in {process_time:.2f} seconds")
        
        # Fix: delete the temporary file once the response has been sent;
        # the original leaked one WAV file per request.
        background_tasks.add_task(os.remove, temp_filename)
        
        # Stream the audio file back to the client
        return FileResponse(
            path=temp_filename,
            media_type="audio/wav",
            filename=f"tts_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"TTS processing error: {e}")
        raise HTTPException(status_code=500, detail=f"TTS processing error: {str(e)}")

@app.post("/api/speech-to-text")
async def speech_to_text(file: UploadFile = File(...), language: str = 'en'):
    """Speech-to-text endpoint.

    Accepts an uploaded audio file and transcribes it with Whisper.

    Args:
        file: uploaded audio file (content type must be audio/*)
        language: language code hint for recognition

    Raises:
        HTTPException: 400 for non-audio uploads, 500 on recognition failure.
    """
    try:
        # Validate the upload type.
        # Fix: content_type can be None, which previously raised
        # AttributeError and surfaced as a 500 instead of a 400.
        if not file.content_type or not file.content_type.startswith('audio/'):
            raise HTTPException(status_code=400, detail="请上传音频文件")
        
        # Save the upload to a temporary file for Whisper to read
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            content = await file.read()
            temp_file.write(content)
            temp_filename = temp_file.name
        
        logger.info(f"接收到音频文件: {file.filename}，大小: {len(content)} 字节")
        
        try:
            # Transcribe and time the recognition step
            start_time = time.time()
            text = processor.recognize_speech(temp_filename, language)
            process_time = time.time() - start_time
        finally:
            # Fix: always remove the temporary file, even when recognition
            # fails (the original leaked it on error), and catch only the
            # filesystem errors instead of a bare except.
            try:
                os.remove(temp_filename)
            except OSError:
                pass
        
        logger.info(f"语音识别完成，耗时: {process_time:.2f}秒")
        
        return {
            "text": text,
            "language": language,
            "filename": file.filename
        }
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"语音识别处理错误: {e}")
        raise HTTPException(status_code=500, detail=f"语音识别处理错误: {str(e)}")

@app.post("/api/spelling-audio")
async def spelling_audio_endpoint(request: SpellingAudioRequest, background_tasks: BackgroundTasks):
    """Word-spelling speech endpoint.

    Takes a word and returns synthesized audio that spells it out.

    Args:
        request: word, voice and speed for the spelling audio
        background_tasks: FastAPI-injected; used to clean up the temp file

    Returns:
        WAV audio file response.

    Raises:
        HTTPException: 400 on invalid parameters, 500 on synthesis failure.
    """
    try:
        # Validate input parameters
        if not request.word:
            raise HTTPException(status_code=400, detail="Word parameter is required")
        
        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")
        
        logger.info(f"Processing spelling audio for word: {request.word}")
        
        # Phonetic representation of the word
        phonetic_result = text_to_phonetics(request.word)
        phonetic_representation = phonetic_result["phonetic_representation"]
        
        # Convert the phonemes into readable spelling text
        spelling_text = phonetics_to_spelling_text(phonetic_representation)
        logger.info(f"Generated spelling text: {spelling_text}")
        
        # Synthesize the spelling audio (spelling currently supports English only)
        audio = processor.generate_audio(
            spelling_text,
            voice=request.voice,
            speed=request.speed,
            language="en-us"
        )
        
        # Persist the audio to a temporary WAV file (24 kHz sample rate)
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            sf.write(temp_file.name, audio, 24000)
            temp_filename = temp_file.name
        
        logger.info(f"Spelling audio generated successfully for word: {request.word}")
        
        # Fix: remove the temporary file after the response is sent;
        # the original leaked one WAV file per request.
        background_tasks.add_task(os.remove, temp_filename)
        
        # Stream the audio file back to the client
        return FileResponse(
            path=temp_filename,
            media_type="audio/wav",
            filename=f"spelling_{request.word}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"Spelling audio generation error: {e}")
        raise HTTPException(status_code=500, detail=f"Spelling audio generation error: {str(e)}")

@app.post("/api/advanced-spelling-audio")
async def advanced_spelling_audio_endpoint(request: AdvancedSpellingRequest, background_tasks: BackgroundTasks):
    """Advanced word-spelling speech endpoint.

    Supports several spelling modes plus optional stress/pitch control,
    and returns the audio with base64-encoded spelling metadata in the
    response headers.

    Args:
        request: word, mode, voice, speed and pitch-control parameters
        background_tasks: FastAPI-injected; used to clean up the temp file

    Returns:
        WAV audio file response with X-Spelling-* metadata headers.

    Raises:
        HTTPException: 400 on invalid parameters, 500 on synthesis failure.
    """
    try:
        # Validate input parameters
        if not request.word:
            raise HTTPException(status_code=400, detail="Word parameter is required")
        
        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")
        
        # Validate the spelling mode
        valid_modes = ["letter_by_letter", "letter_name", "phonemic", "combination", "crfsuite"]
        if request.mode not in valid_modes:
            raise HTTPException(
                status_code=400,
                detail={
                    'error': f'Invalid spelling mode: "{request.mode}"',
                    'valid_modes': valid_modes
                }
            )
        
        # Validate the emphasis strength
        if request.emphasis is not None and (request.emphasis < 0 or request.emphasis > 5):
            raise HTTPException(
                status_code=400,
                detail="Emphasis must be between 0 and 5"
            )
        
        logger.info(f"Processing advanced spelling audio for word: {request.word}, mode: {request.mode}")
        
        # Run the advanced word-spelling conversion
        spelling_result = word_to_spelling(
            word=request.word,
            mode=request.mode,
            with_pronunciation=request.with_pronunciation,
            letter_separator=request.letter_separator,
            with_word_pronunciation=request.with_word_pronunciation
        )
        
        spelling_text = spelling_result["spelling_text"]
        
        # Apply pitch control only in the letter-oriented modes and only
        # when a stress pattern or emphasis value was supplied.
        # (Fix: dropped the original's unused duplicate `filtered_chars`
        # computation that shadowed `letters` below.)
        if (request.mode in ["letter_by_letter", "letter_name"] and 
            (request.stress_pattern or request.emphasis is not None)):
            
            # Alphanumeric characters of the word — targets for pitch markup
            letters = [char for char in request.word if char.isalnum()]
            
            if request.stress_pattern:
                # Use the caller-specified stress pattern
                spelled_text = apply_stress_pattern(letters, request.stress_pattern, request.letter_separator)
            else:
                # No pattern given: fall back to the default pitch-variation rules
                spelled_text = apply_default_pitch_variation(letters, request.letter_separator)
            
            # Replace the spelling text with the pitch-marked version
            spelling_text = spelled_text
            
            # Optionally prefix the word and its pronunciation
            if request.with_word_pronunciation:
                phonetic_result = text_to_phonetics(request.word)
                pronunciation_text = ' '.join(phonetic_result["phonetic_representation"])
                spelling_text = f"{request.word} {pronunciation_text} {spelling_text}"
        
        # Record which pitch-control parameters were supplied, for the log
        pitch_info = []
        if request.stress_pattern:
            pitch_info.append(f"重音模式: {request.stress_pattern}")
        if request.emphasis is not None:
            pitch_info.append(f"重音强度: {request.emphasis}")
        
        logger.info(f"Generated advanced spelling text: {spelling_text}")
        logger.info(f"Spelling info: {spelling_result['info']}")
        if pitch_info:
            logger.info(f"音调控制: {', '.join(pitch_info)}")
        
        # Synthesize the spelling audio (spelling currently supports English only)
        audio = processor.generate_audio(
            spelling_text,
            voice=request.voice,
            speed=request.speed,
            language="en-us"
        )
        
        # Persist the audio to a temporary WAV file (24 kHz sample rate)
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            sf.write(temp_file.name, audio, 24000)
            temp_filename = temp_file.name
        
        logger.info(f"Advanced spelling audio generated successfully for word: {request.word}")
        
        # Fix: remove the temporary file after the response is sent;
        # the original leaked one WAV file per request.
        background_tasks.add_task(os.remove, temp_filename)
        
        # Base64-encode the Unicode metadata so it can travel safely in
        # HTTP headers (which are ASCII-only)
        import base64
        encoded_info = base64.b64encode(spelling_result["info"].encode('utf-8')).decode('ascii')
        encoded_text = base64.b64encode(spelling_text.encode('utf-8')).decode('ascii')
        
        return FileResponse(
            path=temp_filename,
            media_type="audio/wav",
            filename=f"advanced_spelling_{request.word}_{request.mode}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav",
            headers={
                "X-Spelling-Info": encoded_info,
                "X-Spelling-Text": encoded_text,
                "X-Spelling-Encoding": "base64"
            }
        )
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"Advanced spelling audio generation error: {e}")
        raise HTTPException(status_code=500, detail=f"Advanced spelling audio generation error: {str(e)}")
        
@app.post("/api/phonetics")
async def phonetics_endpoint(request: PhoneticsRequest):
    """Text-to-phonetics endpoint.

    Converts the input text to a phonetic representation using NLTK
    POS tagging and the CMU pronouncing dictionary.

    Args:
        request: request object carrying the text to convert

    Returns:
        JSON with the original text, its phonetic representation, the
        plain tokens and the POS-tagged tokens.
    """
    try:
        # Validate input parameters
        if not request.text:
            raise HTTPException(status_code=400, detail="Text parameter is required")
        
        logger.info(f"Processing phonetic conversion for text: {request.text[:30]}...")
        
        # Delegate to the phonetics helper
        result = text_to_phonetics(request.text)
        
        logger.info("Phonetic conversion completed successfully")
        
        tagged = result["tokens"]
        response_body = {
            "original_text": result["original_text"],
            "phonetic_representation": result["phonetic_representation"],
            "tokens": [pair[0] for pair in tagged],
            "tagged_tokens": tagged
        }
        return response_body
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"Phonetic conversion error: {e}")
        raise HTTPException(status_code=500, detail=f"Phonetic conversion error: {str(e)}")

# Add CORS support
@app.middleware("http")
async def add_cors_headers(request, call_next):
    """HTTP middleware that stamps permissive CORS headers on every response."""
    response = await call_next(request)
    cors_headers = {
        "Access-Control-Allow-Origin": "*",
        "Access-Control-Allow-Methods": "GET, POST, OPTIONS",
        "Access-Control-Allow-Headers": "Content-Type, Authorization",
    }
    for header_name, header_value in cors_headers.items():
        response.headers[header_name] = header_value
    return response

if __name__ == "__main__":
    import uvicorn
    # Serve on all interfaces, port 8000 (no auto-reload)
    uvicorn.run(app, host="0.0.0.0", port=8000)