from fastapi import FastAPI, HTTPException, BackgroundTasks, UploadFile, File
from fastapi.responses import FileResponse, JSONResponse
from pydantic import BaseModel
import os
import sys
import tempfile
import soundfile as sf
import numpy as np
import datetime
import time
import logging
import whisper
from typing import List, Dict, Any

# Add the project root to the Python path (prepended so the local package wins)
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, project_root)

# Configure the eSpeak NG environment variables.
# NOTE: raw string — the original plain string contained invalid escape
# sequences ("\P", "\e") that emit SyntaxWarning/DeprecationWarning and will
# become a SyntaxError in future Python versions.
espeak_path = r"C:\Program Files\eSpeak NG"
os.environ["PHONEMIZER_ESPEAK_PATH"] = os.path.join(espeak_path, "espeak-ng.exe")
if espeak_path not in os.environ["PATH"]:
    os.environ["PATH"] += ";" + espeak_path

# Configure logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Use the China-hosted Hugging Face mirror for model downloads
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# 导入TTS处理器和拼读功能
from kittentts import KittenTTS, text_to_phonetics, phonetics_to_spelling_text

# Initialize the FastAPI application (title/description/version feed the OpenAPI docs)
app = FastAPI(title="Kitten TTS API", description="Kitten TTS语音合成API服务", version="1.0")

# Health-check endpoint used to verify the service is up
@app.get("/api/test")
async def test_endpoint():
    """Return a static payload confirming the API service is running."""
    payload = {"status": "success", "message": "API服务运行正常", "version": "1.0"}
    return payload

# Request model for /api/tts
class TTSRequest(BaseModel):
    """Payload for the text-to-speech endpoint."""
    text: str  # text to synthesize; must be non-empty
    voice: str = "expr-voice-5-m"  # voice id; validated against the available-voices list
    speed: float = 1.0  # speech speed multiplier; must be > 0
    language: str = "en-us"  # language code (e.g. "en-us", "zh"); validated against loaded models

# Request model for /api/phonetics
class PhoneticsRequest(BaseModel):
    """Payload for the text-to-phonetics endpoint."""
    text: str  # text to convert to a phonetic representation; must be non-empty

# Request model for /api/spelling-audio
class SpellingAudioRequest(BaseModel):
    """Payload for the word-spelling audio endpoint (English only)."""
    word: str  # word to spell out; must be non-empty
    voice: str = "expr-voice-5-m"  # voice id used for the spelling audio
    speed: float = 1.0  # speech speed multiplier; must be > 0

# TTS processor: wraps the KittenTTS synthesis models and the Whisper ASR model.
class TTSProcessor:
    # Fallback voice list, returned when no per-language model can supply one.
    # (Previously this 8-entry list was duplicated verbatim in two places
    # inside get_available_voices.)
    _FALLBACK_VOICES = [
        'expr-voice-2-m', 'expr-voice-2-f',
        'expr-voice-3-m', 'expr-voice-3-f',
        'expr-voice-4-m', 'expr-voice-4-f',
        'expr-voice-5-m', 'expr-voice-5-f'
    ]

    def __init__(self):
        """Load the KittenTTS synthesis models and the Whisper ASR model.

        English is mandatory; Chinese is best-effort and skipped with a
        warning when its eSpeak NG phoneme tables/dictionaries are missing.

        Raises:
            Exception: re-raised when a mandatory model fails to load.
        """
        try:
            # Per-language model registry: language code -> KittenTTS instance
            self.models = {}
            # The English model must load; failure aborts initialization.
            self.models["en-us"] = KittenTTS()
            logger.info("英文KittenTTS模型加载成功")

            # Chinese support requires eSpeak NG with Chinese dictionaries;
            # if it cannot load we continue with English only.
            try:
                self.models["zh"] = KittenTTS(language="zh")
                logger.info("中文KittenTTS模型加载成功")
            except Exception as zh_error:
                logger.warning(f"中文KittenTTS模型加载失败: {zh_error}")
                logger.warning("注意：中文语音合成需要正确安装eSpeak NG并包含中文词典文件")
                logger.warning("如需中文支持，请确保eSpeak NG安装正确，并包含中文音素表和词典")
                # Drop any partially-registered Chinese model.
                self.models.pop("zh", None)

            logger.info(f"KittenTTS模型加载完成，可用语言: {list(self.models.keys())}")

            # Whisper ASR model; available sizes: tiny, base, small, medium, large
            self.whisper_model = whisper.load_model("base")
            logger.info("Whisper语音识别模型加载成功")
        except Exception as e:
            logger.error(f"模型加载失败: {e}")
            raise

    def generate_audio(self, text: str, voice: str = 'expr-voice-5-m', speed: float = 1.0, language: str = 'en-us') -> np.ndarray:
        """Synthesize `text` and return the audio samples as a numpy array.

        Args:
            text: text to synthesize.
            voice: voice id understood by the KittenTTS model.
            speed: speech speed multiplier.
            language: language code; silently falls back to "en-us" if the
                requested language has no loaded model.

        Raises:
            Exception: re-raised from the underlying model on failure.
        """
        try:
            if language not in self.models:
                # Unknown/unloaded language: fall back to the default model.
                logger.warning(f"Language '{language}' not available, using default: en-us")
                language = "en-us"

            logger.info(f"Generating audio for text: {text[:30]}... with voice {voice}, speed {speed}, language {language}")
            audio = self.models[language].generate(text, voice=voice, speed=speed)
            logger.info(f"Audio generated successfully, shape: {audio.shape}")
            return audio
        except Exception as e:
            logger.error(f"Failed to generate audio: {e}")
            raise

    def get_available_voices(self, language: str = 'en-us') -> List[str]:
        """Return the voices for `language`, or the fallback list.

        The fallback list is returned both when the language has no loaded
        model and when querying the model fails.
        """
        try:
            if language in self.models:
                return self.models[language].available_voices
            # No model for this language: hand back a copy of the fallback.
            return list(self._FALLBACK_VOICES)
        except Exception as e:
            logger.error(f"Failed to get available voices: {e}")
            return list(self._FALLBACK_VOICES)

    def get_available_languages(self) -> List[str]:
        """Return the language codes that have a loaded TTS model."""
        return list(self.models.keys())

    def recognize_speech(self, audio_path: str, language: str = 'en') -> str:
        """Transcribe an audio file to text with OpenAI Whisper.

        Args:
            audio_path: path of the audio file to transcribe.
            language: language code hint; passing it improves accuracy,
                while a falsy value lets Whisper auto-detect.

        Returns:
            The recognized text.

        Raises:
            HTTPException: 500 wrapping any recognition failure.
        """
        try:
            logger.info(f"开始识别音频文件: {audio_path}，语言: {language}")

            # A language hint improves accuracy; without one Whisper
            # auto-detects the spoken language.
            if language:
                result = self.whisper_model.transcribe(audio_path, language=language)
            else:
                result = self.whisper_model.transcribe(audio_path)

            text = result["text"]
            detected_language = result["language"]

            logger.info(f"语音识别成功，识别结果: {text[:30]}...，检测到的语言: {detected_language}")
            return text
        except Exception as e:
            logger.error(f"语音识别过程中发生错误: {e}")
            raise HTTPException(status_code=500, detail=f"语音识别错误: {str(e)}")
# Module-level TTS processor instance; loading the TTS and Whisper models
# happens here, at import time, so startup fails fast if models are broken.
processor = TTSProcessor()

# API index endpoint
@app.get("/")
async def root():
    """Report the service name, version, and the main endpoint paths."""
    endpoint_paths = [
        "/api/tts",
        "/api/voices",
        "/api/languages",
        "/api/phonetics",
        "/api/spelling-audio",
    ]
    return {
        "message": "Kitten TTS API Service",
        "version": "1.0",
        "endpoints": endpoint_paths,
    }

@app.get("/api/languages")
async def get_languages():
    """List the language codes the TTS processor can synthesize."""
    try:
        return {"languages": processor.get_available_languages()}
    except Exception as exc:
        logger.error(f"Error getting languages: {exc}")
        raise HTTPException(status_code=500, detail=f"Error getting languages: {str(exc)}")

@app.get("/api/voices")
async def get_voices(language: str = "en-us"):
    """List the voices available for the given language."""
    try:
        voice_list = processor.get_available_voices(language)
        return {"voices": voice_list, "language": language}
    except Exception as exc:
        logger.error(f"Error getting voices: {exc}")
        raise HTTPException(status_code=500, detail=f"Error getting voices: {str(exc)}")

@app.post("/api/tts")
async def tts_endpoint(request: TTSRequest, background_tasks: BackgroundTasks):
    """Text-to-speech endpoint.

    Validates the request, synthesizes speech with the requested voice,
    speed and language, and returns a WAV file.

    Args:
        request: text, voice, speed and language for the synthesis.
        background_tasks: injected by FastAPI; used to delete the temporary
            WAV file after the response has been sent (the original code
            leaked one temp file per request).

    Returns:
        FileResponse with the synthesized WAV audio.

    Raises:
        HTTPException: 400 on invalid parameters, 500 on synthesis failure.
    """
    try:
        # Validate input parameters
        if not request.text:
            raise HTTPException(status_code=400, detail="Text parameter is required")

        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")

        # Validate the language parameter
        available_languages = processor.get_available_languages()
        if request.language not in available_languages:
            raise HTTPException(
                status_code=400,
                detail={
                    'error': f'Language "{request.language}" not available',
                    'available_languages': available_languages
                }
            )

        # Validate the voice parameter
        available_voices = processor.get_available_voices(request.language)
        if request.voice not in available_voices:
            raise HTTPException(
                status_code=400,
                detail={
                    'error': f'Voice "{request.voice}" not available',
                    'available_voices': available_voices
                }
            )

        # Synthesize the audio
        start_time = time.time()
        audio = processor.generate_audio(
            request.text,
            voice=request.voice,
            speed=request.speed,
            language=request.language
        )
        process_time = time.time() - start_time

        # Write the audio to a temp file; schedule its removal for after the
        # response is sent so the file does not leak.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            sf.write(temp_file.name, audio, 24000)
            temp_filename = temp_file.name
        background_tasks.add_task(os.remove, temp_filename)

        logger.info(f"TTS request processed in {process_time:.2f} seconds")

        # Return the audio file
        return FileResponse(
            path=temp_filename,
            media_type="audio/wav",
            filename=f"tts_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"TTS processing error: {e}")
        raise HTTPException(status_code=500, detail=f"TTS processing error: {str(e)}")

@app.post("/api/speech-to-text")
async def speech_to_text(file: UploadFile = File(...), language: str = 'en'):
    """Speech-to-text endpoint.

    Accepts an uploaded audio file and transcribes it with Whisper.

    Args:
        file: uploaded audio file (content type must start with "audio/").
        language: language code hint for recognition, default "en".

    Returns:
        Dict with the recognized text, the language hint, and the filename.

    Raises:
        HTTPException: 400 for non-audio uploads, 500 on recognition failure.
    """
    try:
        # Validate the file type; content_type may be None, so guard it
        # (the original would raise AttributeError on a missing header).
        if not (file.content_type or "").startswith('audio/'):
            raise HTTPException(status_code=400, detail="请上传音频文件")

        # Save the upload to a temporary file for Whisper to read
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            content = await file.read()
            temp_file.write(content)
            temp_filename = temp_file.name

        logger.info(f"接收到音频文件: {file.filename}，大小: {len(content)} 字节")

        # Run recognition; the finally block guarantees the temp file is
        # removed even when recognition raises (the original leaked it).
        try:
            start_time = time.time()
            text = processor.recognize_speech(temp_filename, language)
            process_time = time.time() - start_time
        finally:
            try:
                os.remove(temp_filename)
            except OSError:
                # Best-effort cleanup; a stale temp file is not fatal.
                pass

        logger.info(f"语音识别完成，耗时: {process_time:.2f}秒")

        return {
            "text": text,
            "language": language,
            "filename": file.filename
        }
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"语音识别处理错误: {e}")
        raise HTTPException(status_code=500, detail=f"语音识别处理错误: {str(e)}")

@app.post("/api/spelling-audio")
async def spelling_audio_endpoint(request: SpellingAudioRequest, background_tasks: BackgroundTasks):
    """Word-spelling audio endpoint.

    Converts a word to phonemes, renders a readable spelling text, and
    synthesizes it as speech (English only).

    Args:
        request: word, voice and speed for the spelling audio.
        background_tasks: injected by FastAPI; used to delete the temporary
            WAV file after the response has been sent (the original code
            leaked one temp file per request).

    Returns:
        FileResponse with the spelling WAV audio.

    Raises:
        HTTPException: 400 on invalid parameters, 500 on failure.
    """
    try:
        # Validate input parameters
        if not request.word:
            raise HTTPException(status_code=400, detail="Word parameter is required")

        if request.speed <= 0:
            raise HTTPException(status_code=400, detail="Speed must be greater than 0")

        logger.info(f"Processing spelling audio for word: {request.word}")

        # Phoneme representation of the word
        phonetic_result = text_to_phonetics(request.word)
        phonetic_representation = phonetic_result["phonetic_representation"]

        # Convert phonemes into a readable spelling text
        spelling_text = phonetics_to_spelling_text(phonetic_representation)
        logger.info(f"Generated spelling text: {spelling_text}")

        # Synthesize the spelling audio (spelling currently supports English only)
        audio = processor.generate_audio(
            spelling_text,
            voice=request.voice,
            speed=request.speed,
            language="en-us"
        )

        # Write the audio to a temp file; schedule its removal for after the
        # response is sent so the file does not leak.
        with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as temp_file:
            sf.write(temp_file.name, audio, 24000)
            temp_filename = temp_file.name
        background_tasks.add_task(os.remove, temp_filename)

        logger.info(f"Spelling audio generated successfully for word: {request.word}")

        # Return the audio file
        return FileResponse(
            path=temp_filename,
            media_type="audio/wav",
            filename=f"spelling_{request.word}_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.wav"
        )
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"Spelling audio generation error: {e}")
        raise HTTPException(status_code=500, detail=f"Spelling audio generation error: {str(e)}")
        
@app.post("/api/phonetics")
async def phonetics_endpoint(request: PhoneticsRequest):
    """Text-to-phonetics endpoint.

    Converts the input text to a phonetic representation via the project's
    text_to_phonetics helper (NLTK POS tagging + CMU dictionary).

    Args:
        request: object carrying the text to convert.

    Returns:
        JSON payload with the original text, its phonetic representation,
        the plain token list, and the tagged tokens.
    """
    try:
        # Reject empty input up front
        if not request.text:
            raise HTTPException(status_code=400, detail="Text parameter is required")

        logger.info(f"Processing phonetic conversion for text: {request.text[:30]}...")

        conversion = text_to_phonetics(request.text)
        tagged_tokens = conversion["tokens"]

        logger.info("Phonetic conversion completed successfully")

        response_payload = {
            "original_text": conversion["original_text"],
            "phonetic_representation": conversion["phonetic_representation"],
            "tokens": [entry[0] for entry in tagged_tokens],
            "tagged_tokens": tagged_tokens,
        }
        return response_payload
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"Phonetic conversion error: {e}")
        raise HTTPException(status_code=500, detail=f"Phonetic conversion error: {str(e)}")

# CORS support
@app.middleware("http")
async def add_cors_headers(request, call_next):
    """Attach permissive CORS headers to every response.

    Also short-circuits CORS preflight (OPTIONS) requests with a 200 —
    the original implementation let them fall through to the router,
    which answers 405 for POST-only routes, so browser preflight failed
    and cross-origin POSTs were blocked.
    """
    if request.method == "OPTIONS":
        # Preflight: succeed immediately; the headers below are the answer.
        response = JSONResponse(content={}, status_code=200)
    else:
        response = await call_next(request)
    response.headers["Access-Control-Allow-Origin"] = "*"
    response.headers["Access-Control-Allow-Methods"] = "GET, POST, OPTIONS"
    response.headers["Access-Control-Allow-Headers"] = "Content-Type, Authorization"
    return response

# Script entry point: run a uvicorn development server on all interfaces, port 8000
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)