import os
from time import time
from typing import Optional

import torch
from fastapi import FastAPI, HTTPException, UploadFile, File, Form, Depends
from fastapi.responses import FileResponse
from pydantic import BaseModel
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
from TTS.api import TTS

# Create the FastAPI application
app = FastAPI()

# Load the XTTS-v2 text-to-speech model from local weights.
# NOTE(review): device "npu" requires an Ascend/torch-npu build of torch — confirm deployment env
device = "npu"
local_model_path = "/mnt/model_weights/tts/XTTS-v2"
local_config_path = "/mnt/model_weights/tts/XTTS-v2/config.json"
tts = TTS(model_path=local_model_path, config_path=local_config_path, progress_bar=False).to(device)

# Load the Whisper speech-recognition model from local weights.
model_id = "/mnt/model_weights/tts/whisper-large-v3-turbo"
torch_dtype = torch.float16  # half precision to cut memory use
modelWhisper = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True
)
modelWhisper.to(device)
processor = AutoProcessor.from_pretrained(model_id)
# Assemble an ASR pipeline from the loaded model, tokenizer, and feature extractor.
pipe = pipeline(
    "automatic-speech-recognition",
    model=modelWhisper,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=device,
)

# Dependency model: the multipart form fields expected by /v1/audio/speech.
class TTSRequest(BaseModel):
    """Validated TTS request: target model name, text to speak, and language code."""
    model: str     # must equal "XTTS-v2" (checked in the endpoint)
    input: str     # text to synthesize
    language: str  # language code forwarded to XTTS

def get_tts_request(model: str = Form(...), input: str = Form(...), language: str = Form(...)):
    """FastAPI dependency: gather the multipart form fields into a TTSRequest."""
    fields = {"model": model, "input": input, "language": language}
    return TTSRequest(**fields)

@app.post("/v1/audio/speech")
async def ttss(request: TTSRequest = Depends(get_tts_request), speaker_file: UploadFile = File(None)):
    if request.model != "XTTS-v2":
        raise HTTPException(status_code=400, detail="Invalid model name")

    # 生成语音文件的路径
    output_path = f"/mnt/model_lite/audio_tmp/{time()}out.wav"

    # 如果用户上传了 speaker_file，先保存到本地临时文件
    if speaker_file:
        temp_file_path = "/mnt/model_lite/audio_tmp" + speaker_file.filename  # 存储路径可以根据需要修改
        with open(temp_file_path, "wb") as f:
            f.write(await speaker_file.read())
        speaker_wav = temp_file_path  # 使用用户上传的语音文件
    else:
        # 如果没有上传文件，则使用默认语音文件
        speaker_wav = "asr_example_zh.wav"

    # 使用 tts.tts_to_file 进行语音合成
    tts.tts_to_file(
        text=request.input,  # 使用从 Depends 中传递的 input
        speaker_wav=speaker_wav,  # 使用上传的文件或默认文件
        language=request.language,  # 使用从 Depends 中传递的 language
        file_path=output_path
    )
    # 删除临时文件
    if temp_file_path and os.path.exists(temp_file_path):
        try:
            os.remove(temp_file_path)
            print(f"Temporary file {temp_file_path} deleted successfully.")
        except Exception as e:
            print(f"Failed to delete temporary file {temp_file_path}: {e}")
    return FileResponse(output_path, media_type="audio/wav", filename="speech.wav")



class ASRRequest(BaseModel):
    """Validated transcription request: target model name and audio language."""
    model: str     # must equal "whisper-large-v3-turbo" (checked in the endpoint)
    language: str  # declared language of the audio

def get_transcriptions_params(model: str = Form(...), language: str = Form(...)):
    """FastAPI dependency: gather the multipart form fields into an ASRRequest."""
    params = {"model": model, "language": language}
    return ASRRequest(**params)

@app.post("/v1/audio/transcriptions")
async def transcribe(request: ASRRequest = Depends(get_transcriptions_params), audio_file: UploadFile = File(...)):
    if request.model != "whisper-large-v3-turbo":
        raise HTTPException(status_code=400, detail="Invalid model name")

    # 读取音频文件
    print("Reading audio file...")
    audio_bytes = await audio_file.read()
    print("Audio file read successfully")

    # 使用 Whisper 模型进行转录
    result = pipe(audio_bytes)
    print("Transcription result: ")
    print(result)
    return {"text": result["text"]}

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=10027)