# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Liu Yue)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import argparse
import numpy as np
import torch
import torchaudio
import random
import librosa
import tempfile
import base64
import io
from typing import Optional, List
from fastapi import FastAPI, File, UploadFile, Form, HTTPException, BackgroundTasks
from fastapi.responses import StreamingResponse, JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import requests
import json

ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR))
from cosyvoice.cli.cosyvoice import CosyVoice, CosyVoice2
from cosyvoice.utils.file_utils import load_wav, logging
from cosyvoice.utils.common import set_all_random_seed

app = FastAPI(title="CosyVoice API", description="API 接口服务，基于 CosyVoice 模型")

# Enable CORS so browser front-ends served from other origins can call the API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # allow every origin; restrict to specific domains in production
    allow_credentials=True,
    allow_methods=["*"],  # allow all HTTP methods
    allow_headers=["*"],  # allow all request headers
)

# Inference mode names, kept in sync with the web UI.
inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制']
max_val = 0.8  # peak amplitude target used when normalizing prompt audio in postprocess()
prompt_sr = 16000  # minimum/required sample rate (Hz) for uploaded prompt audio

# Request model definitions
class TTSRequest(BaseModel):
    """Request body for the /tts endpoint (pretrained-voice and instruct modes)."""
    tts_text: str  # text to synthesize
    mode: str  # one of inference_mode_list
    sft_speaker: Optional[str] = None  # pretrained speaker name (pretrained / instruct modes)
    prompt_text: Optional[str] = None  # prompt transcript (not used by /tts; see /tts_clone)
    instruct_text: Optional[str] = None  # natural-language style instruction (instruct mode)
    seed: int = 0  # random seed for reproducible synthesis
    stream: bool = False  # stream audio chunks instead of a single response
    speed: float = 1.0  # playback speed multiplier

class QwenRequest(BaseModel):
    """Request body for the /qwen endpoint."""
    prompt: str  # prompt text forwarded verbatim to the local Ollama Qwen model

def postprocess(speech, top_db=60, hop_length=220, win_length=440):
    """Trim silence, peak-normalize, and pad a prompt-speech tensor.

    Args:
        speech: speech tensor as returned by ``load_wav`` — assumed shape
            (1, num_samples), mono, channel-first (TODO confirm upstream).
        top_db: silence threshold in dB below peak for librosa trimming.
        hop_length: hop length in samples for the trim analysis.
        win_length: frame length in samples for the trim analysis.

    Returns:
        The processed tensor with 0.2 s of trailing silence appended at the
        loaded model's sample rate.
    """
    speech, _ = librosa.effects.trim(
        speech, top_db=top_db,
        frame_length=win_length,
        hop_length=hop_length
    )
    # Compute the peak once (previously evaluated twice) and scale down only
    # when it exceeds the configured ceiling.
    peak = speech.abs().max()
    if peak > max_val:
        speech = speech / peak * max_val
    # Append 0.2 s of silence so playback does not cut off abruptly.
    speech = torch.concat([speech, torch.zeros(1, int(cosyvoice.sample_rate * 0.2))], dim=1)
    return speech

@app.get("/")
def read_root():
    """Health-check endpoint confirming the service is up."""
    status_message = "CosyVoice API 服务正在运行"
    return {"message": status_message}

@app.get("/speakers")
async def get_speakers():
    """List the pretrained speaker names exposed by the loaded model.

    Falls back to a default set of speaker names when the model reports
    no pretrained speakers. Errors are surfaced as HTTP 500.
    """
    try:
        speakers = cosyvoice.list_available_spks()
        if not speakers:
            speakers = ["中文女", "中文男", "英文女", "英文男"]
        return {"speakers": speakers}
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

def _result_to_wav_io(result):
    """Encode one inference result's speech tensor as an in-memory WAV file."""
    audio_data = result['tts_speech'].numpy().flatten()
    wav_io = io.BytesIO()
    torchaudio.save(wav_io, torch.tensor(audio_data).unsqueeze(0), cosyvoice.sample_rate, format="wav")
    wav_io.seek(0)
    return wav_io


def _wav_chunk_generator(results):
    """Yield each inference result as a standalone WAV byte chunk.

    NOTE(review): every chunk carries its own WAV header, so clients receive
    a sequence of WAV files rather than one continuous stream — confirm the
    consumers expect this framing.
    """
    for result in results:
        yield _result_to_wav_io(result).read()


@app.post("/tts")
async def text_to_speech(request: TTSRequest):
    """
    Text-to-speech API.

    - **tts_text**: text to synthesize
    - **mode**: inference mode ('预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制')
    - **sft_speaker**: pretrained speaker name (pretrained-voice / instruct modes)
    - **prompt_text**: prompt transcript (3s voice-clone mode only; not used here)
    - **instruct_text**: instruction text (natural-language-control mode only)
    - **seed**: random seed
    - **stream**: whether to run streaming inference
    - **speed**: speed multiplier

    Returns WAV audio as a StreamingResponse; raises 400 on invalid
    parameters and 500 on inference failures.
    """
    try:
        # Validate request parameters.
        if request.mode not in inference_mode_list:
            raise HTTPException(status_code=400, detail=f"无效的模式，可用模式: {inference_mode_list}")

        # Per-mode validation, then obtain the (lazy) result generator.
        if request.mode == '预训练音色':
            if not request.sft_speaker or request.sft_speaker not in cosyvoice.list_available_spks():
                raise HTTPException(status_code=400, detail="请提供有效的预训练音色名称")
            set_all_random_seed(request.seed)
            results = cosyvoice.inference_sft(
                request.tts_text,
                request.sft_speaker,
                stream=request.stream,
                speed=request.speed
            )
        elif request.mode == '自然语言控制':
            if cosyvoice.instruct is False:
                raise HTTPException(status_code=400, detail="当前模型不支持自然语言控制模式，请使用iic/CosyVoice-300M-Instruct模型")
            if not request.instruct_text:
                raise HTTPException(status_code=400, detail="自然语言控制模式需要提供instruct_text")
            # NOTE(review): sft_speaker is forwarded unvalidated here and may be
            # None — confirm inference_instruct tolerates that.
            set_all_random_seed(request.seed)
            results = cosyvoice.inference_instruct(
                request.tts_text,
                request.sft_speaker,
                request.instruct_text,
                stream=request.stream,
                speed=request.speed
            )
        else:
            raise HTTPException(status_code=400, detail="此API路径不支持复刻模式，请使用/tts_clone接口")

        if request.stream:
            # Bug fix: the streamed chunks are WAV data, so advertise
            # audio/wav (previously mislabeled as audio/mpeg).
            return StreamingResponse(_wav_chunk_generator(results), media_type="audio/wav")
        # Non-streaming: take the single (first) result and return it as WAV.
        return StreamingResponse(_result_to_wav_io(next(results)), media_type="audio/wav")
    except HTTPException:
        # Bug fix: re-raise deliberate HTTP errors (e.g. the 400 validation
        # failures above) instead of wrapping them into a generic 500.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@app.post("/tts_clone")
async def clone_voice(
    tts_text: str = Form(...),
    mode: str = Form(...),
    prompt_text: Optional[str] = Form(None),
    seed: int = Form(0),
    stream: bool = Form(False),
    speed: float = Form(1.0),
    prompt_audio: UploadFile = File(...)
):
    """
    Voice-cloning API.

    - **tts_text**: text to synthesize
    - **mode**: inference mode ('3s极速复刻', '跨语种复刻')
    - **prompt_text**: prompt transcript (3s voice-clone mode only)
    - **seed**: random seed
    - **stream**: whether to run streaming inference
    - **speed**: speed multiplier
    - **prompt_audio**: uploaded prompt audio file

    Returns the synthesized speech as a WAV stream; raises 400 on an invalid
    mode, a too-low prompt sample rate, or a missing prompt transcript.
    """
    # Validate request parameters.
    if mode not in ['3s极速复刻', '跨语种复刻']:
        raise HTTPException(status_code=400, detail="无效的模式，可用模式: '3s极速复刻', '跨语种复刻'")

    # Persist the upload to a temp file so torchaudio/load_wav can read it.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as temp_file:
        content = await prompt_audio.read()
        temp_file.write(content)
        temp_audio_path = temp_file.name

    try:
        # Hoisted: query the audio metadata once instead of twice.
        sample_rate = torchaudio.info(temp_audio_path).sample_rate
        if sample_rate < prompt_sr:
            raise HTTPException(status_code=400,
                             detail=f'prompt音频采样率{sample_rate}低于{prompt_sr}')

        # Load the prompt at prompt_sr and trim/normalize/pad it.
        prompt_speech_16k = postprocess(load_wav(temp_audio_path, prompt_sr))

        set_all_random_seed(seed)

        if mode == '3s极速复刻':
            if not prompt_text:
                raise HTTPException(status_code=400, detail="3s极速复刻模式需要提供prompt_text")
            # NOTE(review): next() takes only the first generated chunk, so
            # stream=True still yields a single (possibly truncated) response
            # — confirm this is intended.
            result = next(cosyvoice.inference_zero_shot(
                tts_text,
                prompt_text,
                prompt_speech_16k,
                stream=stream,
                speed=speed
            ))
        else:  # '跨语种复刻'
            if cosyvoice.instruct is True:
                raise HTTPException(status_code=400,
                                 detail="当前模型不支持跨语种复刻模式，请使用iic/CosyVoice-300M模型")
            result = next(cosyvoice.inference_cross_lingual(
                tts_text,
                prompt_speech_16k,
                stream=stream,
                speed=speed
            ))

        # Encode the speech tensor as an in-memory WAV file and stream it back.
        audio_data = result['tts_speech'].numpy().flatten()
        wav_io = io.BytesIO()
        torchaudio.save(wav_io, torch.tensor(audio_data).unsqueeze(0), cosyvoice.sample_rate, format="wav")
        wav_io.seek(0)
        return StreamingResponse(wav_io, media_type="audio/wav")

    finally:
        # Always remove the temp file, even when validation or inference fails.
        if os.path.exists(temp_audio_path):
            os.unlink(temp_audio_path)

@app.post("/qwen")
async def call_qwen(request: QwenRequest):
    """Proxy a prompt to the local Ollama Qwen2.5 model and return its reply.

    Raises 500 when Ollama is unreachable, returns a non-200 status, or the
    response payload is malformed.
    """
    try:
        # Call the local Ollama service.
        # Bug fix: add a timeout so a hung Ollama instance cannot block this
        # request handler forever.
        response = requests.post(
            "http://localhost:11434/api/generate",
            json={
                "model": "qwen2.5:7b",
                "prompt": request.prompt,
                "stream": False
            },
            timeout=300
        )

        if response.status_code != 200:
            raise HTTPException(status_code=500, detail="调用 Qwen 模型失败")

        result = response.json()
        return {"response": result["response"]}
    except HTTPException:
        # Bug fix: re-raise the deliberate HTTP error above instead of letting
        # the broad handler below re-wrap it with a different message.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"调用 Qwen 模型出错: {str(e)}")

def main():
    """CLI entry point: parse arguments, load the TTS model, start the server.

    Bug fix: the original parsed --model_dir but never loaded a model, so a
    server started via main() had no module-level `cosyvoice` and every
    endpoint failed with NameError. Now loads the model exactly like the
    __main__ block does.
    """
    global cosyvoice

    parser = argparse.ArgumentParser()
    parser.add_argument('--port',
                        type=int,
                        default=8000,
                        help='server port')
    parser.add_argument('--host',
                        type=str,
                        default='0.0.0.0',
                        help='server host')
    parser.add_argument('--model_dir',
                        type=str,
                        default='pretrained_models/CosyVoice2-0.5B',
                        help='local path or modelscope repo id')
    args = parser.parse_args()

    # Try the CosyVoice loader first, then fall back to CosyVoice2.
    try:
        cosyvoice = CosyVoice(args.model_dir)
    except Exception:
        try:
            cosyvoice = CosyVoice2(args.model_dir)
        except Exception:
            raise TypeError('无效的模型类型!')

    import uvicorn
    uvicorn.run(app, host=args.host, port=args.port)

if __name__ == '__main__':
    # Parse server/model options from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir',
                        type=str,
                        default='pretrained_models/CosyVoice2-0.5B',
                        help='local path or modelscope repo id')
    parser.add_argument('--port',
                        type=int,
                        default=8000,
                        help='server port')
    parser.add_argument('--host',
                        type=str,
                        default='0.0.0.0',
                        help='server host')
    args = parser.parse_args()
    
    # Load the TTS model: try the CosyVoice loader first, fall back to
    # CosyVoice2, and fail with TypeError when neither accepts model_dir.
    # The resulting module-level `cosyvoice` is read by every endpoint above.
    try:
        cosyvoice = CosyVoice(args.model_dir)
    except Exception:
        try:
            cosyvoice = CosyVoice2(args.model_dir)
        except Exception:
            raise TypeError('无效的模型类型!')

    # NOTE(review): sft_spk is computed but never used afterwards — confirm
    # whether this is dead code left over from the web UI.
    sft_spk = cosyvoice.list_available_spks()
    if len(sft_spk) == 0:
        sft_spk = ['']
    
    import uvicorn
    uvicorn.run(app, host=args.host, port=args.port) 