import os
import sys
import uvicorn
import logging
import argparse
import numpy as np
from my_redis import MyRedis
from typing import Optional, Literal
from typing_extensions import Annotated
from utils import load_audio_from_bytes
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel, Field, ValidationError
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi import FastAPI, HTTPException, File, UploadFile, Form


ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
# Make the project root and the vendored Matcha-TTS package importable
# before importing async_cosyvoice below.
sys.path.append(f'{ROOT_DIR}/../../..')
sys.path.append(f'{ROOT_DIR}/../../../third_party/Matcha-TTS')
from async_cosyvoice.async_cosyvoice import AsyncCosyVoice2

logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(message)s'
)

# TTS engine singleton; assigned in main() before uvicorn starts serving.
cosyvoice: AsyncCosyVoice2 | None = None

# Redis helper used to signal cancellation of in-flight synthesis sessions.
my_redis: MyRedis = MyRedis()
app = FastAPI()

# Configure CORS
app.add_middleware(
    CORSMiddleware,          # noqa
    allow_origins=["*"],     # allow all origins; restrict to concrete domains in production
    allow_credentials=True,
    allow_methods=["*"],     # allow all HTTP methods (GET, POST, ...)
    allow_headers=["*"],     # allow all request headers
)

class VoiceUploadResponse(BaseModel):
    """Response model for the voice-upload endpoint."""
    # Speaker ID; callers pass it back as the `voice` field of SpeechRequest.
    voice_spk_id: str = Field(
        ...,
        examples=["001", "雷军"],
        description="音色对应的ID"
    )

# noinspection PyMethodParameters
class SpeechRequest(BaseModel):
    """Speech synthesis request parameters.

    Mirrors an OpenAI-style /v1/audio/speech payload: text to synthesize,
    speaker selection, output format, and streaming/speed options.
    """
    # Text to be synthesized (hard capped at 4096 chars by pydantic).
    input: str = Field(
        ...,
        max_length=4096,
        examples=["你好，欢迎使用语音合成服务！"],
        description="需要转换为语音的文本内容"
    )
    # Speaker ID, either a short ID or a full speech URI.
    voice: str = Field(
        ...,
        examples=[
            "001",
            "speech:voice-name:xxx:xxx",
        ],
        description="音色选择"
    )
    response_format: Optional[Literal["mp3", "wav", "pcm"]] = Field(
        "mp3",
        examples=["mp3", "wav", "pcm"],
        description="输出音频格式"
    )
    sample_rate: Optional[int] = Field(
        24000,
        description="采样率，目前不支持设置，默认为返回 24000 Hz音频数据"
    )
    stream: Optional[bool] = Field(
        False,
        description="开启流式返回。"
    )
    # FIX: the original declared Field() twice for this attribute — once
    # inside Annotated[...] (strict/ge/le) and once as the default value.
    # Duplicate Field metadata is redundant and rejected/fragile under
    # pydantic v2; a single consolidated Field expresses the same schema.
    speed: Optional[float] = Field(
        1.0,
        strict=True,
        ge=0.25,
        le=4.0,
        description="语速控制[0.25-4.0]"
    )

def save_voice_data(custom_name: str, audio_data: bytes, text: str) -> str:
    """Persist reference audio as a reusable speaker profile.

    Args:
        custom_name: User-chosen name; doubles as the speaker ID and the
            stored profile name.
        audio_data: Raw bytes of the uploaded reference audio file.
        text: Transcript of the reference audio (the prompt text).

    Returns:
        The speaker ID callers should pass as `voice` in later requests.

    Raises:
        RuntimeError: If the TTS engine has not been initialized yet
            (endpoint hit before main() assigned the global).
    """
    if cosyvoice is None:
        # Clearer failure than the AttributeError the bare global would raise.
        raise RuntimeError("TTS engine is not initialized")
    # Decode/resample the uploaded bytes to 16 kHz for the frontend.
    prompt_speech_16k = load_audio_from_bytes(audio_data, 16000)
    cosyvoice.frontend.generate_spk_info(
        custom_name,
        text,
        prompt_speech_16k,
        24000,          # presumably the stored profile's sample rate — TODO confirm
        custom_name
    )
    return custom_name

@app.get("/inference_zero_shot")
@app.post("/inference_zero_shot")
async def inference_zero_shot(session_id: str = Form(), tts_text: str = Form()):
    # Stream raw int16 PCM chunks produced by generate_audio().
    # NOTE(review): registering this on GET with Form params is unusual —
    # GET requests normally carry no form body; confirm clients use POST.
    return StreamingResponse(generate_audio(session_id, tts_text))

async def generate_audio(session_id: str, tts_text: str):
    """Yield raw 16-bit PCM audio chunks for each comma-separated segment.

    FIXES vs. original:
    - Dropped `= Form()` defaults: this is a plain helper, not a route, so
      FastAPI never resolves them — as Python defaults they were `Form`
      objects, not strings.
    - `speed` is now the float 1.0 instead of the string "1.0".
    - The loop variable no longer shadows the `tts_text` parameter.
    - Cancellation message goes through logging instead of print().

    Args:
        session_id: Client session; forms the Redis key whose deletion
            cancels this synthesis mid-stream.
        tts_text: Text to synthesize; split on ",". A segment may carry an
            instruct prefix terminated by "<|endofprompt|>".

    Yields:
        bytes: int16 PCM audio data.
    """
    key: str = f"chat_server:voice:{session_id}"
    my_redis.set_key(key)
    segments: list[str] = tts_text.split(",")
    for segment in segments:
        try:
            # The key is removed externally to request cancellation.
            if not my_redis.check_key_exist(key):
                logging.info("终止本次的语音合成......")
                break
            marker = "<|endofprompt|>"
            marker_index = segment.find(marker)
            if marker_index != -1:
                # "instruction<|endofprompt|>text": the instruct part (marker
                # included) steers style; the remainder is synthesized.
                split_at = marker_index + len(marker)
                instruct_text = segment[:split_at]
                segment = segment[split_at:]
                audio_data_generator = cosyvoice.inference_instruct2_by_spk_id(
                    segment,
                    instruct_text,
                    "001",  # hard-coded speaker ID — TODO: accept from caller
                    stream=True,
                    speed=1.0,
                    text_frontend=True,
                )
            else:
                audio_data_generator = cosyvoice.inference_zero_shot_by_spk_id(
                    segment,
                    "001",
                    stream=True,
                    speed=1.0,
                    text_frontend=True,
                )
            async for chunk in audio_data_generator:
                # Scale float waveform (assumed in [-1, 1]) to int16 PCM bytes.
                tts_audio = (chunk['tts_speech'].numpy() * (2 ** 15)).astype(np.int16).tobytes()
                yield tts_audio
        except Exception as e:
            # Best-effort: log with traceback and continue with the next segment.
            logging.error(f"Processing failed: {str(e)}", exc_info=True)

@app.post("/v1/uploads/audio/voice", response_model=VoiceUploadResponse)
async def upload_voice(
    customName: str = Form(...),
    text: str = Form(...),
    file: UploadFile = File(...)
):
    """## Register a user-defined voice (speaker profile).

    Accepts a reference audio file plus its transcript and stores them
    under `customName`, which becomes the speaker ID returned to the caller.
    """
    try:
        audio_data = await file.read()
        voice_spk_id = save_voice_data(customName, audio_data, text)
        return VoiceUploadResponse(voice_spk_id=voice_spk_id)
    except ValidationError as ve:
        # Surface pydantic validation problems as a 422 with structured details.
        # FIX: chain with `from ve` so the original cause is preserved.
        raise HTTPException(422, detail=ve.errors()) from ve
    except Exception as e:
        # FIX: exc_info=True keeps the traceback in the log; `from e`
        # preserves the exception chain for debugging.
        logging.error(f"上传失败: {str(e)}", exc_info=True)
        raise HTTPException(500, detail=str(e)) from e

def main(args):
    """Initialize the global TTS engine, then serve the FastAPI app."""
    global cosyvoice
    cosyvoice = AsyncCosyVoice2(
        args.model_dir,
        load_jit=args.load_jit,
        load_trt=args.load_trt,
        fp16=args.fp16,
    )
    uvicorn.run(app, host=args.host, port=args.port)

if __name__ == "__main__":
    # Example: python server.py --load_jit --load_trt --fp16
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str, default='0.0.0.0')
    parser.add_argument('--port', type=int, default=8022)
    parser.add_argument(
        '--model_dir',
        type=str,
        default='../../../pretrained_models/CosyVoice2-0.5B',
        help='local path or modelscope repo id',
    )
    # Boolean feature flags, all off by default.
    for flag, desc in (
        ('--load_jit', 'load jit model'),
        ('--load_trt', 'load tensorrt model'),
        ('--fp16', 'use fp16'),
    ):
        parser.add_argument(flag, action='store_true', help=desc)
    main(parser.parse_args())