import sys
import uuid

import numpy as np
import paddle
import soundfile as sf
import uvicorn as uvicorn

paddle.set_device("cpu")

from fastapi import FastAPI
from fastapi.responses import StreamingResponse, FileResponse
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from fastapi.middleware.cors import CORSMiddleware
from examples.ge2e.audio_processor import SpeakerVerificationPreprocessor
from parakeet.models.lstm_speaker_encoder import LSTMSpeakerEncoder
from parakeet.models.tacotron2 import Tacotron2
from examples.tacotron2_aishell3.chinese_g2p import convert_sentence
from examples.tacotron2_aishell3.aishell3 import voc_phones, voc_tones
from parakeet.models import ConditionalWaveFlow
from typing import Optional

"""
fastApi快速入门
https://blog.csdn.net/weixin_37254196/article/details/108143652
fastApi 返回流
https://fastapi.tiangolo.com/zh/advanced/custom-response/
Python框架FastAPI：比Flask和Tornada更高性能的API 框架
https://www.cnblogs.com/dcpeng/p/12716572.html

docker
sudo docker run --name tts_web_server -it -v $PWD:/Parakeet -p 10.17.33.63:8084:8000 tts:latest /bin/bash

# start server command
 uvicorn web:app --reload
 
 nohup uvicorn web:app --reload --host 0.0.0.0 --port 8000 &

"""
# NOTE(review): this path tweak runs *after* the `examples.*` / `parakeet.*`
# imports above, so it cannot help resolve them — presumably the repo root is
# already importable when this script starts; confirm, or move this line above
# those imports.
sys.path.append("../../")

# Load the pretrained models.
# Speaker encoder: maps a reference utterance to a fixed-size speaker embedding.
p = SpeakerVerificationPreprocessor(
    sampling_rate=16000,
    audio_norm_target_dBFS=-30,
    vad_window_length=30,
    vad_moving_average_width=8,
    vad_max_silence_length=6,
    mel_window_length=25,
    mel_window_step=10,
    n_mels=40,
    partial_n_frames=160,
    min_pad_coverage=0.75,
    partial_overlap_ratio=0.5)
# n_mels matches the preprocessor above; output_size=256 is the embedding dim
# consumed by the synthesizer's global condition.
speaker_encoder = LSTMSpeakerEncoder(n_mels=40, num_layers=3, hidden_size=256, output_size=256)
speaker_encoder_params_path = "../../pretrained/ge2e/ge2e_ckpt_0.3/step-3000000.pdparams"
speaker_encoder.set_state_dict(paddle.load(speaker_encoder_params_path))
speaker_encoder.eval()  # inference only

# Synthesizer: Tacotron2 (phones + tones -> mel spectrogram), conditioned on a
# 256-dim speaker embedding (d_global_condition matches the speaker encoder's
# output_size above).
synthesizer = Tacotron2(
    vocab_size=68,
    n_tones=10,
    d_mels=80,
    d_encoder=512,
    encoder_conv_layers=3,
    encoder_kernel_size=5,
    d_prenet=256,
    d_attention_rnn=1024,
    d_decoder_rnn=1024,
    attention_filters=32,
    attention_kernel_size=31,
    d_attention=128,
    d_postnet=512,
    postnet_kernel_size=5,
    postnet_conv_layers=5,
    reduction_factor=1,
    p_encoder_dropout=0.5,
    p_prenet_dropout=0.5,
    p_attention_dropout=0.1,
    p_decoder_dropout=0.1,
    p_postnet_dropout=0.5,
    d_global_condition=256,
    use_stop_token=False,
)
params_path = "../../pretrained/tacotron2_aishell3/tacotron2_aishell3_ckpt_0.3/step-450000.pdparams"
synthesizer.set_state_dict(paddle.load(params_path))
synthesizer.eval()  # inference only

# Vocoder: WaveFlow (mel spectrogram -> waveform); n_mels matches the
# synthesizer's d_mels above.
voice_coder = ConditionalWaveFlow(
    upsample_factors=[16, 16],
    n_flows=8,
    n_layers=8,
    n_group=16,
    channels=128,
    n_mels=80,
    kernel_size=[3, 3]
)
# NOTE: rebinds the module-level `params_path` used for the synthesizer above;
# harmless because that value was already consumed.
params_path = "../../pretrained/waveflow/waveflow_ljspeech_ckpt_0.3/step-2000000.pdparams"
voice_coder.set_state_dict(paddle.load(params_path))
voice_coder.eval()  # inference only

# Pre-compute the two reference speaker embeddings (female and male voices).
def _embed_reference(wav_path, label):
    """Return the speaker embedding for the reference wav at *wav_path*.

    *label* ("female"/"male") is interpolated into the log messages so they
    stay identical to the original per-voice prints.
    """
    mel_sequences = p.extract_mel_partials(p.preprocess_wav(wav_path))
    print(f"mel_sequences_{label}: ", mel_sequences.shape)
    with paddle.no_grad():  # inference only; no gradients needed
        embed = speaker_encoder.embed_utterance(paddle.to_tensor(mel_sequences))
    print(f"embed shape {label}: ", embed.shape)
    return embed


# Female reference voice
ref_audio_path_female = "./ref_audio/female.wav"
embed_female = _embed_reference(ref_audio_path_female, "female")
# Male reference voice
ref_audio_path_male = "./ref_audio/male.wav"
embed_male = _embed_reference(ref_audio_path_male, "male")

# Start the web service.
app = FastAPI()
# CORS: allow any origin/method/header so browser front ends on other hosts
# can call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class TtsModel(BaseModel):
    """Request body for POST /tts."""

    # Sentence to synthesize (1-15 characters). The title/description strings
    # surface in the OpenAPI docs and are kept in the original Chinese.
    sentence: str = Field(None, title="要合成的句子", description="请输入要合成的句子", max_length=15, min_length=1)
    # 1 selects the male reference voice; any other value uses the female one.
    type: Optional[int] = 0
    # "json" returns a JSON body; anything else streams the wav bytes.
    response_type: Optional[str] = None


@app.get('/')
def index():
    """Landing endpoint; confirms the service is up."""
    welcome = {'message': '欢迎来到 FastApi 服务！'}
    return welcome


@app.get('/audio/{file_id}')
def audio(file_id: str):
    """Serve a previously synthesized wav file by its id.

    *file_id* comes straight from the URL and is interpolated into a
    filesystem path, so it is validated as a UUID first — otherwise a crafted
    id (e.g. one containing "../") could read arbitrary *.wav files outside
    ./syn_audio. Valid ids are exactly what /tts generates via uuid.uuid1().
    """
    try:
        uuid.UUID(file_id)
    except ValueError:
        return JSONResponse({"code": "404", "msg": "invalid audio id"}, status_code=404)
    wav_file_name = get_wav_file_name(file_id)
    return FileResponse(wav_file_name)


@app.post('/tts')
def tts(tts_model: TtsModel):
    """Synthesize speech for ``tts_model.sentence``.

    Pipeline: g2p -> Tacotron2 mel synthesis (speaker-conditioned) -> WaveFlow
    vocoder -> wav file on disk. Returns either a JSON body
    (``response_type == 'json'``) or the wav bytes as a stream; in both cases
    the code/msg/audio_src metadata also travels in the response headers.
    """
    try:
        sentence = tts_model.sentence
        # Field default is None, so an omitted sentence must be rejected
        # explicitly instead of crashing on .endswith().
        if not sentence:
            headers = {"code": "400", "msg": "sentence is required"}
            return JSONResponse(headers, headers=headers)
        # '$' is the end-of-sentence marker expected by convert_sentence.
        if not sentence.endswith('$'):
            sentence += '$'
        print("sentence: ", sentence)
        # type == 1 selects the male reference voice, anything else the female.
        embed = embed_male if tts_model.type == 1 else embed_female
        response_type = tts_model.response_type

        # Grapheme-to-phoneme conversion, then map symbols to vocabulary ids.
        phones, tones = convert_sentence(sentence)
        print(phones)
        print(tones)

        phones = np.array([voc_phones.lookup(item) for item in phones], dtype=np.int64)
        tones = np.array([voc_tones.lookup(item) for item in tones], dtype=np.int64)

        # Add the batch dimension expected by the models.
        phones = paddle.to_tensor(phones).unsqueeze(0)
        tones = paddle.to_tensor(tones).unsqueeze(0)
        utterance_embeds = paddle.unsqueeze(embed, 0)

        # Mel-spectrogram synthesis; transpose to (batch, mels, frames) for
        # the vocoder.
        outputs = synthesizer.infer(phones, tones=tones, global_condition=utterance_embeds)
        mel_input = paddle.transpose(outputs["mel_outputs_postnet"], [0, 2, 1])

        # Vocoder: mel spectrogram -> waveform.
        print("开始合成语音……")
        with paddle.no_grad():
            wav = voice_coder.infer(mel_input)
        wav = wav.numpy()[0]
        print("语音合成结束")

        audio_id = uuid.uuid1()
        wav_file_name = get_wav_file_name(audio_id)
        sf.write(wav_file_name, wav, samplerate=22050)

        headers = {"code": "0", "msg": "success", "audio_src": f"/audio/{audio_id}"}
        if response_type == 'json':
            return JSONResponse(headers, headers=headers)
        # Default: stream the wav bytes back directly.
        return StreamingResponse(iter_file(wav_file_name), headers=headers, media_type='audio/x-wav')
    except Exception as exc:
        # Bug fix: the original bare `except:` also swallowed SystemExit /
        # KeyboardInterrupt, and `sys.exc_info()[0]` put the exception *class*
        # (not a string) into the headers dict, which is invalid as an HTTP
        # header value. Header values must also be latin-1 encodable.
        error_msg = str(exc).encode("latin-1", "replace").decode("latin-1")
        headers = {"code": "501", "msg": error_msg}
        return JSONResponse(headers, headers=headers)


def iter_file(wav_file_name: str):
    """Yield the file at *wav_file_name* as fixed-size binary chunks.

    Bug fix: the original iterated the binary file object directly, which
    splits on stray b"\\n" bytes and therefore yields arbitrarily sized
    (possibly huge) "lines" for audio data. Fixed 64 KiB reads give
    predictable memory use for streaming.
    """
    with open(wav_file_name, mode='rb') as file_like:
        while True:
            chunk = file_like.read(64 * 1024)
            if not chunk:
                break
            yield chunk


def get_wav_file_name(_id):
    """Map an audio id to its wav file path under ./syn_audio."""
    return "./syn_audio/w{}.wav".format(_id)


if __name__ == '__main__':
    # Dev entry point (`python web.py`). NOTE(review): this uses port 8080,
    # while the uvicorn commands in the header notes use the default 8000 —
    # confirm which one deployments expect.
    uvicorn.run(app=app, port=8080)
